diff --git a/examples/pulser_compiler.py b/examples/pulser_compiler.py
index eccf708..2ca0be4 100644
--- a/examples/pulser_compiler.py
+++ b/examples/pulser_compiler.py
@@ -2,6 +2,7 @@
 import numpy as np
 
 from qadence2_expressions import RX, RY, add_qpu_directives, compile_to_model, parameter
+from qadence2_platforms.abstracts import OnEnum
 from qadence2_platforms.compiler import compile_to_backend
 
 a = parameter("a")
@@ -14,5 +15,6 @@
 f_params = {"a": np.array([1.0])}
 
 compiled_model = compile_to_backend(model, "fresnel1")
-res = compiled_model.sample(values=f_params, shots=10_000, on="emulator")
+res = compiled_model.sample(values=f_params, shots=10_000, on=OnEnum.EMULATOR)
+
 print(f"sample result: {res}")
diff --git a/examples/pyq_compiler.py b/examples/pyq_compiler.py
index 8f438f8..a5d9c46 100644
--- a/examples/pyq_compiler.py
+++ b/examples/pyq_compiler.py
@@ -20,4 +20,4 @@
 wf = compiled_model.run(state=pyq.zero_state(2), values=f_params)
 
 dfdx = torch.autograd.grad(wf, f_params["a"], torch.ones_like(wf))[0]
-print(f"{dfdx = }\n")
+print(f"{dfdx=}\n")