Quantum models
QuantumModel(circuit, observable=None, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, mitigation=None, configuration=None)
Bases: Module
The central class of qadence that executes QuantumCircuits and makes them differentiable.
This class should be used as the base class for any new quantum model supported in the qadence framework. For information on the implementation of custom models, see here.
Example:
```python
import torch
from qadence import QuantumModel, QuantumCircuit, RX, RY, Z, PI, chain, kron
from qadence import FeatureParameter, VariationalParameter

theta = VariationalParameter("theta")
phi = FeatureParameter("phi")

block = chain(
    kron(RX(0, theta), RY(1, theta)),
    kron(RX(0, phi), RY(1, phi)),
)

circuit = QuantumCircuit(2, block)
observable = Z(0) + Z(1)

model = QuantumModel(circuit, observable)
values = {"phi": torch.tensor([PI, PI/2]), "theta": torch.tensor([PI, PI/2])}

wf = model.run(values)
xs = model.sample(values, n_shots=100)
ex = model.expectation(values)
print(wf)
print(xs)
print(ex)
```

```
tensor([[ 1.0000e+00+0.0000e+00j, -1.2246e-16+0.0000e+00j,
          0.0000e+00+1.2246e-16j,  0.0000e+00-1.4998e-32j],
        [ 4.9304e-32+0.0000e+00j,  2.2204e-16+0.0000e+00j,
          0.0000e+00-2.2204e-16j,  0.0000e+00-1.0000e+00j]])
[OrderedCounter({'00': 100}), OrderedCounter({'11': 100})]
tensor([[ 2.],
        [-2.]], requires_grad=True)
```

Initialize a generic QuantumModel instance.
| PARAMETER | DESCRIPTION |
|---|---|
| `circuit` | The circuit that is executed. TYPE: `QuantumCircuit` |
| `observable` | Optional observable(s) that are used only in the `expectation` method. You can also provide observables on the fly to the expectation call directly. TYPE: `list[AbstractBlock] \| AbstractBlock \| None` |
| `backend` | A backend for circuit execution. TYPE: `BackendName \| str` |
| `diff_mode` | A differentiability mode. Parameter shift based modes work on all backends. AD based modes only on PyTorch based backends. TYPE: `DiffMode` |
| `measurement` | Optional measurement protocol. If None, use exact expectation value with a statevector simulator. TYPE: `Measurements \| None` |
| `configuration` | Configuration for the backend. TYPE: `BackendConfiguration \| dict \| None` |
| `noise` | A noise model to use. TYPE: `NoiseHandler \| None` |
| `mitigation` | A mitigation protocol to use. TYPE: `Mitigations \| None` |

| RAISES | DESCRIPTION |
|---|---|
| `ValueError` | If the `diff_mode` argument is set to None. |
Source code in qadence/model.py
```python
def __init__(
    self,
    circuit: QuantumCircuit,
    observable: list[AbstractBlock] | AbstractBlock | None = None,
    backend: BackendName | str = BackendName.PYQTORCH,
    diff_mode: DiffMode = DiffMode.AD,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    configuration: BackendConfiguration | dict | None = None,
):
    """Initialize a generic QuantumModel instance.

    Arguments:
        circuit: The circuit that is executed.
        observable: Optional observable(s) that are used only in the `expectation` method.
            You can also provide observables on the fly to the expectation call directly.
        backend: A backend for circuit execution.
        diff_mode: A differentiability mode. Parameter shift based modes work on all backends.
            AD based modes only on PyTorch based backends.
        measurement: Optional measurement protocol. If None, use exact expectation
            value with a statevector simulator.
        configuration: Configuration for the backend.
        noise: A noise model to use.

    Raises:
        ValueError: if the `diff_mode` argument is set to None
    """
    super().__init__()

    if not isinstance(circuit, QuantumCircuit):
        TypeError(
            f"The circuit should be of type ''. Got {type(circuit)}."
        )

    if diff_mode is None:
        raise ValueError("`diff_mode` cannot be `None` in a `QuantumModel`.")

    self.backend = backend_factory(
        backend=backend, diff_mode=diff_mode, configuration=configuration
    )

    if isinstance(observable, list) or observable is None:
        observable = observable
    else:
        observable = [observable]

    def _is_feature_param(p: Parameter) -> bool:
        return not p.trainable and not p.is_number

    if observable is None:
        self.inputs = list(filter(_is_feature_param, circuit.unique_parameters))
    else:
        uparams = unique_parameters(chain(circuit.block, *observable))
        self.inputs = list(filter(_is_feature_param, uparams))

    conv = self.backend.convert(circuit, observable)
    self.embedding_fn = conv.embedding_fn
    self._circuit = conv.circuit
    self._observable = conv.observable
    self._backend_name = backend
    self._diff_mode = diff_mode
    self._measurement = measurement
    self._noise = noise
    self._mitigation = mitigation
    if check_param_dict_values(conv.params):
        self._params = nn.ParameterDict(
            {
                str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
                for key, val in conv.params.items()
            }
        )
    else:
        self._params = nn.ParameterDict(
            {
                str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
                for key, val in merge_separate_params(conv.params).items()
            }
        )
```
device
property
Get device.

| RETURNS | DESCRIPTION |
|---|---|
| `device` | torch.device |

in_features
property
Number of inputs.

num_vparams
property
The number of variational parameters.

out_features
property
Number of outputs.

params
property
All parameters.

show_config
property
Attain current quantum model configurations.

vals_vparams
property
Dictionary with parameters which are actually updated during optimization.

vparams
property
Variational parameters.
assign_parameters(values)
Return the final, assigned circuit that is used in e.g. backend.run.
| PARAMETER | DESCRIPTION |
|---|---|
| `values` | Values dict which contains values for the parameters. TYPE: `dict[str, Tensor]` |

| RETURNS | DESCRIPTION |
|---|---|
| `Any` | Final, assigned circuit that is used in e.g. `backend.run`. |
Source code in qadence/model.py
```python
def assign_parameters(self, values: dict[str, Tensor]) -> Any:
    """Return the final, assigned circuit that is used in e.g. `backend.run`.

    Arguments:
        values: Values dict which contains values for the parameters.

    Returns:
        Final, assigned circuit that is used in e.g. `backend.run`
    """
    params = self.embedding_fn(self._params, values)
    return self.backend.assign_parameters(self._circuit, params)
```
change_config(new_config)
Change configuration with the input.
Source code in qadence/model.py
```python
def change_config(self, new_config: dict) -> None:
    """Change configuration with the input."""
    if isinstance(self.backend, DifferentiableBackend):
        current_config = self.backend.backend.config
        BackendConfiguration.change_config(current_config, new_config)
```
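A hedged sketch of updating the backend configuration in place, reusing `model` from the example above. The option name `"hypothetical_option"` is purely illustrative; consult your backend's `BackendConfiguration` for the keys it actually exposes.

```python
# "hypothetical_option" is a placeholder key for illustration only.
model.change_config({"hypothetical_option": True})
print(model.show_config)  # the `show_config` property lists the current configuration
```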
circuit(circuit)
Get backend-converted circuit.
| PARAMETER | DESCRIPTION |
|---|---|
| `circuit` | QuantumCircuit instance. TYPE: `QuantumCircuit` |

| RETURNS | DESCRIPTION |
|---|---|
| `ConvertedCircuit` | Backend circuit. |
Source code in qadence/model.py
```python
def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
    """Get backend-converted circuit.

    Args:
        circuit: QuantumCircuit instance.

    Returns:
        Backend circuit.
    """
    return self.backend.circuit(circuit)
```
expectation(values={}, observable=None, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG)
Compute expectation using the given backend.
Given an input state $|\psi_0 \rangle$, a set of variational parameters $\vec{\theta}$ and the unitary representation of the model $U(\vec{\theta})$ we return $\langle \psi_0 | U(\vec{\theta}) | \psi_0 \rangle$.
| PARAMETER | DESCRIPTION |
|---|---|
| `values` | Values dict which contains values for the parameters. TYPE: `dict[str, Tensor]` |
| `observable` | Observable part of the expectation. TYPE: `list[ConvertedObservable] \| ConvertedObservable \| None` |
| `state` | Optional input state. TYPE: `Optional[Tensor]` |
| `measurement` | Optional measurement protocol. If None, use exact expectation value with a statevector simulator. TYPE: `Measurements \| None` |
| `noise` | A noise model to use. TYPE: `NoiseHandler \| None` |
| `mitigation` | A mitigation protocol to use. TYPE: `Mitigations \| None` |
| `endianness` | Storage convention for binary information. TYPE: `Endianness` |

| RAISES | DESCRIPTION |
|---|---|
| `ValueError` | When no observable is set. |

| RETURNS | DESCRIPTION |
|---|---|
| `Tensor` | A torch.Tensor of shape n_batches x n_obs |
Source code in qadence/model.py
```python
def expectation(
    self,
    values: dict[str, Tensor] = {},
    observable: list[ConvertedObservable] | ConvertedObservable | None = None,
    state: Optional[Tensor] = None,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    r"""Compute expectation using the given backend.

    Given an input state $|\psi_0 \rangle$, a set of variational parameters $\vec{\theta}$
    and the unitary representation of the model $U(\vec{\theta})$ we return
    $\langle \psi_0 | U(\vec{\theta}) | \psi_0 \rangle$.

    Arguments:
        values: Values dict which contains values for the parameters.
        observable: Observable part of the expectation.
        state: Optional input state.
        measurement: Optional measurement protocol. If None, use exact expectation
            value with a statevector simulator.
        noise: A noise model to use.
        mitigation: A mitigation protocol to use.
        endianness: Storage convention for binary information.

    Raises:
        ValueError: when no observable is set.

    Returns:
        A torch.Tensor of shape n_batches x n_obs
    """
    if observable is None:
        if self._observable is None:
            raise ValueError(
                "Provide an AbstractBlock as the observable to compute expectation."
                "Either pass a 'native_observable' directly to 'QuantumModel.expectation'"
                "or pass a (non-native) '' to the 'QuantumModel.__init__'."
            )
        observable = self._observable

    params = self.embedding_fn(self._params, values)
    if measurement is None:
        measurement = self._measurement
    if noise is None:
        noise = self._noise
    else:
        self._noise = noise
    if mitigation is None:
        mitigation = self._mitigation
    return self.backend.expectation(
        circuit=self._circuit,
        observable=observable,
        param_values=params,
        state=state,
        measurement=measurement,
        noise=noise,
        mitigation=mitigation,
        endianness=endianness,
    )
```
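Since `QuantumModel` is a `torch.nn.Module`, the returned expectation is differentiable according to the chosen `diff_mode`. A minimal sketch, reusing `model`, `torch` and `PI` from the example at the top of this page, of taking gradients with respect to the model parameters:

```python
values = {"phi": torch.tensor([PI / 3])}
ex = model.expectation(values)          # shape: n_batches x n_obs
ex.sum().backward()                     # backpropagate through the expectation
for name, p in model.named_parameters():
    print(name, p.grad)                 # gradients w.r.t. the variational parameters
```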
forward(*args, **kwargs)
Calls run method with arguments.
| RETURNS | DESCRIPTION |
|---|---|
| `Tensor` | A torch.Tensor representing output. |
Source code in qadence/model.py
```python
def forward(self, *args: Any, **kwargs: Any) -> Tensor:
    """Calls run method with arguments.

    Returns:
        Tensor: A torch.Tensor representing output.
    """
    return self.run(*args, **kwargs)
```
load(file_path, as_torch=False, map_location='cpu')
classmethod
Load QuantumModel.
| PARAMETER | DESCRIPTION |
|---|---|
| `file_path` | File path to load model from. TYPE: `str \| Path` |
| `as_torch` | Load parameters as torch tensor. Defaults to False. TYPE: `bool` |
| `map_location` | Location for loading. Defaults to "cpu". TYPE: `str \| torch.device` |

| RETURNS | DESCRIPTION |
|---|---|
| `QuantumModel` | QuantumModel from file_path. |
Source code in qadence/model.py
```python
@classmethod
def load(
    cls, file_path: str | Path, as_torch: bool = False, map_location: str | torch.device = "cpu"
) -> QuantumModel:
    """Load QuantumModel.

    Arguments:
        file_path: File path to load model from.
        as_torch: Load parameters as torch tensor. Defaults to False.
        map_location (str | torch.device, optional): Location for loading. Defaults to "cpu".

    Returns:
        QuantumModel from file_path.
    """
    qm_pt = {}
    if isinstance(file_path, str):
        file_path = Path(file_path)
    if os.path.isdir(file_path):
        from qadence.ml_tools.callbacks.saveload import get_latest_checkpoint_name

        file_path = file_path / get_latest_checkpoint_name(file_path, "model")

    try:
        qm_pt = torch.load(file_path, map_location=map_location, weights_only=False)
    except Exception as e:
        logger.error(f"Unable to load QuantumModel due to {e}")
    return cls._from_dict(qm_pt, as_torch)
```
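A brief sketch of reloading a model previously written with `save` (documented further below); the checkpoint path is purely illustrative.

```python
# Reload a model from disk (path is illustrative).
loaded = QuantumModel.load("./checkpoints/quantum_model.pt", map_location="cpu")
print(loaded.vparams)  # variational parameters restored from the checkpoint
```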
load_params_from_dict(d, strict=True)
Copy parameters from dictionary into this QuantumModel.

Unlike :meth:`~qadence.QuantumModel.from_dict`, this method does not create a new QuantumModel instance, but rather loads the parameters into the same QuantumModel. The behaviour of this method is similar to :meth:`~torch.nn.Module.load_state_dict`. The dictionary is assumed to have the format as saved via :meth:`~qadence.QuantumModel.to_dict`.
| PARAMETER | DESCRIPTION |
|---|---|
| `d` | The dictionary. TYPE: `dict` |
| `strict` | Whether to strictly enforce that the parameter keys in the dictionary and in the model match exactly. Default: `True`. TYPE: `bool` |
Source code in qadence/model.py
```python
def load_params_from_dict(self, d: dict, strict: bool = True) -> None:
    """Copy parameters from dictionary into this QuantumModel.

    Unlike :meth:`~qadence.QuantumModel.from_dict`, this method does not create a new
    QuantumModel instance, but rather loads the parameters into the same QuantumModel.
    The behaviour of this method is similar to :meth:`~torch.nn.Module.load_state_dict`.

    The dictionary is assumed to have the format as saved via
    :meth:`~qadence.QuantumModel.to_dict`

    Args:
        d (dict): The dictionary
        strict (bool, optional):
            Whether to strictly enforce that the parameter keys in the dictionary and
            in the model match exactly. Default: ``True``.
    """
    param_dict = d["param_dict"]
    missing_keys = set(self._params.keys()) - set(param_dict.keys())
    unexpected_keys = set(param_dict.keys()) - set(self._params.keys())

    if strict:
        error_msgs = []
        if len(unexpected_keys) > 0:
            error_msgs.append(f"Unexpected key(s) in dictionary: {unexpected_keys}")
        if len(missing_keys) > 0:
            error_msgs.append(f"Missing key(s) in dictionary: {missing_keys}")
        if len(error_msgs) > 0:
            errors_string = "\n\t".join(error_msgs)
            raise RuntimeError(
                f"Error(s) loading the parameter dictionary due to: \n\t{errors_string}\n"
                "This error was thrown because the `strict` argument is set `True`."
                "If you don't need the parameter keys of the dictionary to exactly match "
                "the model parameters, set `strict=False`."
            )

    for n, param in param_dict.items():
        try:
            with torch.no_grad():
                self._params[n].copy_(
                    torch.nn.Parameter(param, requires_grad=param.requires_grad)
                )
        except Exception as e:
            logger.warning(f"Unable to load parameter {n} from dictionary due to {e}.")
```
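A hedged sketch of transferring parameters between two structurally identical models, reusing `circuit` and `observable` from the example at the top of this page. The `{"param_dict": ...}` layout mirrors what the method reads in the source above; accessing the private `_params` dict here is only for illustration.

```python
source_model = QuantumModel(circuit, observable)
target_model = QuantumModel(circuit, observable)

# Snapshot the source parameters in the expected {"param_dict": {...}} layout,
# then copy them into the target model in place.
snapshot = {"param_dict": {k: v.detach().clone() for k, v in source_model._params.items()}}
target_model.load_params_from_dict(snapshot, strict=True)
```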
observable(observable, n_qubits)
Get backend observable.
| PARAMETER | DESCRIPTION |
|---|---|
| `observable` | Observable block. TYPE: `AbstractBlock` |
| `n_qubits` | Number of qubits. TYPE: `int` |

| RETURNS | DESCRIPTION |
|---|---|
| `Any` | Backend observable. |
Source code in qadence/model.py
```python
def observable(self, observable: AbstractBlock, n_qubits: int) -> Any:
    """Get backend observable.

    Args:
        observable: Observable block.
        n_qubits: Number of qubits

    Returns:
        Backend observable.
    """
    return self.backend.observable(observable, n_qubits)
```
observables_to_expression()
Convert the observable to a dictionary representation of Pauli terms. If no observable is set, returns an empty dictionary. Each observable is represented by its tag (if available) as the key and its mathematical expression as the value.
| RETURNS | DESCRIPTION |
|---|---|
| `dict[str, str] \| str` | A dictionary where the keys are observable tags (or "Obs." if not provided) and the values are the corresponding mathematical expressions. |
Source code in qadence/model.py
```python
def observables_to_expression(self) -> dict[str, str] | str:
    """
    Convert the observable to a dictionary representation of Pauli terms.

    If no observable is set, returns an empty dictionary.
    Each observable is represented by its tag (if available) as the key
    and its mathematical expression as the value.

    Returns:
        dict[str, str]: A dictionary where the keys are observable tags (or "Obs." if not
            provided) and the values are the corresponding mathematical expressions.
    """
    if self._observable is None:
        return "No observable set."
    else:
        return {
            obs.original.tag if obs.original.tag else "Obs.": block_to_mathematical_expression(
                obs.original
            )
            for obs in self._observable
        }
```
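For the model built in the example at the top of this page, a quick sketch of what this returns; the exact string rendering depends on `block_to_mathematical_expression`.

```python
print(model.observables_to_expression())
# e.g. {'Obs.': 'Z(0) + Z(1)'}  (illustrative; formatting may differ)
```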
overlap()
Overlap of model.
| RAISES | DESCRIPTION |
|---|---|
| `NotImplementedError` | The overlap method is not implemented for this model. |
Source code in qadence/model.py
```python
def overlap(self) -> Tensor:
    """Overlap of model.

    Raises:
        NotImplementedError: The overlap method is not implemented for this model.
    """
    raise NotImplementedError("The overlap method is not implemented for this model.")
```
reset_vparams(values)
Reset all the variational parameters with a given list of values.
Source code in qadence/model.py
```python
def reset_vparams(self, values: Sequence) -> None:
    """Reset all the variational parameters with a given list of values."""
    current_vparams = OrderedDict({k: v for k, v in self._params.items() if v.requires_grad})

    assert (
        len(values) == self.num_vparams
    ), "Pass an iterable with the values of all variational parameters"
    for i, k in enumerate(current_vparams.keys()):
        current_vparams[k].data = torch.tensor([values[i]])
```
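A small sketch, assuming the model has a single variational parameter `theta` as in the example at the top of this page; the ordering of the values follows the model's internal parameter dictionary.

```python
print(model.vparams)        # current variational parameter values
model.reset_vparams([0.5])  # one value per variational parameter
print(model.vparams)
```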
run(values=None, state=None, endianness=Endianness.BIG)
Run model.
Given an input state $| \psi_0 \rangle$, a set of variational parameters $\vec{\theta}$ and the unitary representation of the model $U(\vec{\theta})$ we return $U(\vec{\theta}) | \psi_0 \rangle$.
| PARAMETER | DESCRIPTION |
|---|---|
| `values` | Values dict which contains values for the parameters. TYPE: `dict[str, Tensor]` |
| `state` | Optional input state to apply model on. TYPE: `Tensor \| None` |
| `endianness` | Storage convention for binary information. TYPE: `Endianness` |

| RETURNS | DESCRIPTION |
|---|---|
| `Tensor` | A torch.Tensor representing output. |
Source code in qadence/model.py
```python
def run(
    self,
    values: dict[str, Tensor] = None,
    state: Tensor | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    r"""Run model.

    Given an input state $| \psi_0 \rangle$, a set of variational parameters $\vec{\theta}$
    and the unitary representation of the model $U(\vec{\theta})$ we return
    $U(\vec{\theta}) | \psi_0 \rangle$.

    Arguments:
        values: Values dict which contains values for the parameters.
        state: Optional input state to apply model on.
        endianness: Storage convention for binary information.

    Returns:
        A torch.Tensor representing output.
    """
    if values is None:
        values = {}

    params = self.embedding_fn(self._params, values)

    return self.backend.run(self._circuit, params, state=state, endianness=endianness)
```
sample(values={}, n_shots=1000, state=None, noise=None, mitigation=None, endianness=Endianness.BIG)
Obtain samples from model.
| PARAMETER | DESCRIPTION |
|---|---|
| `values` | Values dict which contains values for the parameters. TYPE: `dict[str, Tensor]` |
| `n_shots` | Number of shots to sample. Defaults to 1000. TYPE: `int` |
| `state` | Optional input state to apply model on. TYPE: `Tensor \| None` |
| `noise` | A noise model to use. TYPE: `NoiseHandler \| None` |
| `mitigation` | A mitigation protocol to use. TYPE: `Mitigations \| None` |
| `endianness` | Storage convention for binary information. TYPE: `Endianness` |

| RETURNS | DESCRIPTION |
|---|---|
| `list[Counter]` | A list of Counter instances with the sample results. |
Source code in qadence/model.py
```python
def sample(
    self,
    values: dict[str, torch.Tensor] = {},
    n_shots: int = 1000,
    state: torch.Tensor | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    endianness: Endianness = Endianness.BIG,
) -> list[Counter]:
    """Obtain samples from model.

    Arguments:
        values: Values dict which contains values for the parameters.
        n_shots: Observable part of the expectation.
        state: Optional input state to apply model on.
        noise: A noise model to use.
        mitigation: A mitigation protocol to use.
        endianness: Storage convention for binary information.

    Returns:
        A list of Counter instances with the sample results.
    """
    params = self.embedding_fn(self._params, values)
    if noise is None:
        noise = self._noise
    if mitigation is None:
        mitigation = self._mitigation
    return self.backend.sample(
        self._circuit,
        params,
        n_shots=n_shots,
        state=state,
        noise=noise,
        mitigation=mitigation,
        endianness=endianness,
    )
```
save(folder, file_name='quantum_model.pt', save_params=True)
Save model.
| PARAMETER | DESCRIPTION |
|---|---|
| `folder` | Folder where model is saved. TYPE: `str \| Path` |
| `file_name` | File name for saving model. Defaults to "quantum_model.pt". TYPE: `str` |
| `save_params` | Save parameters if True. Defaults to True. TYPE: `bool` |

| RAISES | DESCRIPTION |
|---|---|
| `FileNotFoundError` | If folder is not a directory. |
Source code in qadence/model.py
```python
def save(
    self, folder: str | Path, file_name: str = "quantum_model.pt", save_params: bool = True
) -> None:
    """Save model.

    Arguments:
        folder: Folder where model is saved.
        file_name: File name for saving model. Defaults to "quantum_model.pt".
        save_params: Save parameters if True. Defaults to True.

    Raises:
        FileNotFoundError: If folder is not a directory.
    """
    if not os.path.isdir(folder):
        raise FileNotFoundError
    try:
        torch.save(self._to_dict(save_params), folder / Path(file_name))
    except Exception as e:
        logger.error(f"Unable to write QuantumModel to disk due to {e}")
```
set_as_fixed(params=list())
Set as fixed the list of names in params.
| PARAMETER | DESCRIPTION |
|---|---|
| `params` | List of parameters to fix. Defaults to list(). TYPE: `list[str]` |
Source code in qadence/model.py
```python
def set_as_fixed(self, params: list[str] = list()) -> None:
    """Set as fixed the list of names in `params`.

    Args:
        params (list[str], optional): List of parameters to fix. Defaults to list().
    """
    circuit: QuantumCircuit = self._circuit.original
    if self._observable is not None:
        if isinstance(self._observable, list):
            for obs in self._observable:
                set_as_fixed(obs.original, params)
            observable = [obs.original for obs in self._observable]
        else:
            set_as_fixed(self._observable.original, params)
            observable = [self._observable.original]
    else:
        observable = self._observable  # type: ignore[assignment]
    set_as_fixed(circuit.block, params)
    conv = self.backend.convert(circuit, observable)
    self.embedding_fn = conv.embedding_fn
    self._circuit = conv.circuit
    self._observable = conv.observable
    if check_param_dict_values(conv.params):
        self._params = nn.ParameterDict(
            {
                str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
                for key, val in conv.params.items()
            }
        )
    else:
        self._params = nn.ParameterDict(
            {
                str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
                for key, val in merge_separate_params(conv.params).items()
            }
        )
```
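A short sketch, freezing the `theta` parameter from the example at the top of this page so it is no longer optimized:

```python
model.set_as_fixed(["theta"])
print(model.num_vparams)  # theta no longer counts among the variational parameters
```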
set_as_variational(params=list())
Set as variational the list of names in params.
| PARAMETER | DESCRIPTION |
|---|---|
| `params` | List of parameters to set as variational. Defaults to list(). TYPE: `list[str]` |
Source code in qadence/model.py
```python
def set_as_variational(self, params: list[str] = list()) -> None:
    """Set as variational the list of names in `params`.

    Args:
        params (list[str], optional): List of parameters to fix. Defaults to list().
    """
    circuit: QuantumCircuit = self._circuit.original
    if self._observable is not None:
        if isinstance(self._observable, list):
            for obs in self._observable:
                set_as_variational(obs.original, params)
            observable = [obs.original for obs in self._observable]
        else:
            set_as_variational(self._observable.original, params)
            observable = [self._observable.original]
    else:
        observable = self._observable  # type: ignore[assignment]
    set_as_variational(circuit.block, params)
    conv = self.backend.convert(circuit, observable)
    self.embedding_fn = conv.embedding_fn
    self._circuit = conv.circuit
    self._observable = conv.observable
    if check_param_dict_values(conv.params):
        self._params = nn.ParameterDict(
            {
                str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
                for key, val in conv.params.items()
            }
        )
    else:
        self._params = nn.ParameterDict(
            {
                str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
                for key, val in merge_separate_params(conv.params).items()
            }
        )
```
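And the converse of the `set_as_fixed` sketch above, promoting the feature parameter `phi` from the example at the top of this page to a variational parameter:

```python
model.set_as_variational(["phi"])
print(model.num_vparams)  # phi now counts among the variational parameters
```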
to(*args, **kwargs)
Conversion method for device or types.
| RETURNS | DESCRIPTION |
|---|---|
| `QuantumModel` | QuantumModel with conversions. |
Source code in qadence/model.py
```python
def to(self, *args: Any, **kwargs: Any) -> QuantumModel:
    """Conversion method for device or types.

    Returns:
        QuantumModel with conversions.
    """
    from pyqtorch import QuantumCircuit as PyQCircuit

    try:
        if isinstance(self._circuit.native, PyQCircuit):
            self._circuit.native = self._circuit.native.to(*args, **kwargs)
            if self._observable is not None:
                if isinstance(self._observable, ConvertedObservable):
                    self._observable.native = self._observable.native.to(*args, **kwargs)
                elif isinstance(self._observable, list):
                    for obs in self._observable:
                        obs.native = obs.native.to(*args, **kwargs)
            self._params = self._params.to(
                device=self._circuit.native.device,
                dtype=(
                    torch.float64
                    if self._circuit.native.dtype == torch.cdouble
                    else torch.float32
                ),
            )
            logger.debug(f"Moved {self} to {args}, {kwargs}.")
        else:
            logger.debug("QuantumModel.to only supports pyqtorch.QuantumCircuits.")
    except Exception as e:
        logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
    return self
```
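A sketch of moving `model` from the example at the top of this page to another device; as noted in the source above, this only takes effect for pyqtorch-based circuits and falls back with a log message otherwise.

```python
# Move circuit, observables, and parameters to the GPU when one is available.
if torch.cuda.is_available():
    model = model.to(device="cuda")
```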