
Parameters

ParamMap(**kwargs)

Connects UUIDs of parameters to their expressions and names.

This class is not user-facing and is only needed for more complex block definitions. It provides convenient access to the expressions/UUIDs/names needed by different backends.

PARAMETER DESCRIPTION
kwargs

The parameters to map, passed as keyword arguments (name=value).

TYPE: str | TNumber | Tensor | Basic | Parameter DEFAULT: {}

Example:

import sympy
from qadence.parameters import ParamMap
(x,y) = sympy.symbols("x y")
ps = ParamMap(omega=2.0, duration=x+y)
print(f"{ps.names() = }")
print(f"{ps.expressions() = }")
print(f"{ps.uuids() = }")
ps.names() = dict_keys(['omega', 'duration'])
ps.expressions() = dict_values([2.00000000000000, x + y])
ps.uuids() = dict_keys(['ea7d2acd-8248-4051-9df2-76fa8e1739a7', '73480baa-954b-4367-812c-e8160d46899e'])
Source code in qadence/parameters.py
def __init__(self, **kwargs: str | TNumber | Tensor | Basic | Parameter):
    self._name_dict: dict[str, tuple[str, Basic]] = {}
    self._uuid_dict: dict[str, str] = {}
    for name, v in kwargs.items():
        param = v if isinstance(v, sympy.Basic) else Parameter(v)
        uuid = str(uuid4())
        self._name_dict[name] = (uuid, param)
        self._uuid_dict[uuid] = param
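
Because the constructor wraps non-sympy inputs via Parameter (see __init__ above), a numeric keyword argument ends up as a plain sympy number. A minimal sketch:

from qadence.parameters import ParamMap

ps = ParamMap(omega=2.0)
(expr,) = ps.expressions()
assert expr.is_number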

Parameter(name, **assumptions)

Bases: Symbol

A wrapper on top of sympy.Symbol.

Includes two additional keywords: trainable and value. This class is used to define both feature parameters and variational parameters.

ATTRIBUTE DESCRIPTION
trainable

Trainable parameters are variational parameters; non-trainable parameters are feature parameters.

TYPE: bool

value

(Initial) value of the parameter.

TYPE: TNumber

Arguments:

name: When given a string only, the class constructs a trainable Parameter with a randomly initialized value.
**assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption kwargs are supported by this constructor: `trainable: bool` and `value: TNumber`.

Example:

from qadence.parameters import Parameter, VariationalParameter
theta = Parameter("theta")
print(f"{theta}: trainable={theta.trainable} value={theta.value}")
assert not theta.is_number
# you can specify both trainable/value in the constructor
theta = Parameter("theta", trainable=True, value=2.0)
print(f"{theta}: trainable={theta.trainable} value={theta.value}")
# VariationalParameter/FeatureParameter construct
# trainable/non-trainable Parameters
theta = VariationalParameter("theta", value=2.0)
assert theta == Parameter("theta", trainable=True, value=2.0)
# When provided with a numeric type, Parameter constructs a sympy numeric type:
constant_zero = Parameter(0)
assert constant_zero.is_number
# When passed a Parameter or a sympy expression, it just returns it.
expr = Parameter("x") * Parameter("y")
print(f"{expr=} : {expr.free_symbols}")
theta: trainable=True value=0.14591884209971773
theta: trainable=True value=2.0
expr=x*y : {y, x}
Source code in qadence/parameters.py
def __new__(
    cls, name: str | TNumber | Tensor | Basic | Parameter, **assumptions: Any
) -> Parameter | Basic | Expr | Array:
    """
    Arguments:
        name: When given a string only, the class
            constructs a trainable Parameter with a randomly initialized value.
        **assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption
            kwargs are supported by this constructor: `trainable: bool` and `value: TNumber`.

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence.parameters import Parameter, VariationalParameter

    theta = Parameter("theta")
    print(f"{theta}: trainable={theta.trainable} value={theta.value}")
    assert not theta.is_number

    # you can specify both trainable/value in the constructor
    theta = Parameter("theta", trainable=True, value=2.0)
    print(f"{theta}: trainable={theta.trainable} value={theta.value}")

    # VariationalParameter/FeatureParameter construct
    # trainable/non-trainable Parameters
    theta = VariationalParameter("theta", value=2.0)
    assert theta == Parameter("theta", trainable=True, value=2.0)

    # When provided with a numeric type, Parameter constructs a sympy numeric type:
    constant_zero = Parameter(0)
    assert constant_zero.is_number

    # When passed a Parameter or a sympy expression, it just returns it.
    expr = Parameter("x") * Parameter("y")
    print(f"{expr=} : {expr.free_symbols}")
    ```
    """
    p: Parameter
    if isinstance(name, get_args(TNumber)):
        return sympify(name)
    elif isinstance(name, Tensor):
        if name.numel() == 1:
            return sympify(name)
        else:
            return Array(name.detach().numpy())
    elif isinstance(name, Parameter):
        p = super().__new__(cls, name.name, **assumptions)
        p.name = name.name
        p.trainable = name.trainable
        p.value = name.value
        p.is_time = name.is_time
        return p
    elif isinstance(name, (Basic, Expr)):
        if name.is_number:
            return sympify(evaluate(name))
        return name
    elif isinstance(name, str):
        p = super().__new__(cls, name, **assumptions)
        p.trainable = assumptions.get("trainable", True)
        p.value = assumptions.get("value", None)
        p.is_time = assumptions.get("is_time", False)
        if p.value is None:
            p.value = rand(1).item()
        return p
    else:
        raise TypeError(f"Parameter does not support type {type(name)}")

FeatureParameter(name, **kwargs)

Shorthand for Parameter(..., trainable=False).

Source code in qadence/parameters.py
def FeatureParameter(name: str, **kwargs: Any) -> Parameter:
    """Shorthand for `Parameter(..., trainable=False)`."""
    return Parameter(name, trainable=False, **kwargs)
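
Mirroring the equality check in the Parameter example above, a minimal sketch:

from qadence.parameters import FeatureParameter, Parameter

phi = FeatureParameter("phi")
assert phi == Parameter("phi", trainable=False)
assert not phi.trainable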

TimeParameter(name)

Shorthand for Parameter(..., trainable=False, is_time=True).

Source code in qadence/parameters.py
def TimeParameter(name: str) -> Parameter:
    """Shorthand for `Parameter(..., trainable=False, is_time=True)`."""
    return Parameter(name, trainable=False, is_time=True)
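
The is_time flag is set on the returned Parameter by the __new__ constructor above; a minimal sketch:

from qadence.parameters import TimeParameter

t = TimeParameter("t")
assert t.is_time and not t.trainable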

VariationalParameter(name, **kwargs)

Shorthand for Parameter(..., trainable=True).

Source code in qadence/parameters.py
def VariationalParameter(name: str, **kwargs: Any) -> Parameter:
    """Shorthand for `Parameter(..., trainable=True)`."""
    return Parameter(name, trainable=True, **kwargs)
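
As with the other shorthands, a minimal sketch:

from qadence.parameters import VariationalParameter

theta = VariationalParameter("theta", value=2.0)
assert theta.trainable and theta.value == 2.0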

evaluate(expr, values=None, as_torch=False)


Arguments:

expr: An expression consisting of Parameters.
values: A dict of values for the Parameters; if empty, Parameter.value is used.
as_torch: Whether to retrieve a torch-differentiable result for the expression.

Example:

from qadence.parameters import Parameter, evaluate
expr = Parameter("x") * Parameter("y")
# Unless specified otherwise, Parameters are initialized with random values.
# Let's evaluate this expression and see what the result is
res = evaluate(expr)
print(res)
# We can also evaluate the expr using a custom dict
d = {"x": 1, "y": 2}
res = evaluate(expr, d)
print(res)
# Lastly, if we want a differentiable result, let's set the as_torch flag
res = evaluate(expr, d, as_torch=True)
print(res)
0.02503978083311486
2
tensor([2])
Source code in qadence/parameters.py
def evaluate(expr: Expr, values: dict | None = None, as_torch: bool = False) -> TNumber | Tensor:
    """
    Arguments:
        expr: An expression consisting of Parameters.
        values: A dict of values for the Parameters; if empty, Parameter.value is used.
        as_torch: Whether to retrieve a torch-differentiable result for the expression.

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence.parameters import Parameter, evaluate
    expr = Parameter("x") * Parameter("y")

    # Unless specified otherwise, Parameters are initialized with random values.
    # Let's evaluate this expression and see what the result is
    res = evaluate(expr)
    print(res)

    # We can also evaluate the expr using a custom dict
    d = {"x": 1, "y": 2}
    res = evaluate(expr, d)
    print(res)

    # Lastly, if we want a differentiable result, let's set the as_torch flag
    res = evaluate(expr, d, as_torch=True)
    print(res)
    ```
    """
    res: Basic
    res_value: TNumber | Tensor
    query: dict[Parameter, TNumber | Tensor] = dict()
    values = values or dict()
    if isinstance(expr, Array):
        return Tensor(expr.tolist())
    else:
        if not expr.is_number:
            for s in expr.free_symbols:
                if s.name in values.keys():
                    query[s] = values[s.name]
                elif hasattr(s, "value"):
                    query[s] = s.value
                else:
                    raise ValueError(f"No value provided for symbol {s.name}")
        if as_torch:
            res_value = make_differentiable(expr)(**{s.name: tensor(v) for s, v in query.items()})
        else:
            res = expr.subs(query)
            res_value = sympy_to_numeric(res)
        return res_value
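
A plain sympy.Symbol carries no value attribute, so evaluating it without an explicit values dict hits the ValueError branch above; a minimal sketch:

import sympy
from qadence.parameters import evaluate

x = sympy.Symbol("x")
try:
    evaluate(x * 2)
except ValueError as e:
    print(e)  # No value provided for symbol x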

extract_original_param_entry(param)

Given an expression, return the original "param" given by the user: either a numeric value, or a sympy expression (in case a string was given, it was converted via Parameter("string")).

Source code in qadence/parameters.py
def extract_original_param_entry(
    param: Expr,
) -> TNumber | Tensor | Expr:
    """
    Given an expression, return the original "param" given by the user: either a numeric
    value, or a sympy expression (in case a string was given, it was converted via
    Parameter("string")).
    """
    return param if not param.is_number else evaluate(param)
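
A minimal sketch of the two branches:

import sympy
from qadence.parameters import Parameter, extract_original_param_entry

# a numeric entry is returned as its evaluated numeric value
assert extract_original_param_entry(sympy.Float(0.5)) == 0.5
# a symbolic expression is returned unchanged
expr = Parameter("x") * Parameter("y")
assert extract_original_param_entry(expr) is expr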

embedding(block, to_gate_params=False, engine=Engine.TORCH)


Construct an embedding function which maps user-facing parameters to either expression-level parameters or gate-level parameters. The constructed embedding function has the signature:

embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:

which means that it maps the variational parameter dict params and the feature parameter dict inputs to one new parameter dict embedded_dict which holds all parameters needed to execute a circuit on a given backend. There are two different modes for this mapping (see the usage sketch after the parameter table below):

  • Expression-level parameters: For AD-based optimization. For every unique expression we end up with one entry in the embedded dict: len(embedded_dict) == len(unique_parameter_expressions).
  • Gate-level parameters: For PSR-based optimization or real devices. One parameter for each gate parameter, regardless of whether they are based on the same expression: len(embedded_dict) == len(parametric_gates). This is needed because PSR requires shifting the angles of every gate in which the same parameter appears.
PARAMETER DESCRIPTION
block

parametrized block into which we want to embed parameters.

TYPE: AbstractBlock

to_gate_params

Whether to generate gate-level parameters instead of expression-level parameters.

TYPE: bool DEFAULT: False

engine

The differentiation engine used to concretize parameter values (torch by default, otherwise jax).

TYPE: Engine DEFAULT: Engine.TORCH

RETURNS DESCRIPTION
tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType]]

A tuple with variational parameter dict and the embedding function.
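
A minimal usage sketch of both modes, assuming the RX gate from qadence and a block parametrized by a product expression (the exact keys of the embedded dict are stringified expressions in expression-level mode and gate UUIDs in gate-level mode):

import torch
from qadence import RX
from qadence.blocks.embedding import embedding
from qadence.parameters import FeatureParameter, VariationalParameter

block = RX(0, FeatureParameter("phi") * VariationalParameter("theta"))

# expression-level mode (default): one entry per unique expression
params, embedding_fn = embedding(block)
embedded = embedding_fn(params, {"phi": torch.tensor([0.5])})
print(list(embedded.keys()))

# gate-level mode: one entry per parametrized gate, keyed by UUID
params, embedding_fn = embedding(block, to_gate_params=True)
embedded = embedding_fn(params, {"phi": torch.tensor([0.5])})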

Source code in qadence/blocks/embedding.py
def embedding(
    block: AbstractBlock, to_gate_params: bool = False, engine: Engine = Engine.TORCH
) -> tuple[
    ParamDictType,
    Callable[[ParamDictType, ParamDictType], ParamDictType],
]:
    """Construct an embedding function which maps user-facing parameters to either
    *expression-level* parameters or *gate-level* parameters. The constructed embedding
    function has the signature:

        embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:

    which means that it maps the *variational* parameter dict `params` and the *feature*
    parameter dict `inputs` to one new parameter dict `embedded_dict` which holds all
    parameters needed to execute a circuit on a given backend. There are two different
    *modes* for this mapping:

    - *Expression-level* parameters: For AD-based optimization. For every unique expression
      we end up with one entry in the embedded dict:
      `len(embedded_dict) == len(unique_parameter_expressions)`.
    - *Gate-level* parameters: For PSR-based optimization or real devices. One parameter for
      each gate parameter, regardless of whether they are based on the same expression:
      `len(embedded_dict) == len(parametric_gates)`. This is needed because PSR requires
      shifting the angles of **every** gate in which the same parameter appears.

    Arguments:
        block: parametrized block into which we want to embed parameters.
        to_gate_params: A boolean flag whether to generate gate-level parameters or
            expression-level parameters.

    Returns:
        A tuple with variational parameter dict and the embedding function.
    """
    concretize_parameter = _concretize_parameter(engine)
    if engine == Engine.TORCH:
        cast_dtype = tensor
    else:
        from jax.numpy import array

        cast_dtype = array

    unique_expressions = unique(expressions(block))
    unique_symbols = [p for p in unique(parameters(block)) if not isinstance(p, sympy.Array)]
    unique_const_matrices = [e for e in unique_expressions if isinstance(e, sympy.Array)]
    unique_expressions = [e for e in unique_expressions if not isinstance(e, sympy.Array)]

    # NOTE
    # there are 3 kinds of parameters in qadence
    # - non-trainable which are considered as inputs for classical data
    # - trainable which are the variational parameters to be optimized
    # - fixed: which are non-trainable parameters with fixed value (e.g. pi/2)
    #
    # both non-trainable and trainable parameters can have the same element applied
    # to different operations in the quantum circuit, e.g. assigning the same parameter
    # to multiple gates.
    non_numeric_symbols = [p for p in unique_symbols if not p.is_number]
    trainable_symbols = [p for p in non_numeric_symbols if p.trainable]
    constant_expressions = [expr for expr in unique_expressions if expr.is_number]
    # we don't need to care about constant symbols if they are contained in a symbolic
    # expression; we only care about gate params which are ONLY a constant
    embeddings: dict[sympy.Expr, DifferentiableExpression] = {
        expr: make_differentiable(expr=expr, engine=engine)
        for expr in unique_expressions
        if not expr.is_number
    }
    uuid_to_expr = uuid_to_expression(block)

    def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
        embedded_params: dict[sympy.Expr, ArrayLike] = {}
        if "circuit" in inputs or "observables" in inputs:
            inputs = merge_separate_params(inputs)
        for expr, fn in embeddings.items():
            angle: ArrayLike
            values = {}
            for symbol in expr.free_symbols:
                if symbol.name in inputs:
                    value = inputs[symbol.name]
                elif symbol.name in params:
                    value = params[symbol.name]
                else:
                    if symbol.is_time:
                        value = tensor(1.0)
                    else:
                        msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
                        raise KeyError(
                            f"{msg_trainable} parameter '{symbol.name}' not found in the "
                            f"inputs list: {list(inputs.keys())} nor the "
                            f"params list: {list(params.keys())}."
                        )
                values[symbol.name] = value
            angle = fn(**values)
            # do not reshape parameters which are multi-dimensional
            # tensors, such as for example generator matrices
            if not len(angle.squeeze().shape) > 1:
                angle = angle.reshape(-1)
            embedded_params[expr] = angle
        for e in constant_expressions + unique_const_matrices:
            embedded_params[e] = params[stringify(e)]
        if to_gate_params:
            gate_lvl_params: ParamDictType = {}
            for uuid, e in uuid_to_expr.items():
                gate_lvl_params[uuid] = embedded_params[e]
            return gate_lvl_params
        else:
            embedded_params.update(inputs)
            for k, v in params.items():
                if k not in embedded_params:
                    embedded_params[k] = v
            out = {
                stringify(k) if not isinstance(k, str) else k: (
                    as_tensor(v)[None] if as_tensor(v).ndim == 0 else v
                )
                for k, v in embedded_params.items()
            }
            return out

    params: ParamDictType
    params = {
        p.name: concretize_parameter(value=p.value, trainable=True) for p in trainable_symbols
    }
    params.update(
        {
            stringify(expr): concretize_parameter(value=evaluate(expr), trainable=False)
            for expr in constant_expressions
        }
    )
    params.update(
        {
            stringify(expr): cast_dtype(nparray(expr.tolist(), dtype=npcdouble))
            for expr in unique_const_matrices
        }
    )
    return params, embedding_fn