Skip to content

Results are limited to the current section: Application solving tools

Drive Shaping Methods

import torch
from qubosolver.qubo_instance import QUBOInstance
from qubosolver.config import SolverConfig, DriveShapingConfig
from qubosolver.qubo_types import DriveType
from qubosolver.solver import QuboSolver
import matplotlib.pyplot as plt
# Render matplotlib animations as interactive JS/HTML widgets in the notebook.
plt.rcParams["animation.html"] = "jshtml"
# IPython magic: show plots inline (only valid inside a notebook/IPython session).
%matplotlib inline
# 3x3 symmetric QUBO coefficient matrix; presumably diagonal entries are the
# linear terms and off-diagonal entries the pairwise couplings — confirm with
# the QUBOInstance documentation.
coefficients = torch.tensor([[-1.0, 0.5, 0.2], [0.5, -2.0, 0.3], [0.2, 0.3, -3.0]])
instance = QUBOInstance(coefficients)

Default method

import torch
from qubosolver.qubo_instance import QUBOInstance
from qubosolver.config import EmbeddingConfig, LocalEmulator
from pulser_simulation import QutipBackendV2
def best_from_solution(sol):
    """Extract the lowest sampled cost and its bitstring from a solution.

    Expects ``sol.costs`` (1-D tensor of sampled costs, or ``None``) and
    ``sol.bitstrings`` (2-D tensor, one row per sample). Returns
    ``(None, None)`` when there are no costs to compare.
    """
    costs = sol.costs
    if costs is None or len(costs) == 0:
        return None, None
    # Index of the cheapest sample; convert tensor scalars to plain Python.
    best = int(torch.argmin(costs).item())
    return float(costs[best].item()), sol.bitstrings[best].tolist()
def run_one(method: DriveType, q: torch.Tensor, runs: int = 500, dmm: bool = True):
    """Solve the QUBO `q` with the given drive-shaping method and report results.

    Builds a quantum solver configuration (greedy embedding, local Qutip
    emulator), solves the instance, draws the pulse sequence, prints the
    solution summary, and returns ``(solution, best_cost, best_bitstring)``.
    """
    qubo = QUBOInstance(coefficients=q)
    embedding_cfg = EmbeddingConfig(
        embedding_method="greedy",
        greedy_traps=6,
        # greedy_spacing intentionally left at its default.
    )
    shaping_cfg = DriveShapingConfig(drive_shaping_method=method, dmm=dmm)
    backend = LocalEmulator(backend_type=QutipBackendV2, runs=runs)
    config = SolverConfig(
        use_quantum=True,
        embedding=embedding_cfg,
        drive_shaping=shaping_cfg,
        backend=backend,
    )
    solver = QuboSolver(qubo, config)
    # Solve — this includes execution and measurement.
    sol = solver.solve()
    # Display the generated pulse sequence.
    embedding = solver.embedding()
    drive = solver.drive(embedding)[0]
    solver.draw_sequence(drive, embedding)
    best_cost, best_bitstring = best_from_solution(sol)
    tag = "HEURISTIC"
    print("\n" + "=" * 80)
    print(f"[{tag}] runs={runs}")
    print("Solution object:")
    print(sol)
    print(f"Best sampled cost: {best_cost}")
    print(f"Best sampled bitstring: {best_bitstring}")
    return sol, best_cost, best_bitstring
if __name__ == "__main__":
    # 4-variable test QUBO: negative diagonal (linear terms) with uniform
    # positive couplings.
    Q1 = torch.tensor([
        [-6.0, 2.0, 2.0, 2.0],
        [2.0, -7.5, 2.0, 2.0],
        [2.0, 2.0, -7.5, 2.0],
        [2.0, 2.0, 2.0, -7.0],
    ])
    # Compare the heuristic drive with and without the DMM channel.
    sol_h_dmm, best_h_dmm, bs_h_dmm = run_one(DriveType.HEURISTIC, Q1, runs=500, dmm=True)
    sol_h_nodmm, best_h_nodmm, bs_h_nodmm = run_one(DriveType.HEURISTIC, Q1, runs=500, dmm=False)
    print("\n" + "#" * 80)
    print("COMPARISON (best sampled cost):")
    print(f"HEURISTIC (dmm=True) : {best_h_dmm} bitstring={bs_h_dmm}")
    print(f"HEURISTIC (dmm=False): {best_h_nodmm} bitstring={bs_h_nodmm}")

For the OPTIMIZED drive shaping, we have the following parameters:

  • optimized_n_calls: Number of calls for the optimization process.
  • optimized_re_execute_opt_drive: Whether to re-run the optimal drive sequence after optimization.
  • optimized_initial_omega_parameters: Default initial omega parameters for the drive. Defaults to (1, 2, 1).
  • optimized_initial_detuning_parameters: Default initial detuning parameters for the drive. Defaults to (-2, 0, 2).
# OPTIMIZED drive shaping with all defaults.
default_config = SolverConfig.from_kwargs(
    use_quantum=True,
    drive_shaping=DriveShapingConfig(drive_shaping_method=DriveType.OPTIMIZED),
)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)

# Same, but cap the optimization at 13 objective calls.
default_config = SolverConfig.from_kwargs(
    use_quantum=True,
    drive_shaping=DriveShapingConfig(
        drive_shaping_method=DriveType.OPTIMIZED,
        optimized_n_calls=13,
    ),
)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)

Initial parameters of the optimization procedure can be changed via optimized_initial_omega_parameters and optimized_initial_detuning_parameters.

# Override the optimizer's initial omega and detuning parameters.
default_config = SolverConfig.from_kwargs(
    use_quantum=True,
    drive_shaping=DriveShapingConfig(
        drive_shaping_method=DriveType.OPTIMIZED,
        optimized_initial_omega_parameters=[1.0, 3.0, 1.0],
        optimized_initial_detuning_parameters=[-9.0, 0.0, 5.0],
    ),
)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)

# Re-run the optimal drive sequence once optimization has finished.
default_config = SolverConfig.from_kwargs(
    use_quantum=True,
    drive_shaping=DriveShapingConfig(
        drive_shaping_method=DriveType.OPTIMIZED,
        optimized_re_execute_opt_drive=True,
    ),
)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)

One can change the drive shaping method by incorporating custom functions for:

  • Evaluating a candidate bitstring and QUBO via optimized_custom_qubo_cost
  • Performing optimization with a different objective than the best cost via optimized_custom_objective
  • Adding callback functions via optimized_callback_objective.
from qubosolver.utils.qubo_eval import calculate_qubo_cost
# Example of a custom cost: penalize zeros in the bitstring.
def penalized_qubo(bitstring: str, QUBO: torch.Tensor) -> float:
    """Return the QUBO cost plus a penalty of 2 per '0' in the bitstring."""
    base_cost = calculate_qubo_cost(bitstring, QUBO)
    penalty = 2 * bitstring.count("0")
    return base_cost + penalty
# Example of saving intermediate optimization results for later inspection.
opt_results = []

def callback(d: dict) -> None:
    """Record one intermediate optimization result."""
    opt_results.append(d)
# Example of an alternative objective: probability-weighted average cost.
# NOTE(review): "ojective" is a typo for "objective"; kept as-is because the
# call site below passes the function by this name.
def average_ojective(
    bitstrings: list,
    counts: list,
    probabilities: list,
    costs: list,
    best_cost: float,
    best_bitstring: str,
) -> float:
    """Return the expectation of the sampled costs under their probabilities.

    Only ``probabilities`` and ``costs`` are used; the remaining arguments
    are part of the custom-objective signature and are ignored here.
    """
    return sum(p * c for p, c in zip(probabilities, costs))
# Wire the custom hooks defined above into the OPTIMIZED drive shaping.
drive_shaping = DriveShapingConfig(
    drive_shaping_method=DriveType.OPTIMIZED,
    optimized_re_execute_opt_drive=True,
    optimized_custom_qubo_cost=penalized_qubo,
    optimized_callback_objective=callback,
    optimized_custom_objective=average_ojective,
)
config = SolverConfig(
    use_quantum=True,
    drive_shaping=drive_shaping,
)
solver = QuboSolver(instance, config)
solution = solver.solve()
# Inspect how many intermediate results were recorded and the last one.
len(opt_results), opt_results[-1]
solution