
Running noisy simulations

import pulser
import emu_mps
import numpy as np
import logging #used to turn off verbose logging in emu_mps
reg = pulser.Register.from_coordinates([[0,0],[10,0]])
reg.draw(blockade_radius=1e-10, draw_graph=True, draw_half_radius=True) #draw the blockade radius as ~0, since interactions will be masked in the MPSConfig
seq = pulser.Sequence(reg, pulser.devices.MockDevice)
t = 1000
pulse = pulser.Pulse.ConstantAmplitude(
    0., pulser.waveforms.ConstantWaveform(t, 0.), 0.0
)
seq.declare_channel("ising_global", "rydberg_global")
seq.add(pulse, "ising_global")
noise = pulser.NoiseModel(relaxation_rate=1, dephasing_rate=1.)
times = np.arange(10., 1000. + 1e-8, 10) #the small offset makes arange include the endpoint 1000.
times /= seq.get_duration()
basis = ("r", "g")
magnetization = emu_mps.Occupation(evaluation_times=times)
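# emu-mps expects evaluation_times as fractions of the total sequence duration, which is why the
# absolute times were divided by seq.get_duration() above. A quick sanity check (a sketch, not part
# of the original workflow): every entry should lie in (0, 1], with 1.0 marking the end of the sequence.
assert times[0] > 0 and np.isclose(times[-1], 1.0)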
#define initial state
initial_state = emu_mps.MPS.from_state_amplitudes(amplitudes={"rr": 1.}, eigenstates=basis)
#define config and backend
config = emu_mps.MPSConfig(
    noise_model=noise,
    num_gpus_to_use=0, #small systems are faster on cpu
    interaction_cutoff=1e10, #with this huge cutoff, all interactions are set to 0, regardless of spacing
    initial_state=initial_state,
    observables=[magnetization],
    log_level=logging.WARN, #suppress info output for the many runs
)
backend = emu_mps.MPSBackend(seq, config=config)
results = []
nruns = 500 #0.125 seconds per run on my machine
for _ in range(nruns):
    results.append(backend.run())
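# If you want to check the per-run cost quoted above on your own machine, a minimal timing
# sketch (assumption: one extra run of the backend is acceptable):
import time
start = time.perf_counter()
backend.run()
print(f"one run took {time.perf_counter() - start:.3f} s")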
magnetization.default_aggregation_type
#temporarily unavailable: the aggregation functionality is being moved to pulser (still under
#discussion), so this API is subject to change once that move is complete.
aggregated_results = emu_mps.aggregate(results)
aggregated_results.get_result(magnetization, 100/seq.get_duration())[0] # average magnetization of qubit 0 at time 100ns
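# A single get_result call returns the observable at one evaluation time; to look at the whole
# mean-occupation trace of qubit 0, the same call can be repeated for every registered time
# (a small sketch built only on get_result):
mean_trace_qubit0 = [aggregated_results.get_result(magnetization, t)[0] for t in times]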
import statistics
def median_qubit_density(qubit_density_values: list[list[float]]):
    return [
        statistics.median(qubit_density[qubit_index] for qubit_density in qubit_density_values)
        for qubit_index in range(2)
    ]
# API for kwargs of `Results.aggregate` is `callback_name=callable_aggregator`
aggregated_median_qubit_density = emu_mps.aggregate(results, occupation=median_qubit_density)
aggregated_median_qubit_density.get_result(magnetization, 100/seq.get_duration())[0] # median magnetization of qubit 0 at time 100ns
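# The same callback mechanism works for any per-qubit statistic. For example, a standard-error
# aggregator in the same shape as median_qubit_density (a sketch; the name stderr_qubit_density
# is ours, not part of emu-mps):
def stderr_qubit_density(qubit_density_values: list[list[float]]):
    return [
        statistics.stdev(float(qubit_density[qubit_index]) for qubit_density in qubit_density_values)
        / len(qubit_density_values) ** 0.5
        for qubit_index in range(2)
    ]
aggregated_stderr_qubit_density = emu_mps.aggregate(results, occupation=stderr_qubit_density)
aggregated_stderr_qubit_density.get_result(magnetization, 100/seq.get_duration())[0] # statistical error on the mean for qubit 0 at time 100ns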
def set_density(qubit_density_values: list[list[float]]):
    return [
        set(qubit_density[qubit_index] for qubit_density in qubit_density_values)
        for qubit_index in range(2)
    ]
aggregated_set_qubit_density = emu_mps.aggregate(results, occupation=set_density)
aggregated_set_qubit_density.get_result(magnetization, 100/seq.get_duration())[0] # set of magnetization values of qubit 0 at time 100ns
# The extra 0.999 value comes from floating-point maths, Monte-Carlo logic and state renormalization.
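# Rounding before collecting into the set collapses values that differ only at fine floating-point
# precision, while genuinely different trajectories stay distinct (a sketch; the 6-decimal precision
# is an arbitrary choice):
def rounded_set_density(qubit_density_values: list[list[float]]):
    return [
        {round(float(qubit_density[qubit_index]), 6) for qubit_density in qubit_density_values}
        for qubit_index in range(2)
    ]
emu_mps.aggregate(results, occupation=rounded_set_density).get_result(magnetization, 100/seq.get_duration())[0]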
def densities(results, n):
    return [density[0] for density in emu_mps.aggregate(results[:n]).occupation]
expected = [np.exp(-t * seq.get_duration() / 1000) for t in times] #exp(-gamma*t): the fractional time is converted back to us, and gamma = relaxation_rate = 1/us
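# The expected curve follows from single-qubit relaxation: with no drive, the rydberg population n
# obeys dn/dt = -gamma*n (pure dephasing leaves populations untouched), so n(t) = exp(-gamma*t).
# A quick numerical check of that claim (a sketch, plain Euler integration over 1 us):
gamma = 1.0 #relaxation_rate from the NoiseModel, in 1/us
dt = 1e-4 #integration step in us
n = 1.0 #each qubit starts fully in |r>
for _ in range(int(1.0 / dt)):
    n -= gamma * n * dt
print(n, np.exp(-gamma * 1.0)) #both close to 0.368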
import matplotlib.pyplot as pl
pl.plot(times, expected, label="expected")
for n in [100, 200, 500]:
    pl.plot(times, densities(results, n), label=f"n = {n}")
pl.legend(loc="upper right")
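# To quantify how fast the Monte-Carlo average converges to the analytic curve, one can also look
# at the largest deviation over all evaluation times for each n (a sketch, reusing the densities
# helper defined above):
for n in [100, 200, 500]:
    max_dev = max(float(abs(d - e)) for d, e in zip(densities(results, n), expected))
    print(f"n = {n}: max deviation from exp(-t) is {max_dev:.3f}")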