fix transverse damper and improve tests #144

Open · wants to merge 3 commits into base: refactor/zotter_definition
47 changes: 17 additions & 30 deletions tests/test_transverse_damper.py
@@ -24,73 +24,60 @@ def test_transverse_damper(test_context):
alpha = 53.86**-2

e0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6
en = 450e9
p0 = en * en / c
gamma = en/e0
p0c = 450e9
gamma = p0c/e0
beta = np.sqrt(1-1/gamma**2)

h_rf = 35640
bunch_spacing_buckets = 10
n_bunches = 2
num_bunches = 2
n_slices = 500

q_x = 60.275
q_y = 60.295
chroma = 0

epsn_x = 2e-6
epsn_y = 2e-6
taub = 0.9e-9
sigma_z = taub*beta*c/4

circumference = 26658.883
average_radius = circumference / (2 * np.pi)

momentum_compaction = alpha
eta = momentum_compaction - 1.0 / gamma ** 2
v_rf = 4e6
q_s = np.sqrt((en*v_rf*h_rf*eta)/(2*np.pi*beta*c*p0))
sigma_delta = q_s * sigma_z / (average_radius * eta)
f_rev = beta*c/circumference
f_rf = f_rev*h_rf

beta_x = average_radius/q_x
beta_y = average_radius/q_y

bucket_length = circumference / h_rf
zeta_range = (-0.5*bucket_length, 0.5*bucket_length)

filling_scheme = np.zeros(int(h_rf/bunch_spacing_buckets))
filling_scheme[0:n_bunches] = 1
filling_scheme[0: num_bunches] = 1
filled_slots = np.nonzero(filling_scheme)[0]

betatron_map = xt.LineSegmentMap(
length=circumference, betx=beta_x, bety=beta_y,
qx=q_x, qy=q_y,
one_turn_map = xt.LineSegmentMap(
length=circumference,
betx=70, bety=80,
qx=62.31, qy=60.32,
longitudinal_mode=longitudinal_mode,
voltage_rf=v_rf, frequency_rf=f_rf, lag_rf=180,
momentum_compaction_factor=momentum_compaction,
dqx=chroma, dqy=chroma
)

gain_x = 0.01
gain_y = 0.01

transverse_damper = xf.TransverseDamper(
gain_x=gain_x, gain_y=gain_y,
num_bunches=n_bunches,
filling_scheme=filling_scheme,
filled_slots=filled_slots,
zeta_range=zeta_range,
num_slices=n_slices,
bunch_spacing_zeta=bunch_spacing_buckets*bucket_length,
circumference=circumference
circumference=circumference,
mode='bunch-by-bunch'
)

line = xt.Line(elements=[[betatron_map, transverse_damper][0]],
element_names=[['betatron_map', 'transverse_damper'][0]])
line = xt.Line(elements=[[one_turn_map, transverse_damper][0]],
element_names=[['one_turn_map', 'transverse_damper'][0]])

line.particle_ref = xt.Particles(p0c=450e9)
line.particle_ref = xt.Particles(p0c=p0c)
line.build_tracker(_context=test_context)

particles = xp.generate_matched_gaussian_multibunch_beam(
@@ -111,13 +98,13 @@ def test_transverse_damper(test_context):
particles.px += amplitude
particles.py += amplitude

mean_x = np.zeros((n_turns, n_bunches))
mean_y = np.zeros((n_turns, n_bunches))
mean_x = np.zeros((n_turns, num_bunches))
mean_y = np.zeros((n_turns, num_bunches))

for i_turn in range(n_turns):
line.track(particles, num_turns=1)
transverse_damper.track(particles, i_turn)
for ib in range(n_bunches):
for ib in range(num_bunches):
mean_x[i_turn, ib] = np.mean(particles.x[n_macroparticles*ib:
n_macroparticles*(ib+1)])
mean_y[i_turn, ib] = np.mean(particles.y[n_macroparticles*ib:
n_macroparticles*(ib+1)])
@@ -128,7 +115,7 @@ def test_transverse_damper(test_context):
i_fit_start = 200
i_fit_end = 600

for i_bunch in range(n_bunches):
for i_bunch in range(num_bunches):
ampls_x = np.abs(hilbert(mean_x[:, i_bunch]))
fit_x = linregress(turns[i_fit_start: i_fit_end],
np.log(ampls_x[i_fit_start: i_fit_end]))
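Note on the factor of 2 in the assertions above: the damper kicks only the momentum coordinates (px -= gain * <px> once per turn), so the coherent oscillation amplitude decays at a rate of gain/2 per turn, and the fitted slope of the log envelope satisfies -2 * slope ≈ gain. A minimal self-contained sketch of this reasoning (illustrative, not part of the PR):

import numpy as np
from scipy.signal import hilbert
from scipy.stats import linregress

# Toy model: one bunch centroid rotating in normalized (x, px) phase
# space; the damper kicks only px, by -gain times the centroid px.
gain = 0.01
n_turns = 1000
q = 0.31  # fractional tune
x, px = 1.0, 0.0
mean_x = np.zeros(n_turns)
for i_turn in range(n_turns):
    c, s = np.cos(2*np.pi*q), np.sin(2*np.pi*q)
    x, px = c*x + s*px, -s*x + c*px  # one-turn betatron rotation
    px -= gain * px                  # damper kick on the centroid
    mean_x[i_turn] = x

# Same analysis as the test: Hilbert envelope, then a log-linear fit
ampls = np.abs(hilbert(mean_x))
fit = linregress(np.arange(200, 600), np.log(ampls[200:600]))
print(-2 * fit.slope)  # ≈ gain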
121 changes: 121 additions & 0 deletions tests_mpi/test_transverse_damper_mpi.py
@@ -0,0 +1,121 @@
import numpy as np
from scipy.constants import c
from scipy.constants import physical_constants
from scipy.signal import hilbert
from scipy.stats import linregress

import xfields as xf
import xtrack as xt
import xpart as xp
from xobjects.test_helpers import for_all_test_contexts


exclude_contexts = ['ContextPyopencl', 'ContextCupy']

@for_all_test_contexts(excluding=exclude_contexts)
def test_transverse_damper_mpi(test_context):
longitudinal_mode = 'nonlinear'
# Machine settings
n_turns = 1000

n_macroparticles = 10000
intensity = 8e9

alpha = 53.86**-2

e0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6
en = 450e9
gamma = en/e0
beta = np.sqrt(1-1/gamma**2)

h_rf = 35640
bunch_spacing_buckets = 10
num_bunches = 4
n_slices = 500

epsn_x = 2e-6
epsn_y = 2e-6
taub = 0.9e-9
sigma_z = taub*beta*c/4

circumference = 26658.883

momentum_compaction = alpha
v_rf = 4e6
f_rev = beta*c/circumference
f_rf = f_rev*h_rf

bucket_length = circumference / h_rf
zeta_range = (-0.5*bucket_length, 0.5*bucket_length)

filling_scheme = np.zeros(int(h_rf/bunch_spacing_buckets))
filling_scheme[0: num_bunches] = 1

one_turn_map = xt.LineSegmentMap(
length=circumference,
betx=70, bety=80,
qx=62.31, qy=60.32,
longitudinal_mode=longitudinal_mode,
voltage_rf=v_rf, frequency_rf=f_rf, lag_rf=180,
momentum_compaction_factor=momentum_compaction,
)

gain_x = 0.01
gain_y = 0.01

transverse_damper = xf.TransverseDamper(
gain_x=gain_x, gain_y=gain_y,
filling_scheme=filling_scheme,
zeta_range=zeta_range,
num_slices=n_slices,
bunch_spacing_zeta=bunch_spacing_buckets*bucket_length,
circumference=circumference
)

line = xt.Line(elements=[one_turn_map, transverse_damper],
element_names=['one_turn_map', 'transverse_damper'])

line.particle_ref = xt.Particles(p0c=450e9)
line.build_tracker(_context=test_context)

particles = xp.generate_matched_gaussian_multibunch_beam(
_context=test_context,
filling_scheme=filling_scheme,
bunch_num_particles=n_macroparticles,
bunch_intensity_particles=intensity,
nemitt_x=epsn_x, nemitt_y=epsn_y, sigma_z=sigma_z,
line=line, bunch_spacing_buckets=bunch_spacing_buckets,
rf_harmonic=[h_rf], rf_voltage=[v_rf],
particle_ref=line.particle_ref,
prepare_line_and_particles_for_mpi_wake_sim=True
)

# apply a distortion to the bunches
amplitude = 1e-3
particles.px += amplitude
particles.py += amplitude

mean_x = np.zeros((n_turns))
mean_y = np.zeros((n_turns))

for i_turn in range(n_turns):
line.track(particles, num_turns=1)
mean_x[i_turn] = np.mean(particles.x)
mean_y[i_turn] = np.mean(particles.y)

turns = np.linspace(0, n_turns-1, n_turns)

i_fit_start = 200
i_fit_end = 600

ampls_x = np.abs(hilbert(mean_x))
fit_x = linregress(turns[i_fit_start: i_fit_end],
np.log(ampls_x[i_fit_start: i_fit_end]))

assert np.isclose(-fit_x.slope*2, gain_x, atol=1e-4, rtol=0)

ampls_y = np.abs(hilbert(mean_y))
fit_y = linregress(turns[i_fit_start: i_fit_end],
np.log(ampls_y[i_fit_start: i_fit_end]))

assert np.isclose(-fit_y.slope*2, gain_y, atol=1e-4, rtol=0)
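Usage note (not part of the diff): this test lives in the new tests_mpi folder because prepare_line_and_particles_for_mpi_wake_sim=True splits the bunches of the filling scheme across MPI ranks (see _reconfigure_for_parallel below), so each rank tracks and records the centroid of only its own share of the beam, which is why mean_x and mean_y are one value per turn here. It is meant to be launched under an MPI runner, for example with something like mpiexec -n 2 python -m pytest tests_mpi/test_transverse_damper_mpi.py; the exact invocation depends on the local MPI setup.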
89 changes: 52 additions & 37 deletions xfields/beam_elements/transverse_damper.py
@@ -1,10 +1,12 @@
import numpy as np

import xpart as xp
import xfields as xf
import xtrack as xt
from xfields.slicers.compressed_profile import CompressedProfile


class TransverseDamper:
class TransverseDamper(xt.BeamElement):
"""
A simple bunch-by-bunch transverse damper implementation.

@@ -22,28 +24,39 @@ class TransverseDamper:
num_slices (int): the number of slices used by the underlying slicer,
bunch_spacing_zeta (float): the bunch spacing in meters
circumference (float): the machine circumference
mode (str): the mode of operation of the damper. Either 'bunch-by-bunch'
(default) or 'slice-by-slice'. In 'bunch-by-bunch' mode, the damper
acts on the average of the particles in each bunch. In
'slice-by-slice' mode, the damper acts on the average of the
particles in each slice.

Returns:
(TransverseDamper): A transverse damper beam element.
"""
def __init__(self, gain_x, gain_y, num_bunches, filling_scheme,
filled_slots, zeta_range, num_slices, bunch_spacing_zeta,
circumference):

iscollective = True

def __init__(self, gain_x, gain_y, filling_scheme, zeta_range, num_slices, bunch_spacing_zeta,
circumference, bunch_selection=None, **kwargs):
self.gains = {
'px': gain_x,
'py': gain_y,
}

self.slicer = xf.UniformBinSlicer(
filling_scheme=filling_scheme,
bunch_selection=filled_slots,
bunch_selection=bunch_selection,
zeta_range=zeta_range,
num_slices=num_slices,
bunch_spacing_zeta=bunch_spacing_zeta,
moments=['px', 'py']
)

self.num_bunches = num_bunches
if filling_scheme is not None:
i_last_bunch = np.where(filling_scheme)[0][-1]
num_periods = i_last_bunch + 1
else:
num_periods = 1

self.moments_data = {}
for moment in self.gains.keys():
@@ -52,11 +65,30 @@ def __init__(self, gain_x, gain_y, num_bunches, filling_scheme,
zeta_range=zeta_range,
num_slices=num_slices,
bunch_spacing_zeta=bunch_spacing_zeta,
num_periods=num_bunches,
num_periods=num_periods,
num_turns=1,
circumference=circumference
)

def _reconfigure_for_parallel(self, n_procs, my_rank):
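# Rebuild the full filling scheme from the slicer's filled slots, then
# split it into one chunk of bunches per MPI rank and keep the chunk
# assigned to this rank.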
filled_slots = self.slicer.filled_slots
scheme = np.zeros(np.max(filled_slots) + 1,
dtype=np.int64)
scheme[filled_slots] = 1

split_scheme = xp.matched_gaussian.split_scheme
bunch_selection_rank = split_scheme(filling_scheme=scheme,
n_chunk=int(n_procs))

self.slicer = xf.UniformBinSlicer(
filling_scheme=scheme,
bunch_selection=bunch_selection_rank[my_rank],
zeta_range=self.slicer.zeta_range,
num_slices=self.slicer.num_slices,
bunch_spacing_zeta=self.slicer.bunch_spacing_zeta,
moments=['px', 'py']
)

def track(self, particles, i_turn=0):
i_slice_particles = particles.particle_id * 0 + -999
i_slot_particles = particles.particle_id * 0 + -9999
@@ -65,37 +97,20 @@ def track(self, particles, i_turn=0):
i_slot_particles=i_slot_particles)

for moment in ['px', 'py']:

particles_moment = getattr(particles, moment)[:]
slice_means = self.slicer.mean(moment)

for i_bunch, bunch_number in enumerate(
self.slicer.bunch_selection):

nnz_slices = self.slicer.num_particles[i_bunch, :] > 0
moments_bunch = {
moment: (np.ones_like(slice_means[i_bunch, :]) *
np.mean(slice_means[i_bunch, nnz_slices]))
}

self.moments_data[moment].set_moments(
moments=moments_bunch,
i_turn=0,
i_source=self.slicer.filled_slots[bunch_number])

interpolated_result = particles.zeta * 0

md = self.moments_data[moment]

self.moments_data[moment]._interp_result(
particles=particles,
data_shape_0=md.data.shape[0],
data_shape_1=md.data.shape[1],
data_shape_2=md.data.shape[2],
data=md.data,
i_slot_particles=i_slot_particles,
i_slice_particles=i_slice_particles,
out=interpolated_result
)
self.slicer.bunch_selection):
slot_mask = i_slot_particles == bunch_number
slot_slices = np.unique(i_slice_particles[slot_mask])

if len(self.slicer.bunch_selection) == 1:
slot_mean = np.mean(slice_means[slot_slices])
else:
slot_mean = np.mean(slice_means[i_bunch, slot_slices])

slot_mean = np.mean(particles_moment[slot_mask])

getattr(particles, moment)[:] -= (self.gains[moment] *
interpolated_result)
particles_moment[slot_mask] -= (self.gains[moment] *
slot_mean)
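Net effect of the rewritten loop: every particle in a filled slot receives the same kick, -gain times a per-slot average of the moment (taken from the slicer's slice means). A toy numpy illustration of that arithmetic, using a plain particle mean for simplicity (not code from the PR):

import numpy as np

gain = 0.01
px = np.array([1.0, 3.0, -2.0, 4.0])
i_slot_particles = np.array([0, 0, 1, 1])  # two bunches, two particles each
for bunch_number in np.unique(i_slot_particles):
    slot_mask = i_slot_particles == bunch_number
    px[slot_mask] -= gain * np.mean(px[slot_mask])
print(px)  # slot 0: mean 2.0 -> [0.98, 2.98]; slot 1: mean 1.0 -> [-2.01, 3.99]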