diff --git a/extra_requirements/requirements-tests.txt b/extra_requirements/requirements-tests.txt
index ef401a943..ae69b24a1 100644
--- a/extra_requirements/requirements-tests.txt
+++ b/extra_requirements/requirements-tests.txt
@@ -4,5 +4,5 @@ git+https://github.com/XENONnT/ax_env
 wfsim==1.0.2
 nbmake
 pytest-xdist
-xedocs==0.2.23
+xedocs==0.2.24
 ipython==8.12.1
diff --git a/straxen/plugins/events/corrected_areas.py b/straxen/plugins/events/corrected_areas.py
index 6666714da..4ee6545e0 100644
--- a/straxen/plugins/events/corrected_areas.py
+++ b/straxen/plugins/events/corrected_areas.py
@@ -21,7 +21,7 @@ class CorrectedAreas(strax.Plugin):
     cs2_top and cs2_bottom are corrected by the corresponding maps,
     and cs2 is the sum of the two.
     """
-    __version__ = '0.3.0'
+    __version__ = '0.5.0'
 
     depends_on = ['event_basics', 'event_positions']
 
@@ -79,25 +79,45 @@ class CorrectedAreas(strax.Plugin):
         help='circular cut (cm) for ab region, check out the note https://xe1t-wiki.lngs.infn.it/doku.php?id=jlong:sr0_2_region_se_correction'
     )
 
+    # cS2 AFT correction due to photon ionization
+    # https://xe1t-wiki.lngs.infn.it/doku.php?id=xenon:xenonnt:zihao:sr1_s2aft_photonionization_correction
+    cs2_bottom_top_ratio_correction = straxen.URLConfig(
+        default=1,
+        help='Scaling factor for cS2 AFT correction due to photon ionization'
+    )
+
     def infer_dtype(self):
         dtype = []
         dtype += strax.time_fields
 
         for peak_type, peak_name in zip(['', 'alt_'], ['main', 'alternate']):
-            dtype += [(f'{peak_type}cs1', np.float32, f'Corrected area of {peak_name} S1 [PE]'),
-                      (f'{peak_type}cs1_wo_timecorr', np.float32,
-                       f'Corrected area of {peak_name} S1 [PE] before time-dep LY correction'),
-                      (f'{peak_type}cs2_wo_elifecorr', np.float32,
-                       f'Corrected area of {peak_name} S2 before elife correction '
-                       f'(s2 xy correction + SEG/EE correction applied) [PE]'),
-                      (f'{peak_type}cs2_wo_timecorr', np.float32,
-                       f'Corrected area of {peak_name} S2 before SEG/EE and elife corrections'
-                       f'(s2 xy correction applied) [PE]'),
-                      (f'{peak_type}cs2_area_fraction_top', np.float32,
-                       f'Fraction of area seen by the top PMT array for corrected {peak_name} S2'),
-                      (f'{peak_type}cs2_bottom', np.float32,
-                       f'Corrected area of {peak_name} S2 in the bottom PMT array [PE]'),
-                      (f'{peak_type}cs2', np.float32, f'Corrected area of {peak_name} S2 [PE]'), ]
+            # Only apply
+            dtype += [
+                (f'{peak_type}cs1', np.float32, f'Corrected area of {peak_name} S1 [PE]'),
+                (
+                    f'{peak_type}cs1_wo_timecorr', np.float32,
+                    f'Corrected area of {peak_name} S1 (before LY correction) [PE]',
+                ),
+            ]
+            names = ['_wo_timecorr', '_wo_picorr', '_wo_elifecorr', '']
+            descriptions = ['S2 xy', 'SEG/EE', 'photon ionization', 'elife']
+            for i, name in enumerate(names):
+                if i == len(names) - 1:
+                    description = ''
+                else:
+                    description = ' (before ' + ' + '.join(descriptions[i + 1:])
+                    description += ', after ' + ' + '.join(descriptions[:i + 1]) + ')'
+                dtype += [
+                    (
+                        f'{peak_type}cs2{name}', np.float32,
+                        f'Corrected area of {peak_name} S2{description} [PE]',
+                    ),
+                    (
+                        f'{peak_type}cs2_area_fraction_top{name}', np.float32,
+                        f'Fraction of area seen by the top PMT array for corrected '
+                        f'{peak_name} S2{description}',
+                    ),
+                ]
         return dtype
 
     def ab_region(self, x, y):
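Note (editor's addition, not part of the patch): the new infer_dtype loop above generates one pair of cS2 fields (total and area fraction top) per correction stage. A standalone sketch of the description strings it produces, reusing the same names/descriptions lists; the print call is illustrative only:

    names = ['_wo_timecorr', '_wo_picorr', '_wo_elifecorr', '']
    descriptions = ['S2 xy', 'SEG/EE', 'photon ionization', 'elife']
    for i, name in enumerate(names):
        if i == len(names) - 1:
            description = ''
        else:
            description = ' (before ' + ' + '.join(descriptions[i + 1:])
            description += ', after ' + ' + '.join(descriptions[:i + 1]) + ')'
        print(f'cs2{name}: Corrected area of main S2{description} [PE]')
    # Prints:
    #   cs2_wo_timecorr: Corrected area of main S2 (before SEG/EE + photon ionization + elife, after S2 xy) [PE]
    #   cs2_wo_picorr: Corrected area of main S2 (before photon ionization + elife, after S2 xy + SEG/EE) [PE]
    #   cs2_wo_elifecorr: Corrected area of main S2 (before elife, after S2 xy + SEG/EE + photon ionization) [PE]
    #   cs2: Corrected area of main S2 [PE]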
@@ -109,9 +129,9 @@ def ab_region(self, x, y):
 
     def cd_region(self, x, y):
         return ~self.ab_region(x, y)
-    
+
     def s2_map_names(self):
-    
+
         # S2 top and bottom are corrected separately, and cS2 total is the sum of the two
         # figure out the map name
         if len(self.s2_xy_map.map_names) > 1:
@@ -120,13 +140,13 @@ def s2_map_names(self):
         else:
             s2_top_map_name = "map"
             s2_bottom_map_name = "map"
-    
+
         return s2_top_map_name, s2_bottom_map_name
-    
+
     def seg_ee_correction_preparation(self):
         """Get single electron gain and extraction efficiency options"""
         self.regions = {'ab': self.ab_region, 'cd': self.cd_region}
-    
+
         # setup SEG and EE corrections
         # if they are dicts, we just leave them as is
         # if they are not, we assume they are floats and
@@ -147,7 +167,7 @@ def seg_ee_correction_preparation(self):
             ee = {key: self.rel_extraction_eff for key in self.regions}
 
         return seg, avg_seg, ee
-    
+
     def compute(self, events):
         result = np.zeros(len(events), self.dtype)
         result['time'] = events['time']
@@ -159,13 +179,13 @@ def compute(self, events):
         event_positions = np.vstack([events['x'], events['y'], events['z']]).T
 
         for peak_type in ["", "alt_"]:
-            result[f"{peak_type}cs1_wo_timecorr"] = events[f'{peak_type}s1_area'] / self.s1_xyz_map(event_positions)
-            result[f"{peak_type}cs1"] = result[f"{peak_type}cs1_wo_timecorr"] / self.rel_light_yield
+            result[f"{peak_type}cs1_wo_timecorr"] = (
+                events[f'{peak_type}s1_area'] / self.s1_xyz_map(event_positions))
+            result[f"{peak_type}cs1"] = (
+                result[f"{peak_type}cs1_wo_timecorr"] / self.rel_light_yield)
 
-            # s2 corrections
-            # s2 corrections
+            # S2 corrections
             s2_top_map_name, s2_bottom_map_name = self.s2_map_names()
-
             seg, avg_seg, ee = self.seg_ee_correction_preparation()
 
             # now can start doing corrections
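Note (editor's addition, not part of the patch): seg_ee_correction_preparation returns the single-electron gain (SEG) and extraction efficiency (EE) keyed by the 'ab'/'cd' partitions; per the comments above, a plain float is broadcast to both regions while a dict is used as-is. A minimal sketch of that normalization, with a hypothetical helper name:

    # Hypothetical helper mirroring the float-vs-dict handling described above.
    def expand_per_region(value, regions=('ab', 'cd')):
        if isinstance(value, dict):
            return value  # already partitioned, leave as-is
        return {key: value for key in regions}  # broadcast a single float

    expand_per_region(31.2)                      # -> {'ab': 31.2, 'cd': 31.2}
    expand_per_region({'ab': 31.2, 'cd': 30.8})  # -> returned unchanged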
@@ -173,43 +193,54 @@ def compute(self, events):
             # S2(x,y) corrections use the observed S2 positions
             s2_positions = np.vstack([events[f'{peak_type}s2_x'], events[f'{peak_type}s2_y']]).T
 
-            # corrected s2 with s2 xy map only, i.e. no elife correction
-            # this is for s2-only events which don't have drift time info
-
-            cs2_top_xycorr = (events[f'{peak_type}s2_area']
-                              * events[f'{peak_type}s2_area_fraction_top']
-                              / self.s2_xy_map(s2_positions, map_name=s2_top_map_name))
-            cs2_bottom_xycorr = (events[f'{peak_type}s2_area']
-                                 * (1 - events[f'{peak_type}s2_area_fraction_top'])
-                                 / self.s2_xy_map(s2_positions, map_name=s2_bottom_map_name))
-
-            # For electron lifetime corrections to the S2s,
+            # corrected S2 with S2(x,y) map only, i.e. no elife correction
+            # this is for S2-only events which don't have drift time info
+            s2_xy_top = self.s2_xy_map(s2_positions, map_name=s2_top_map_name)
+            cs2_top_xycorr = (
+                events[f'{peak_type}s2_area'] * events[f'{peak_type}s2_area_fraction_top']
+                / s2_xy_top)
+            s2_xy_bottom = self.s2_xy_map(s2_positions, map_name=s2_bottom_map_name)
+            cs2_bottom_xycorr = (
+                events[f'{peak_type}s2_area'] * (1 - events[f'{peak_type}s2_area_fraction_top'])
+                / s2_xy_bottom)
+
+            # collect electron lifetime correction
+            # for electron lifetime corrections to the S2s,
             # use drift time computed using the main S1.
             el_string = peak_type + "s2_interaction_" if peak_type == "alt_" else peak_type
             elife_correction = np.exp(events[f'{el_string}drift_time'] / self.elife)
-            result[f"{peak_type}cs2_wo_timecorr"] = (cs2_top_xycorr + cs2_bottom_xycorr) * elife_correction
 
+            # collect SEG and EE corrections
+            seg_ee_corr = np.zeros(len(events))
             for partition, func in self.regions.items():
-                # partitioned SE and EE
+                # partitioned SEG and EE
                 partition_mask = func(events[f'{peak_type}s2_x'], events[f'{peak_type}s2_y'])
-
-                # Correct for SEgain and extraction efficiency
-                seg_ee_corr = seg[partition] / avg_seg[partition] * ee[partition]
-
-                # note that these are already masked!
-                cs2_top_wo_elifecorr = cs2_top_xycorr[partition_mask] / seg_ee_corr
-                cs2_bottom_wo_elifecorr = cs2_bottom_xycorr[partition_mask] / seg_ee_corr
-
-                result[f"{peak_type}cs2_wo_elifecorr"][partition_mask] = cs2_top_wo_elifecorr + cs2_bottom_wo_elifecorr
-
-                # cs2aft doesn't need elife/time corrections as they cancel
-                result[f"{peak_type}cs2_area_fraction_top"][partition_mask] = cs2_top_wo_elifecorr / \
-                                                                              (
-                                                                                  cs2_top_wo_elifecorr + cs2_bottom_wo_elifecorr)
-
-                result[f"{peak_type}cs2"][partition_mask] = result[f"{peak_type}cs2_wo_elifecorr"][partition_mask] * \
-                                                            elife_correction[partition_mask]
-                result[f"{peak_type}cs2_bottom"][partition_mask] = cs2_bottom_wo_elifecorr * elife_correction[
-                    partition_mask]
-
+                # correct for SEG and EE
+                seg_ee_corr[partition_mask] = seg[partition] / avg_seg[partition] * ee[partition]
+
+            # apply S2 xy correction
+            result[f"{peak_type}cs2_wo_timecorr"] = cs2_top_xycorr + cs2_bottom_xycorr
+            result[f"{peak_type}cs2_area_fraction_top_wo_timecorr"] = (
+                cs2_top_xycorr / result[f"{peak_type}cs2_wo_timecorr"])
+
+            # apply SEG and EE correction
+            cs2_top_wo_picorr = cs2_top_xycorr / seg_ee_corr
+            cs2_bottom_wo_picorr = cs2_bottom_xycorr / seg_ee_corr
+            result[f"{peak_type}cs2_wo_picorr"] = cs2_top_wo_picorr + cs2_bottom_wo_picorr
+            result[f"{peak_type}cs2_area_fraction_top_wo_picorr"] = (
+                cs2_top_wo_picorr / result[f"{peak_type}cs2_wo_picorr"])
+
+            # apply photon ionization intensity and cS2 AFT correction (see #1247)
+            # cS2 bottom should be corrected by photon ionization, but not cS2 top
+            cs2_top_wo_elifecorr = cs2_top_wo_picorr
+            cs2_bottom_wo_elifecorr = (
+                cs2_bottom_wo_picorr * self.cs2_bottom_top_ratio_correction)
+            result[f"{peak_type}cs2_wo_elifecorr"] = cs2_top_wo_elifecorr + cs2_bottom_wo_elifecorr
+            result[f"{peak_type}cs2_area_fraction_top_wo_elifecorr"] = (
+                cs2_top_wo_elifecorr / result[f"{peak_type}cs2_wo_elifecorr"])
+
+            # apply electron lifetime correction
+            result[f"{peak_type}cs2"] = result[f"{peak_type}cs2_wo_elifecorr"] * elife_correction
+            result[f"{peak_type}cs2_area_fraction_top"] = (
+                result[f"{peak_type}cs2_area_fraction_top_wo_elifecorr"])
         return result
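Note (editor's addition, not part of the patch): the rewritten compute() applies the cS2 corrections in a fixed order and stores each intermediate total together with its area fraction top (AFT). A condensed, per-event sketch of that chain; all argument names are placeholders rather than straxen fields or configs:

    import numpy as np

    def cs2_chain(s2_area, aft, xy_top, xy_bottom, seg_ee, pi_ratio, drift_time, elife):
        # 1) S2(x,y) correction, top and bottom arrays separately
        top = s2_area * aft / xy_top
        bottom = s2_area * (1 - aft) / xy_bottom
        cs2_wo_timecorr = top + bottom
        # 2) SEG/EE correction
        top, bottom = top / seg_ee, bottom / seg_ee
        cs2_wo_picorr = top + bottom
        # 3) photon-ionization AFT correction: scales the bottom array only
        bottom = bottom * pi_ratio
        cs2_wo_elifecorr = top + bottom
        # 4) electron lifetime correction; the AFT ratio is unaffected by it
        cs2 = cs2_wo_elifecorr * np.exp(drift_time / elife)
        cs2_aft = top / cs2_wo_elifecorr
        return cs2_wo_timecorr, cs2_wo_picorr, cs2_wo_elifecorr, cs2, cs2_aft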
diff --git a/straxen/plugins/events/event_positions.py b/straxen/plugins/events/event_positions.py
index 7f8b74173..55aebe42f 100644
--- a/straxen/plugins/events/event_positions.py
+++ b/straxen/plugins/events/event_positions.py
@@ -19,7 +19,7 @@ class EventPositions(strax.Plugin):
 
     depends_on = ('event_basics',)
 
-    __version__ = '0.2.1'
+    __version__ = '0.3.0'
 
     default_reconstruction_algorithm = straxen.URLConfig(
         default=DEFAULT_POSREC_ALGO,
@@ -116,34 +116,21 @@ def compute(self, events):
         for s_i in [0, 1, 2]:
             # alt_sx_interaction_drift_time is calculated between main Sy and alternative Sx
             drift_time = events['drift_time'] if not s_i else events[f'alt_s{s_i}_interaction_drift_time']
-
             z_obs = - self.electron_drift_velocity * drift_time
             xy_pos = 's2_' if s_i != 2 else 'alt_s2_'
             orig_pos = np.vstack([events[f'{xy_pos}x'], events[f'{xy_pos}y'], z_obs]).T
             r_obs = np.linalg.norm(orig_pos[:, :2], axis=1)
-            delta_r = self.map(orig_pos)
             z_obs = z_obs + self.electron_drift_velocity * self.electron_drift_time_gate
 
-            # apply radial correction
+            # apply z bias correction
+            z_dv_delta = self.z_bias_map(np.array([r_obs, z_obs]).T, map_name='z_bias_map')
+            corr_pos = np.vstack([events[f"{xy_pos}x"], events[f"{xy_pos}y"], z_obs - z_dv_delta]).T
+            # apply r bias correction
+            delta_r = self.map(corr_pos)
             with np.errstate(invalid='ignore', divide='ignore'):
                 r_cor = r_obs + delta_r
                 scale = np.divide(r_cor, r_obs, out=np.zeros_like(r_cor), where=r_obs != 0)
 
-            # z correction due to longer drift time for distortion
-            # calculated based on the Pythagorean theorem where
-            # the electron track is assumed to be a straight line
-            # (geometrical reasoning not valid if |delta_r| > |z_obs|,
-            # as cathetus cannot be longer than hypothenuse)
-            with np.errstate(invalid='ignore'):
-                z_cor = -(z_obs ** 2 - delta_r ** 2) ** 0.5
-                invalid = np.abs(z_obs) < np.abs(delta_r)
-                # do not apply z correction above gate
-                invalid |= z_obs >= 0
-                z_cor[invalid] = z_obs[invalid]
-                delta_z = z_cor - z_obs
-            # correction of z bias due to non-uniform field
-            z_dv_delta = self.z_bias_map(np.array([r_obs, z_obs]).T, map_name='z_bias_map')
-
             pre_field = '' if s_i == 0 else f'alt_s{s_i}_'
             post_field = '' if s_i == 0 else '_fdc'
             result.update({f'{pre_field}x{post_field}': orig_pos[:, 0] * scale,
@@ -156,7 +143,6 @@ def compute(self, events):
                            # using z_dv_corr (z_obs - z_dv_delta) in agreement with the dtype description
                            # the FDC for z (z_cor) is found to be not reliable (see #527)
                            f'{pre_field}z': z_obs - z_dv_delta,
-                           f'{pre_field}z_field_distortion_correction': delta_z,
                            f'{pre_field}z_dv_corr': z_obs - z_dv_delta,
                            })
         return result
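Note (editor's addition, not part of the patch): EventPositions now evaluates the z bias map first and feeds the z-bias-corrected position into the field-distortion (r) map, and the Pythagorean z correction (z_field_distortion_correction) is no longer filled in compute(). A condensed sketch of the new ordering, with stand-in callables for the two straxen maps and placeholder argument names:

    import numpy as np

    def correct_positions(x, y, drift_time, v_drift, t_gate, z_bias_map, fdc_map):
        z_obs = -v_drift * drift_time
        r_obs = np.sqrt(x ** 2 + y ** 2)
        z_obs = z_obs + v_drift * t_gate
        # 1) z bias correction from the (r, z) map
        z_dv_delta = z_bias_map(np.array([r_obs, z_obs]).T)
        # 2) r bias correction, evaluated at the z-bias-corrected position
        delta_r = fdc_map(np.vstack([x, y, z_obs - z_dv_delta]).T)
        with np.errstate(invalid='ignore', divide='ignore'):
            scale = np.divide(r_obs + delta_r, r_obs,
                              out=np.zeros_like(r_obs), where=r_obs != 0)
        return x * scale, y * scale, z_obs - z_dv_delta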