Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

new parameter: analyse_dpp #461

Draft
wants to merge 2 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion omc3/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
__title__ = "omc3"
__description__ = "An accelerator physics tools package for the OMC team at CERN."
__url__ = "https://github.com/pylhc/omc3"
__version__ = "0.16.0"
__version__ = "0.16.2"
__author__ = "pylhc"
__author_email__ = "pylhc@github.com"
__license__ = "MIT"
Expand Down
3 changes: 3 additions & 0 deletions omc3/hole_in_one.py
Original file line number Diff line number Diff line change
Expand Up @@ -610,6 +610,8 @@ def optics_params():
help="Calculate second order dispersion")
params.add_parameter(name="chromatic_beating", action="store_true",
help="Calculate chromatic beatings: W, PHI and coupling")
params.add_parameter(name="analyse_dpp", type=iotools.OptionalFloat, default=OPTICS_DEFAULTS["analyse_dpp"],
help="Filter files to analyse by this value (phase advance, rdt, crdt).")
return params


Expand Down Expand Up @@ -637,6 +639,7 @@ def optics_params():
"range_of_bpms": 11,
"compensation": "model",
"rdt_magnet_order": 4,
"analyse_dpp": 0,
}


Expand Down
45 changes: 30 additions & 15 deletions omc3/optics_measurements/crdt.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,24 +8,38 @@
https://arxiv.org/pdf/1402.1461.pdf.
"""
from __future__ import annotations

from collections.abc import Sequence
from pathlib import Path
from typing import TYPE_CHECKING

import numpy as np
import pandas as pd
import tfs
import scipy.odr
from omc3.optics_measurements.constants import ERR, EXT, AMPLITUDE, MDL, PHASE, REAL, IMAG
from omc3.utils import iotools, logging_tools
from omc3.utils.stats import circular_nanmean, circular_nanerror
from omc3.definitions.constants import PLANES, PI2
from omc3.harpy.constants import COL_AMP, COL_MU, COL_PHASE, COL_ERR
from omc3.optics_measurements.rdt import get_line_sign_and_suffix
import tfs

from typing import TYPE_CHECKING
from omc3.definitions.constants import PI2, PLANES
from omc3.harpy.constants import COL_AMP, COL_ERR, COL_MU, COL_PHASE
from omc3.optics_measurements.constants import (
AMPLITUDE,
ERR,
EXT,
IMAG,
MDL,
PHASE,
REAL,
)
from omc3.optics_measurements.data_models import (
InputFiles,
check_and_warn_about_offmomentum_data,
filter_for_dpp,
)
from omc3.optics_measurements.rdt import get_line_sign_and_suffix
from omc3.utils import iotools, logging_tools
from omc3.utils.stats import circular_nanerror, circular_nanmean

if TYPE_CHECKING:
from generic_parser import DotDict
from omc3.optics_measurements.data_models import InputFiles
from generic_parser import DotDict

LOGGER = logging_tools.get_logger(__name__)

Expand Down Expand Up @@ -57,11 +71,12 @@
def calculate(measure_input: DotDict, input_files: InputFiles, invariants, header):
""" Calculate the CRDT values. """
LOGGER.info("Start of CRDT analysis")

# Todo: only on-momentum required? If not, remove this or set `dpp_value=None`, see https://github.com/pylhc/omc3/issues/456
# For now: use only the actions belonging to the current dpp value!
dpp_value = 0
invariants = {plane: inv[input_files.dpp_frames_indices(plane, dpp_value)] for plane, inv in invariants.items()}
dpp_value = measure_input.analyse_dpp
if dpp_value is None:
for plane in PLANES:
check_and_warn_about_offmomentum_data(input_files, plane, id_="CRDT calculation")
else:
invariants = filter_for_dpp(invariants, input_files, dpp_value)

assert len(input_files['X']) == len(input_files['Y'])
bpm_names = input_files.bpms(dpp_value=0)
Expand Down
41 changes: 38 additions & 3 deletions omc3/optics_measurements/data_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,6 @@

LOGGER = logging_tools.get_logger(__name__)

DPP_TOLERANCE = 1e-6

class InputFiles(dict):
"""
Stores the input files, provides methods to gather quantity specific data
Expand Down Expand Up @@ -96,7 +94,7 @@ def dpp_frames_indices(self, plane: str, dpp_value: float | None):
if dpp_value is None:
return list(range(len(self[plane])))

return np.argwhere(np.abs(self.dpps(plane) - dpp_value) < DPP_TOLERANCE).T[0]
return np.argwhere(np.abs(self.dpps(plane) - dpp_value) < dpp.DPP_TOLERANCE).T[0]

def _all_frames(self, plane: str):
return self[plane]
Expand Down Expand Up @@ -219,3 +217,40 @@ def get_data(frame, column) -> np.ndarray:
"""
columns = InputFiles.get_columns(frame, column)
return frame.loc[:, columns].to_numpy(dtype=np.float64)


# DPP Filtering related functions ------------------------------------------------------------------

def check_and_warn_about_offmomentum_data(input_files: InputFiles, plane: str, id_: str | None = None):
    """ A helper function to check if off-momentum data is present in the input files,
    but no dpp-value is given by the user.

    See https://github.com/pylhc/omc3/issues/456 .

    Args:
        input_files (InputFiles): The loaded measurement files to inspect.
        plane (str): Plane to check, e.g. "X" or "Y".
        id_ (str | None): Optional name of the analysis step (e.g. "CRDT calculation"),
            used to make the warning message more specific.
    """
    on_momentum_files = input_files.dpp_frames_indices(plane, dpp_value=0)
    if len(on_momentum_files) == len(input_files[plane]):
        return  # no off-momentum data, nothing to warn

    msg = (
        "Off-momentum files for analysis found!\n"
        "They will be included"
    )

    if id_ is not None:
        # Bugfix: do not append ", which " here — the connective is added below,
        # the old code produced "… in the {id_}, which , which can make …".
        msg += f" in the {id_}"

    msg += (
        ", which can make the results more inaccurate.\n"
        "To avoid, specify `analyse_dpp` or run the analysis only on on-momentum files"
    )

    if id_ is not None:
        msg += f" or possibly deactivate the {id_}"

    msg += "."
    LOGGER.warning(msg)


def filter_for_dpp(to_filter: dict[str, Sequence], input_files: InputFiles, dpp_value: float):
    """ Restrict the given per-plane data to the frames matching ``dpp_value``. """
    filtered = {}
    for plane, values in to_filter.items():
        selected = input_files.dpp_frames_indices(plane, dpp_value)
        filtered[plane] = values[selected]
    return filtered
46 changes: 34 additions & 12 deletions omc3/optics_measurements/dpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,25 +5,35 @@
This module contains deltap over p calculations related functionality of ``optics_measurements``.
It provides functions to computes and arrange dp over p.
"""
from __future__ import annotations

from typing import TYPE_CHECKING
from collections.abc import Sequence
import logging

import numpy as np
import pandas as pd

DPP_TOLERANCE = 1e-4
AMP_DPP_TOLERANCE = 1e-5
if TYPE_CHECKING:
import tfs
from generic_parser import DotDict
from omc3.optics_measurements.data_models import InputFiles

DPP_BIN_TOLERANCE: float = 1e-4
DPP_TOLERANCE: float = 1e-5 # not sure if these should be different! (jdilly)

LOGGER = logging.getLogger(__name__)


def arrange_dpps(dpps):
def arrange_dpps(dpps: Sequence[float], tolerance: float = DPP_BIN_TOLERANCE):
"""
Grouping of dpp-values and averaging them in the bins, also zeroes the bin closest to zero.
"""
closest_to_zero = np.argmin(np.abs(dpps))
ranges = _compute_ranges(dpps)
ranges = _compute_ranges(dpps, tolerance)
zero_offset = np.mean(_values_in_range(_find_range_with_element(ranges, closest_to_zero), dpps))
LOGGER.debug(f"dp/p closest to zero is {dpps[closest_to_zero]}")
if np.abs(zero_offset) > DPP_TOLERANCE:
if np.abs(zero_offset) > tolerance:
LOGGER.warning(f"Analysed files have large momentum deviation {zero_offset}. "
f"Optics parameters might be wrong.")
LOGGER.debug(f"Detected dpp differences, aranging as: {ranges}, zero offset: {zero_offset}.")
Expand All @@ -42,32 +52,42 @@ def _find_range_with_element(ranges, element):
return [dpp_range for dpp_range in ranges if element in dpp_range][0]


def _compute_ranges(dpps):
def _compute_ranges(dpps: Sequence[float], tolerance: float) -> list[list[int]]:
""" Groups the indices of dpps in bins of DPP_TOLERANCE.
The function first sorts the indices by their dpp value
adds a new group whenever the dpp of the next index is larger than the first value in the group.

Works for now, but we could improve by using the mean dpp of the group to check against.
Only neccessary if we have some weird outliers. (jdilly)
"""
ordered_ids = np.argsort(dpps)
ranges = [[ordered_ids[0]]]
if len(ordered_ids) == 1:
return ranges
for idx in ordered_ids[1:]:
if abs(dpps[ranges[-1][0]] - dpps[idx]) < DPP_TOLERANCE:
if abs(dpps[ranges[-1][0]] - dpps[idx]) < tolerance:
ranges[-1].append(idx)
else:
ranges.append([idx])
return ranges


def append_amp_dpp(list_of_tfs: Sequence[tfs.TfsFile], dpp_values: Sequence[float]):
    """ Add the dpp values to the "DPPAMP" header of the tfs files, if larger than
    the DPP-tolerance, otherwise set to zero.
    This is intended to set the DPPAMP value for on-momentum files to zero.

    Args:
        list_of_tfs (Sequence[tfs.TfsFile]): Files to modify (headers are changed in place).
        dpp_values (Sequence[float]): One amplitude-dp/p value per file.

    Returns:
        The same sequence of tfs files, with updated headers.
    """
    for i, dpp in enumerate(dpp_values):
        # NaN (no amplitude-dpp determined) and near-zero values count as on-momentum.
        list_of_tfs[i].headers["DPPAMP"] = dpp if (not np.isnan(dpp) and np.abs(dpp) > DPP_TOLERANCE) else 0.0
    return list_of_tfs


def append_dpp(list_of_tfs: Sequence[tfs.TfsFile], dpp_values: Sequence[float]):
    """ Add the dpp values to the "DPP" header of the tfs files.

    Args:
        list_of_tfs (Sequence[tfs.TfsFile]): Files to modify (headers are changed in place).
        dpp_values (Sequence[float]): One dp/p value per file.

    Returns:
        The same sequence of tfs files, with updated headers.
    """
    for i, dpp in enumerate(dpp_values):
        list_of_tfs[i].headers["DPP"] = dpp
    return list_of_tfs


def calculate_dpoverp(input_files, meas_input):
def calculate_dpoverp(input_files: InputFiles, meas_input: DotDict):
df_orbit = pd.DataFrame(meas_input.accelerator.model).loc[:, ['S', 'DX']]
df_orbit = pd.merge(df_orbit, input_files.joined_frame('X', ['CO', 'CORMS']), how='inner',
left_index=True, right_index=True)
Expand All @@ -85,7 +105,7 @@ def calculate_dpoverp(input_files, meas_input):
return numer / denom


def calculate_amp_dpoverp(input_files, meas_input):
def calculate_amp_dpoverp(input_files: InputFiles, meas_input: DotDict):
df_orbit = pd.DataFrame(meas_input.accelerator.model).loc[:, ['S', 'DX']]
df_orbit = pd.merge(df_orbit, input_files.joined_frame('X', ['AMPX', 'AMPZ']), how='inner',
left_index=True, right_index=True)
Expand All @@ -105,3 +125,5 @@ def calculate_amp_dpoverp(input_files, meas_input):
else:
numer = np.sum(dispersions[:, None] * amps[mask_zeros, :], axis=0)
return numer / denom


Loading
Loading