removed deprecated packages for 0.4.0 release
jacanchaplais committed May 31, 2024
1 parent 14425b9 commit e34b6dd
Showing 1 changed file with 0 additions and 236 deletions.
236 changes: 0 additions & 236 deletions graphicle/calculate.py
@@ -13,28 +13,21 @@
import operator as op
import typing as ty
import warnings
from functools import lru_cache, partial

import networkx as nx
import numba as nb
import numpy as np
import numpy.lib.recfunctions as rfn
import scipy.optimize as spo
from deprecation import deprecated

from . import base

if ty.TYPE_CHECKING is True:
    from graphicle.data import *

__all__ = [
    "azimuth_centre",
    "pseudorapidity_centre",
    "rapidity_centre",
    "weighted_centroid",
    "resultant_coords",
    "combined_mass",
    "flow_trace",
    "aggregate_momenta",
    "cluster_coeff_distbn",
    "thrust",
@@ -49,96 +42,6 @@
)


@deprecated(
    deprecated_in="0.3.1",
    removed_in="0.4.0",
    details="Use ``weighted_centroid()`` or ``resultant_coords()`` instead.",
)
def azimuth_centre(pmu: "MomentumArray", pt_weight: bool = True) -> float:
    """Calculates the central point in azimuth for a set of particles.

    :group: calculate

    .. versionadded:: 0.1.7

    .. versionchanged:: 0.3.1
       The ``pt_weight`` parameter implementation has been corrected.
       Previous versions resulted in :math:`p_T`-weighting when
       ``False``, and :math:`p_T^2`-weighting when ``True``.

    Parameters
    ----------
    pmu : MomentumArray
        Four-momenta of the particles.
    pt_weight : bool
        If ``True``, will weight the contributions of each particle by
        transverse momentum. Similar to finding a centre-of-mass, but
        for transverse momentum. Default is ``True``.

    Returns
    -------
    float
        The centre of the particle set in the azimuth dimension.
    """
    pol = pmu._xy_pol
    if not pt_weight:
        pol = pol / pmu.pt
    return np.angle(pol.sum()).item()
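
For context on what the removed helper computed: summing the transverse momenta as complex phasors gives a circular mean of azimuth that is safe against the ±π wraparound. A minimal NumPy sketch of the same idea, assuming plain ``px``, ``py`` and ``pt`` arrays rather than graphicle's ``MomentumArray`` (the function name is hypothetical):

import numpy as np

def azimuth_centre_sketch(px, py, pt, pt_weight=True):
    # Each particle contributes a phasor px + i*py, whose angle is phi and
    # whose magnitude is pT; taking the angle of the sum gives a circular,
    # optionally pT-weighted, mean that handles the +/- pi boundary.
    pol = px + 1j * py
    if not pt_weight:
        pol = pol / pt  # reduce each phasor to a unit vector
    return float(np.angle(pol.sum()))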


@deprecated(
    deprecated_in="0.3.1",
    removed_in="0.4.0",
    details="Use ``weighted_centroid()`` or ``resultant_coords()`` instead.",
)
def pseudorapidity_centre(pmu: "MomentumArray") -> float:
    """Calculates the central point in pseudorapidity for a set of
    particles.

    :group: calculate

    .. versionadded:: 0.1.7

    Parameters
    ----------
    pmu : MomentumArray
        Four-momenta of the particles.

    Returns
    -------
    float
        The :math:`p_T` weighted centre of the particle set in the
        pseudorapidity dimension.
    """
    return ((pmu.eta * pmu.pt).sum() / pmu.pt.sum()).item()


@deprecated(
    deprecated_in="0.3.1",
    removed_in="0.4.0",
    details="Use ``weighted_centroid()`` or ``resultant_coords()`` instead.",
)
def rapidity_centre(pmu: "MomentumArray") -> float:
    """Calculates the central point in rapidity for a set of particles.

    :group: calculate

    .. versionadded:: 0.2.11

    Parameters
    ----------
    pmu : MomentumArray
        Four-momenta of the particles.

    Returns
    -------
    float
        The :math:`p_T` weighted centre of the particle set in the
        rapidity dimension.
    """
    return (pmu.rapidity * pmu.pt).sum() / pmu.pt.sum()
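
Both removed centroid helpers above reduce to the same :math:`p_T`-weighted arithmetic mean; a hedged sketch with plain NumPy arrays, where ``coord`` stands in for either ``pmu.eta`` or ``pmu.rapidity`` (the function name and inputs are hypothetical):

import numpy as np

def pt_weighted_centre(coord, pt):
    # Weighted mean: sum(pT_i * x_i) / sum(pT_i), the transverse-momentum
    # analogue of a centre-of-mass along the given coordinate.
    return float(np.sum(coord * pt) / np.sum(pt))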


def weighted_centroid(
    pmu: "MomentumArray", pseudo: bool = True
) -> ty.Tuple[float, float]:
@@ -286,145 +189,6 @@ def combined_mass(
    return mass


def _diffuse(colors: ty.List[base.AnyVector], feats: ty.List[base.AnyVector]):
    color_shape = colors[0].shape
    av_color = np.zeros((color_shape[0], color_shape[1]), dtype="<f8")
    color_stack = np.dstack(colors)  # len_basis x feat_dim x num_in
    feat_stack = np.vstack(feats).T  # feat_dim x num_in
    feat_sum = np.sum(feat_stack, axis=1)
    nonzero_mask = feat_sum != 0.0
    av_color[:, nonzero_mask] = (
        np.sum(
            color_stack[:, nonzero_mask, :] * feat_stack[nonzero_mask], axis=2
        )
        / feat_sum[nonzero_mask]
    )
    return av_color
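
``_diffuse`` takes a feature-weighted average of the parents' contribution ("colour") vectors, guarding against zero weight sums. A toy invocation with made-up numbers (two parents, a two-parton basis, one scalar feature per edge):

import numpy as np

colors = [np.array([[1.0], [0.0]]), np.array([[0.0], [1.0]])]  # one-hot parent contributions
feats = [np.array([3.0]), np.array([1.0])]                     # edge feature weights
# _diffuse(colors, feats) ~= [[0.75], [0.25]]: the child inherits 75% of its
# feature from the first basis parton and 25% from the second.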


@lru_cache(maxsize=None)
def _trace_vector(
    nx_graph: nx.DiGraph,
    vertex: int,
    basis: ty.Tuple[int, ...],
    feat_dim: int,
    is_structured: bool,
    exclusive: bool = False,
) -> base.AnyVector:
    len_basis = len(basis)
    feat_fmt = rfn.structured_to_unstructured if is_structured else lambda x: x
    color = np.zeros((len_basis, feat_dim), dtype="<f8")
    if vertex in basis:
        color[basis.index(vertex)] = 1.0
        if exclusive is True:
            return color
    in_edges = nx_graph.in_edges(vertex, data=True)
    colors_in: list[base.AnyVector] = []
    feats = []
    for edge in in_edges:
        feats.append(feat_fmt(edge[2]["feat"]))
        in_vtx = edge[0]
        colors_in.append(
            _trace_vector(
                nx_graph, in_vtx, basis, feat_dim, is_structured, exclusive
            )
        )
    if colors_in:
        color += _diffuse(colors_in, feats)
    return color
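
The recursion assigns each basis (hard-parton) vertex a one-hot vector and propagates it down the DAG, averaging the parents' contributions via ``_diffuse`` at every step. A hedged toy example on a hand-built three-vertex graph (the vertex ids and edge feature values are made up):

import networkx as nx

g = nx.DiGraph()
g.add_edge(-1, -3, feat=3.0)  # basis parton 0 -> child vertex, weight 3
g.add_edge(-2, -3, feat=1.0)  # basis parton 1 -> child vertex, weight 1
contrib = _trace_vector(g, -3, (-1, -2), 1, False)
# contrib ~= [[0.75], [0.25]]: the child's feature traces 75% to parton 0.
_trace_vector.cache_clear()   # the lru_cache keys on the graph object itself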


def flow_trace(
    graph: "Graphicle",
    mask: ty.Union[base.MaskBase, base.BoolVector],
    prop: ty.Union[base.ArrayBase, base.AnyVector],
    exclusive: bool = False,
    target: ty.Optional[ty.Set[int]] = None,
) -> ty.Dict[str, base.DoubleVector]:
    """Performs flow tracing from specified particles in an event, back
    to the hard partons.

    :group: calculate

    .. versionadded:: 0.1.0

    Parameters
    ----------
    graph : Graphicle
        Full particle event, containing hard partons, showering and
        hadronisation.
    mask : MaskArray or MaskGroup or ndarray[bool_]
        Boolean mask identifying which particles should have their
        ancestry traced.
    prop : ArrayBase or ndarray[any]
        Property to trace back, *e.g.* 4-momentum, charge, *etc.* Must
        be the same shape as arrays stored in graph. Can be structured,
        unstructured, or a graphicle array, though unstructured arrays
        must be 1D.
    exclusive : bool
        If ``True``, double counting from descendant particles in the
        hard event will be switched off. *e.g.* for event ``t > b W+``,
        descendants of ``b`` will show no contribution from ``t``, as
        ``b`` is a subset of ``t``. Default is ``False``.
    target : set[int], optional
        Highlights specific partons in the hard event to decompose
        properties with respect to. If unset, will use all partons in
        hard event, except for incoming partons.

    Returns
    -------
    trace_array : dict[str, ndarray]
        Keys are parton names, arrays represent the contributions of
        hard partons traced down to the properties of the selected
        subset of particles specified by mask.
    """
    if isinstance(prop, base.ArrayBase):
        prop = prop.data
    # encoding graph features onto NetworkX
    nx_graph = nx.DiGraph()
    graph_dict = graph.adj.to_dicts(edge_data={"feat": prop})
    example_feat = graph_dict["edges"][0][2]["feat"]
    try:
        feat_dim = len(example_feat)
        dtype = example_feat.dtype
    except TypeError:
        feat_dim = 1
        dtype = np.dtype(type(example_feat))
    is_structured = dtype.names is not None
    nx_graph.add_edges_from(graph_dict["edges"])
    # identify the hard ancestors to which we trace
    hard_mask = graph.hard_mask.copy()
    del hard_mask["incoming"]
    hard_graph = graph[hard_mask.bitwise_or()]
    if target:  # restrict hard partons to user specified pdgs
        target_mask = hard_graph.pdg.mask(
            list(target), blacklist=False, sign_sensitive=True
        )
        hard_graph = hard_graph[target_mask]
    names, vtxs = tuple(hard_graph.pdg.name), tuple(hard_graph.edges["dst"])
    # out vertices of user specified particles
    focus_pcls = graph.edges[mask]["dst"]
    trc = np.array(
        [
            _trace_vector(
                nx_graph, pcl, vtxs, feat_dim, is_structured, exclusive
            )
            for pcl in focus_pcls
        ]
    )
    _trace_vector.cache_clear()
    traces = dict()
    array_fmt: ty.Callable[[base.AnyVector], base.AnyVector] = (
        partial(rfn.unstructured_to_structured, dtype=dtype)  # type: ignore
        if is_structured
        else lambda x: x.squeeze()
    )

    for i, name in enumerate(names):
        traces[name] = array_fmt(trc[:, i, :])
    return traces
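
A hedged usage sketch of the removed API, based on the docstring above. Loading the event is elided; ``graph`` is assumed to be a fully showered ``Graphicle`` event and ``final_mask`` a boolean mask over its particles:

momentum_flow = flow_trace(graph, mask=final_mask, prop=graph.pmu)
# Maps hard-parton names (e.g. "b", "W+") to the per-particle momentum
# contribution each parton makes to the masked particles.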


@nb.njit("float64[:](float64[:], float64[:], float64)", cache=True)
def _rapidity(
    energy: base.DoubleVector, z: base.DoubleVector, zero_tol: float
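
The diff is truncated here, so the body of ``_rapidity`` is not shown. For context only, a hedged sketch of the standard rapidity definition its signature suggests, :math:`y = \tfrac{1}{2}\ln\frac{E + p_z}{E - p_z}` (not the library's actual implementation; the function name is hypothetical):

import numpy as np

def _rapidity_sketch(energy, z, zero_tol):
    # y = 0.5 * ln((E + p_z) / (E - p_z)), with a guard where E ~ |p_z|.
    rap = np.full_like(energy, np.inf)
    np.copysign(rap, z, out=rap)  # +/- infinity along the beam direction
    finite = np.abs(energy - np.abs(z)) > zero_tol
    rap[finite] = 0.5 * np.log(
        (energy[finite] + z[finite]) / (energy[finite] - z[finite])
    )
    return rap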
