368 theoretical prediction breaking with certain bandpower windows (#388)

* All arguments to ell_for_xi must be ints
* Assert preconditions and ensure postcondition
marcpaterno authored Feb 20, 2024
1 parent 56cf8c0 commit d92a72d
Showing 2 changed files with 24 additions and 12 deletions.
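
Note (illustrative, not part of the commit): the sketch below mimics the sampling done by _ell_for_xi with an assumed small maximum, such as might come from a narrow bandpower window, to show how rounding log-spaced samples to integers can leave duplicate ell values; the np.unique call added in this commit removes them.

import numpy as np

# Sketch only: reproduce the pre-commit sampling with an assumed small
# `maximum` (300 is illustrative, e.g. from a narrow bandpower window).
minimum, midpoint, maximum, n_log = 2, 50, 300, 200

lower_range = np.linspace(minimum, midpoint - 1, midpoint - minimum)
upper_range = np.logspace(np.log10(midpoint), np.log10(maximum), n_log)
rounded = np.around(np.concatenate((lower_range, upper_range)))

# Near `midpoint` the log-spaced step is smaller than 0.5, so neighbouring
# samples round to the same integer and `rounded` contains repeated ells.
print(rounded.size, np.unique(rounded).size)  # the second count is smaller
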
32 changes: 22 additions & 10 deletions firecrown/likelihood/gauss_family/statistic/two_point.py
@@ -34,10 +34,12 @@
     "cmbGalaxy_convergenceShear_xi_t": "NG",
 }
 
-ELL_FOR_XI_DEFAULTS = {"minimum": 2, "midpoint": 50, "maximum": 6e4, "n_log": 200}
+ELL_FOR_XI_DEFAULTS = {"minimum": 2, "midpoint": 50, "maximum": 60_000, "n_log": 200}
 
 
-def _ell_for_xi(*, minimum, midpoint, maximum, n_log) -> npt.NDArray[np.float64]:
+def _ell_for_xi(
+    *, minimum: int, midpoint: int, maximum: int, n_log: int
+) -> npt.NDArray[np.float64]:
     """Build an array of ells to sample the power spectrum for real-space
     predictions.
@@ -46,12 +48,15 @@ def _ell_for_xi(*, minimum, midpoint, maximum, n_log) -> npt.NDArray[np.float64]
     logarithmically spaced values. All values are rounded to the nearest
     integer.
     """
+    assert minimum >= 0
+    assert minimum < midpoint
+    assert midpoint < maximum
     lower_range = np.linspace(minimum, midpoint - 1, midpoint - minimum)
     upper_range = np.logspace(np.log10(midpoint), np.log10(maximum), n_log)
     concatenated = np.concatenate((lower_range, upper_range))
     # Round the results to the nearest integer values.
     # N.B. the dtype of the result is np.dtype[float64]
-    return np.around(concatenated)
+    return np.unique(np.around(concatenated))
 
 
 def _generate_ell_or_theta(*, minimum, maximum, n, binning="log"):
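
Illustration (assumes firecrown at this commit is importable; _ell_for_xi is module-private): the new assertions reject inconsistent inputs, and the returned array now carries the postcondition of being sorted with no repeated ells.

import numpy as np
from firecrown.likelihood.gauss_family.statistic.two_point import _ell_for_xi

# Violating a precondition now fails fast, e.g.:
#     _ell_for_xi(minimum=50, midpoint=2, maximum=60_000, n_log=200)  # AssertionError

ells = _ell_for_xi(minimum=2, midpoint=50, maximum=60_000, n_log=200)
assert ells.dtype == np.float64               # still float-valued, per the comment above
assert np.array_equal(ells, np.unique(ells))  # postcondition: sorted, no duplicates
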
@@ -145,14 +150,16 @@ class TwoPoint(Statistic):
         A dictionary of options for making the ell values at which to compute
         Cls for use in real-space integrations. The possible keys are:
-        - min : int, optional - The minimum angular wavenumber to use for
+        - minimum : int, optional - The minimum angular wavenumber to use for
           real-space integrations. Default is 2.
-        - mid : int, optional - The midpoint angular wavenumber to use for
-          real-space integrations. The angular wavenumber samples are linearly
-          spaced at integers between `min` and `mid`. Default is 50.
-        - max : float, optional - The maximum angular wavenumber to use for
+        - midpoint : int, optional - The midpoint angular wavenumber to use
+          for real-space integrations. The angular wavenumber samples are
+          linearly spaced at integers between `minimum` and `midpoint`. Default
+          is 50.
+        - maximum : int, optional - The maximum angular wavenumber to use for
           real-space integrations. The angular wavenumber samples are
-          logarithmically spaced between `mid` and `max`. Default is 6e4.
+          logarithmically spaced between `midpoint` and `maximum`. Default is
+          60,000.
         - n_log : int, optional - The number of logarithmically spaced angular
           wavenumber samples between `mid` and `max`. Default is 200.
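
The docstring update aligns the documented key names with those already used by ELL_FOR_XI_DEFAULTS (the old text used the short forms `min`, `mid`, `max`). A dictionary using the documented keys and the default values looks like this (example only):

# Keys match the keyword arguments of _ell_for_xi; values shown are the defaults,
# with an integer literal rather than the float 6e4 for "maximum".
ell_for_xi = {"minimum": 2, "midpoint": 50, "maximum": 60_000, "n_log": 200}
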
@@ -336,7 +343,12 @@ def _compute_theory_vector(self, tools: ModelingTools) -> TheoryVector:
         if self.ccl_kind == "cl":
             self.ells = self.ell_or_theta_
         else:
-            self.ells = _ell_for_xi(**self.ell_for_xi)
+            self.ells = _ell_for_xi(
+                minimum=int(self.ell_for_xi["minimum"]),
+                midpoint=int(self.ell_for_xi["midpoint"]),
+                maximum=int(self.ell_for_xi["maximum"]),
+                n_log=int(self.ell_for_xi["n_log"]),
+            )
 
         # TODO: we should not be adding a new instance variable outside of
         # __init__. Why is `self.cells` an instance variable rather than a
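
Aside (an assumption about intent, not stated in the commit): the explicit int() casts in _compute_theory_vector guard against float-valued settings such as maximum=6e4, since recent NumPy rejects a non-integer num argument (here midpoint - minimum) in np.linspace.

import numpy as np

minimum, midpoint = 2.0, 50.0  # float-valued settings, e.g. from an older config
try:
    np.linspace(minimum, midpoint - 1, midpoint - minimum)  # num is the float 48.0
except TypeError as err:
    print("float num rejected:", err)

# With the casts applied before calling _ell_for_xi, the same call succeeds:
np.linspace(int(minimum), int(midpoint) - 1, int(midpoint) - int(minimum))
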
4 changes: 2 additions & 2 deletions tests/test_pt_systematics.py
@@ -132,7 +132,7 @@ def test_pt_systematics(weak_lensing_source, number_counts_source, sacc_data):
 
     # Make things faster by only using a couple of ells
     for s in likelihood.statistics:
-        s.ell_for_xi = {"minimum": 2, "midpoint": 5, "maximum": 6e4, "n_log": 10}
+        s.ell_for_xi = {"minimum": 2, "midpoint": 5, "maximum": 60_000, "n_log": 10}
 
     # Compute the log-likelihood, using the ccl.Cosmology object as the input
     _ = likelihood.compute_loglike(modeling_tools)
@@ -297,7 +297,7 @@ def test_pt_mixed_systematics(sacc_data):
 
     # Make things faster by only using a couple of ells
    for s in likelihood.statistics:
-        s.ell_for_xi = {"minimum": 2, "midpoint": 5, "maximum": 6e4, "n_log": 10}
+        s.ell_for_xi = {"minimum": 2, "midpoint": 5, "maximum": 60_000, "n_log": 10}
 
     # Compute the log-likelihood, using the ccl.Cosmology object as the input
     _ = likelihood.compute_loglike(modeling_tools)
