diff --git a/docs/_build/doctrees/binning.doctree b/docs/_build/doctrees/binning.doctree
new file mode 100644
index 00000000..e8444557
Binary files /dev/null and b/docs/_build/doctrees/binning.doctree differ
diff --git a/docs/_build/doctrees/catalog.doctree b/docs/_build/doctrees/catalog.doctree
new file mode 100644
index 00000000..d19f107d
Binary files /dev/null and b/docs/_build/doctrees/catalog.doctree differ
diff --git a/docs/_build/doctrees/changes.doctree b/docs/_build/doctrees/changes.doctree
new file mode 100644
index 00000000..07d1c2ec
Binary files /dev/null and b/docs/_build/doctrees/changes.doctree differ
diff --git a/docs/_build/doctrees/correlation2.doctree b/docs/_build/doctrees/correlation2.doctree
new file mode 100644
index 00000000..5bb099c1
Binary files /dev/null and b/docs/_build/doctrees/correlation2.doctree differ
diff --git a/docs/_build/doctrees/correlation3.doctree b/docs/_build/doctrees/correlation3.doctree
new file mode 100644
index 00000000..3ee6b0b3
Binary files /dev/null and b/docs/_build/doctrees/correlation3.doctree differ
diff --git a/docs/_build/doctrees/cov.doctree b/docs/_build/doctrees/cov.doctree
new file mode 100644
index 00000000..186fe2af
Binary files /dev/null and b/docs/_build/doctrees/cov.doctree differ
diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle
new file mode 100644
index 00000000..519ebb9f
Binary files /dev/null and b/docs/_build/doctrees/environment.pickle differ
diff --git a/docs/_build/doctrees/field.doctree b/docs/_build/doctrees/field.doctree
new file mode 100644
index 00000000..30423191
Binary files /dev/null and b/docs/_build/doctrees/field.doctree differ
diff --git a/docs/_build/doctrees/gg.doctree b/docs/_build/doctrees/gg.doctree
new file mode 100644
index 00000000..d934df54
Binary files /dev/null and b/docs/_build/doctrees/gg.doctree differ
diff --git a/docs/_build/doctrees/ggg.doctree b/docs/_build/doctrees/ggg.doctree
new file mode 100644
index 00000000..25b48d79
Binary files /dev/null and b/docs/_build/doctrees/ggg.doctree differ
diff --git a/docs/_build/doctrees/guide.doctree b/docs/_build/doctrees/guide.doctree
new file mode 100644
index 00000000..2691ffe7
Binary files /dev/null and b/docs/_build/doctrees/guide.doctree differ
diff --git a/docs/_build/doctrees/history.doctree b/docs/_build/doctrees/history.doctree
new file mode 100644
index 00000000..ccdae5fe
Binary files /dev/null and b/docs/_build/doctrees/history.doctree differ
diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree
new file mode 100644
index 00000000..fc47e05a
Binary files /dev/null and b/docs/_build/doctrees/index.doctree differ
diff --git a/docs/_build/doctrees/kg.doctree b/docs/_build/doctrees/kg.doctree
new file mode 100644
index 00000000..8133ede0
Binary files /dev/null and b/docs/_build/doctrees/kg.doctree differ
diff --git a/docs/_build/doctrees/kk.doctree b/docs/_build/doctrees/kk.doctree
new file mode 100644
index 00000000..3a78bf63
Binary files /dev/null and b/docs/_build/doctrees/kk.doctree differ
diff --git a/docs/_build/doctrees/kkk.doctree b/docs/_build/doctrees/kkk.doctree
new file mode 100644
index 00000000..0acff797
Binary files /dev/null and b/docs/_build/doctrees/kkk.doctree differ
diff --git a/docs/_build/doctrees/metric.doctree b/docs/_build/doctrees/metric.doctree
new file mode 100644
index 00000000..59620b40
Binary files /dev/null and b/docs/_build/doctrees/metric.doctree differ
diff --git a/docs/_build/doctrees/ng.doctree b/docs/_build/doctrees/ng.doctree
new file mode 100644
index 00000000..5f971fe6
Binary files /dev/null and b/docs/_build/doctrees/ng.doctree differ
diff --git a/docs/_build/doctrees/nk.doctree b/docs/_build/doctrees/nk.doctree
new file mode 100644
index 00000000..649f2f29
Binary files /dev/null and b/docs/_build/doctrees/nk.doctree differ
diff --git a/docs/_build/doctrees/nn.doctree b/docs/_build/doctrees/nn.doctree
new file mode 100644
index 00000000..abc7f23a
Binary files /dev/null and b/docs/_build/doctrees/nn.doctree differ
diff --git a/docs/_build/doctrees/nnn.doctree b/docs/_build/doctrees/nnn.doctree
new file mode 100644
index 00000000..d94bddc8
Binary files /dev/null and b/docs/_build/doctrees/nnn.doctree differ
diff --git a/docs/_build/doctrees/overview.doctree b/docs/_build/doctrees/overview.doctree
new file mode 100644
index 00000000..15adc812
Binary files /dev/null and b/docs/_build/doctrees/overview.doctree differ
diff --git a/docs/_build/doctrees/params.doctree b/docs/_build/doctrees/params.doctree
new file mode 100644
index 00000000..353200b0
Binary files /dev/null and b/docs/_build/doctrees/params.doctree differ
diff --git a/docs/_build/doctrees/patches.doctree b/docs/_build/doctrees/patches.doctree
new file mode 100644
index 00000000..4aea51c9
Binary files /dev/null and b/docs/_build/doctrees/patches.doctree differ
diff --git a/docs/_build/doctrees/scripts.doctree b/docs/_build/doctrees/scripts.doctree
new file mode 100644
index 00000000..ce6332aa
Binary files /dev/null and b/docs/_build/doctrees/scripts.doctree differ
diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo
new file mode 100644
index 00000000..baf51e43
--- /dev/null
+++ b/docs/_build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: d5f00888c6d3110504c0ad4a618d27b0
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html
new file mode 100644
index 00000000..e0f84241
--- /dev/null
+++ b/docs/_build/html/_modules/index.html
@@ -0,0 +1,125 @@
+Overview: module code — TreeCorr 4.3.0 documentation
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/binnedcorr2.html b/docs/_build/html/_modules/treecorr/binnedcorr2.html
new file mode 100644
index 00000000..7d82fecd
--- /dev/null
+++ b/docs/_build/html/_modules/treecorr/binnedcorr2.html
@@ -0,0 +1,1809 @@
+treecorr.binnedcorr2 — TreeCorr 4.3.0 documentation
+
+Source code for treecorr.binnedcorr2
+
+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: binnedcorr2
+"""
+
+import math
+import numpy as np
+import sys
+import coord
+import itertools
+import collections
+
+from . import _lib
+from .config import merge_config, setup_logger, get
+from .util import parse_metric, metric_enum, coord_enum, set_omp_threads, lazy_property
+from .util import depr_pos_kwargs
+
+class Namespace(object):
+    pass
+
+
+class BinnedCorr2(object):
+    """This class stores the results of a 2-point correlation calculation, along with some
+    ancillary data.
+
+    This is a base class that is not intended to be constructed directly.  But it has a few
+    helper functions that derived classes can use to help perform their calculations.  See
+    the derived classes for more details:
+
+    - `GGCorrelation` handles shear-shear correlation functions
+    - `NNCorrelation` handles count-count correlation functions
+    - `KKCorrelation` handles kappa-kappa correlation functions
+    - `NGCorrelation` handles count-shear correlation functions
+    - `NKCorrelation` handles count-kappa correlation functions
+    - `KGCorrelation` handles kappa-shear correlation functions
+
+    .. note::
+
+        When we refer to kappa in the correlation functions, that is because TreeCorr was
+        originally designed for weak lensing applications.  But in fact any scalar quantity
+        may be used here.  CMB temperature fluctuations for example.
+
+    The constructors for all derived classes take a config dict as the first argument,
+    since this is often how we keep track of parameters, but if you don't want to
+    use one or if you want to change some parameters from what are in a config dict,
+    then you can use normal kwargs, which take precedence over anything in the config dict.
+
+    There are a number of possible definitions for the distance between two points, which
+    are appropriate for different use cases.  These are specified by the ``metric`` parameter.
+    The possible options are:
+
+        - 'Euclidean' = straight line Euclidean distance between two points.
+        - 'FisherRperp' = the perpendicular component of the distance, following the
+          definitions in Fisher et al, 1994 (MNRAS, 267, 927).
+        - 'OldRperp' = the perpendicular component of the distance using the definition
+          of Rperp from TreeCorr v3.x.
+        - 'Rperp' = an alias for FisherRperp.  You can change it to be an alias for
+          OldRperp if you want by setting ``treecorr.Rperp_alias = 'OldRperp'`` before
+          using it.
+        - 'Rlens' = the distance from the first object (taken to be a lens) to the line
+          connecting Earth and the second object (taken to be a lensed source).
+        - 'Arc' = the true great circle distance for spherical coordinates.
+        - 'Periodic' = Like Euclidean, but with periodic boundaries.
+
+    See `Metrics` for more information about these various metric options.
+
+    There are also a few different possible binning prescriptions to define the range of
+    distances that should be placed into each bin.
+
+        - 'Log' - logarithmic binning in the distance.  The bin steps will be uniform in
+          log(r) from log(min_sep) .. log(max_sep).
+        - 'Linear' - linear binning in the distance.  The bin steps will be uniform in r
+          from min_sep .. max_sep.
+        - 'TwoD' = 2-dimensional binning from x = (-max_sep .. max_sep) and
+          y = (-max_sep .. max_sep).  The bin steps will be uniform in both x and y.
+          (i.e. linear in x,y)
+
+    See `Binning` for more information about the different binning options.
+
+    Parameters:
+        config (dict):      A configuration dict that can be used to pass in the below kwargs
+                            if desired.  This dict is allowed to have additional entries besides
+                            those listed below, which are ignored here.  (default: None)
+        logger:             If desired, a logger object for logging.  (default: None, in which
+                            case one will be built according to the config dict's verbose level.)
+
+    Keyword Arguments:
+
+        nbins (int):        How many bins to use.  (Exactly three of nbins, bin_size, min_sep,
+                            max_sep are required.  If nbins is not given or set to None, it will
+                            be calculated from the values of the other three, rounding up to the
+                            next highest integer.  In this case, bin_size will be readjusted to
+                            account for this rounding up.)
+        bin_size (float):   The width of the bins in log(separation).  (Exactly three of nbins,
+                            bin_size, min_sep, max_sep are required.  If bin_size is not given
+                            or set to None, it will be calculated from the values of the other
+                            three.)
+        min_sep (float):    The minimum separation in units of sep_units, if relevant.  (Exactly
+                            three of nbins, bin_size, min_sep, max_sep are required.  If min_sep
+                            is not given or set to None, it will be calculated from the values
+                            of the other three.)
+        max_sep (float):    The maximum separation in units of sep_units, if relevant.  (Exactly
+                            three of nbins, bin_size, min_sep, max_sep are required.  If max_sep
+                            is not given or set to None, it will be calculated from the values
+                            of the other three.)
+
+        sep_units (str):    The units to use for the separation values, given as a string.  This
+                            includes both min_sep and max_sep above, as well as the units of the
+                            output distance values.  Valid options are arcsec, arcmin, degrees,
+                            hours, radians.  (default: radians if angular units make sense, but
+                            for 3-d or flat 2-d positions, the default will just match the units
+                            of x,y[,z] coordinates)
+        bin_slop (float):   How much slop to allow in the placement of pairs in the bins.
+                            If bin_slop = 1, then the bin into which a particular pair is placed
+                            may be incorrect by at most 1.0 bin widths.  (default: None, which
+                            means to use a bin_slop that gives a maximum error of 10% on any
+                            bin, which has been found to yield good results for most
+                            applications.)
+        brute (bool):       Whether to use the "brute force" algorithm.  (default: False)
+                            Options are:
+
+                            - False (the default): Stop at non-leaf cells whenever the error in
+                              the separation is compatible with the given bin_slop.
+                            - True: Go to the leaves for both catalogs.
+                            - 1: Always go to the leaves for cat1, but stop at non-leaf cells of
+                              cat2 when the error is compatible with the given bin_slop.
+                            - 2: Always go to the leaves for cat2, but stop at non-leaf cells of
+                              cat1 when the error is compatible with the given bin_slop.
+
+        verbose (int):      If no logger is provided, this will optionally specify a logging
+                            level to use:
+
+                            - 0 means no logging output
+                            - 1 means to output warnings only (default)
+                            - 2 means to output various progress information
+                            - 3 means to output extensive debugging information
+
+        log_file (str):     If no logger is provided, this will specify a file to write the
+                            logging output.  (default: None; i.e. output to standard output)
+        output_dots (bool): Whether to output progress dots during the calculation of the
+                            correlation function.  (default: False unless verbose is given and
+                            >= 2, in which case True)
+
+        split_method (str): How to split the cells in the tree when building the tree structure.
+                            Options are:
+
+                            - mean = Use the arithmetic mean of the coordinate being split.
+                              (default)
+                            - median = Use the median of the coordinate being split.
+                            - middle = Use the middle of the range; i.e. the average of the
+                              minimum and maximum value.
+                            - random: Use a random point somewhere in the middle two quartiles
+                              of the range.
+
+        min_top (int):      The minimum number of top layers to use when setting up the field.
+                            (default: :math:`\\max(3, \\log_2(N_{\\rm cpu}))`)
+        max_top (int):      The maximum number of top layers to use when setting up the field.
+                            The top-level cells are where each calculation job starts.  There
+                            will typically be of order :math:`2^{\\rm max\\_top}` top-level
+                            cells.  (default: 10)
+        precision (int):    The precision to use for the output values.  This specifies how many
+                            digits to write.  (default: 4)
+        pairwise (bool):    Whether to use a different kind of calculation for cross
+                            correlations whereby corresponding items in the two catalogs are
+                            correlated pairwise rather than the usual case of every item in one
+                            catalog being correlated with every item in the other catalog.
+                            (default: False)  (DEPRECATED)
+        m2_uform (str):     The default functional form to use for aperture mass calculations.
+                            See `calculateMapSq` for more details.  (default: 'Crittenden')
+
+        metric (str):       Which metric to use for distance measurements.  Options are listed
+                            above.  (default: 'Euclidean')
+        bin_type (str):     What type of binning should be used.  Options are listed above.
+                            (default: 'Log')
+        min_rpar (float):   The minimum difference in Rparallel to allow for pairs being
+                            included in the correlation function.  (default: None)
+        max_rpar (float):   The maximum difference in Rparallel to allow for pairs being
+                            included in the correlation function.  (default: None)
+        period (float):     For the 'Periodic' metric, the period to use in all directions.
+                            (default: None)
+        xperiod (float):    For the 'Periodic' metric, the period to use in the x direction.
+                            (default: period)
+        yperiod (float):    For the 'Periodic' metric, the period to use in the y direction.
+                            (default: period)
+        zperiod (float):    For the 'Periodic' metric, the period to use in the z direction.
+                            (default: period)
+
+        var_method (str):   Which method to use for estimating the variance.  Options are:
+                            'shot', 'jackknife', 'sample', 'bootstrap', 'marked_bootstrap'.
+                            (default: 'shot')
+        num_bootstrap (int): How many bootstrap samples to use for the 'bootstrap' and
+                            'marked_bootstrap' var_methods.  (default: 500)
+        rng (RandomState):  If desired, a numpy.random.RandomState instance to use for bootstrap
+                            random number generation.  (default: None)
+
+        num_threads (int):  How many OpenMP threads to use during the calculation.
+                            (default: use the number of cpu cores)
+
+                            .. note::
+
+                                This won't work if the system's C compiler cannot use OpenMP
+                                (e.g. clang prior to version 3.7.)
+    """
+    _valid_params = {
+        'nbins' : (int, False, None, None,
+                'The number of output bins to use.'),
+        'bin_size' : (float, False, None, None,
+                'The size of the output bins in log(sep).'),
+        'min_sep' : (float, False, None, None,
+                'The minimum separation to include in the output.'),
+        'max_sep' : (float, False, None, None,
+                'The maximum separation to include in the output.'),
+        'sep_units' : (str, False, None, coord.AngleUnit.valid_names,
+                'The units to use for min_sep and max_sep.  '
+                'Also the units of the output distances'),
+        'bin_slop' : (float, False, None, None,
+                'The fraction of a bin width by which it is ok to let the pairs miss the '
+                'correct bin.',
+                'The default is to use 1 if bin_size <= 0.1, or 0.1/bin_size if bin_size > 0.1.'),
+        'brute' : (bool, False, False, [False, True, 1, 2],
+                'Whether to use brute-force algorithm'),
+        'verbose' : (int, False, 1, [0, 1, 2, 3],
+                'How verbose the code should be during processing. ',
+                '0 = Errors Only, 1 = Warnings, 2 = Progress, 3 = Debugging'),
+        'log_file' : (str, False, None, None,
+                'If desired, an output file for the logging output.',
+                'The default is to write the output to stdout.'),
+        'output_dots' : (bool, False, None, None,
+                'Whether to output dots to the stdout during the C++-level computation.',
+                'The default is True if verbose >= 2 and there is no log_file.  Else False.'),
+        'split_method' : (str, False, 'mean', ['mean', 'median', 'middle', 'random'],
+                'Which method to use for splitting cells.'),
+        'min_top' : (int, False, None, None,
+                'The minimum number of top layers to use when setting up the field.'),
+        'max_top' : (int, False, 10, None,
+                'The maximum number of top layers to use when setting up the field.'),
+        'precision' : (int, False, 4, None,
+                'The number of digits after the decimal in the output.'),
+        'pairwise' : (bool, True, False, None,
+                'Whether to do a pair-wise cross-correlation. (DEPRECATED)'),
+        'm2_uform' : (str, False, 'Crittenden', ['Crittenden', 'Schneider'],
+                'The function form of the mass aperture.'),
+        'metric': (str, False, 'Euclidean', ['Euclidean', 'Rperp', 'FisherRperp', 'OldRperp',
+                                             'Rlens', 'Arc', 'Periodic'],
+                'Which metric to use for the distance measurements'),
+        'bin_type': (str, False, 'Log', ['Log', 'Linear', 'TwoD'],
+                'Which type of binning should be used'),
+        'min_rpar': (float, False, None, None,
+                'The minimum difference in Rparallel for pairs to include'),
+        'max_rpar': (float, False, None, None,
+                'The maximum difference in Rparallel for pairs to include'),
+        'period': (float, False, None, None,
+                'The period to use for all directions for the Periodic metric'),
+        'xperiod': (float, False, None, None,
+                'The period to use for the x direction for the Periodic metric'),
+        'yperiod': (float, False, None, None,
+                'The period to use for the y direction for the Periodic metric'),
+        'zperiod': (float, False, None, None,
+                'The period to use for the z direction for the Periodic metric'),
+
+        'var_method': (str, False, 'shot',
+                ['shot', 'jackknife', 'sample', 'bootstrap', 'marked_bootstrap'],
+                'The method to use for estimating the variance'),
+        'num_bootstrap': (int, False, 500, None,
+                'How many bootstrap samples to use for the var_method=bootstrap and '
+                'marked_bootstrap'),
+        'num_threads' : (int, False, None, None,
+                'How many threads should be used.  num_threads <= 0 means auto based on num '
+                'cores.'),
+    }
+
+    @depr_pos_kwargs
+    def __init__(self, config=None, *, logger=None, rng=None, **kwargs):
+        self._corr = None  # Do this first to make sure we always have it for __del__
+        self.config = merge_config(config,kwargs,BinnedCorr2._valid_params)
+        if logger is None:
+            self.logger = setup_logger(get(self.config,'verbose',int,1),
+                                       self.config.get('log_file',None))
+        else:
+            self.logger = logger
+
+        # We'll make a bunch of attributes here, which we put into a namespace called _ro.
+        # These are the core attributes that won't ever be changed after construction.
+        # This is an efficiency optimization (both memory and flops), since it will allow
+        # copy() to just copy a pointer to the _ro namespace without having to copy each
+        # individual attribute separately.
+        # These attributes are all accessed via read-only properties.
+        self._ro = Namespace()
+
+        if 'output_dots' in self.config:
+            self._ro.output_dots = get(self.config,'output_dots',bool)
+        else:
+            self._ro.output_dots = get(self.config,'verbose',int,1) >= 2
+
+        self._ro.bin_type = self.config.get('bin_type', None)
+
+        self._ro.sep_units = self.config.get('sep_units','')
+        self._ro._sep_units = get(self.config,'sep_units',str,'radians')
+        self._ro._log_sep_units = math.log(self._sep_units)
+        if self.config.get('nbins', None) is None:
+            if self.config.get('max_sep', None) is None:
+                raise TypeError("Missing required parameter max_sep")
+            if self.config.get('min_sep', None) is None and self.bin_type != 'TwoD':
+                raise TypeError("Missing required parameter min_sep")
+            if self.config.get('bin_size', None) is None:
+                raise TypeError("Missing required parameter bin_size")
+            self._ro.min_sep = float(self.config.get('min_sep',0))
+            self._ro.max_sep = float(self.config['max_sep'])
+            if self.min_sep >= self.max_sep:
+                raise ValueError("max_sep must be larger than min_sep")
+            self._ro.bin_size = float(self.config['bin_size'])
+            self._ro.nbins = None
+        elif self.config.get('bin_size', None) is None:
+            if self.config.get('max_sep', None) is None:
+                raise TypeError("Missing required parameter max_sep")
+            if self.config.get('min_sep', None) is None and self.bin_type != 'TwoD':
+                raise TypeError("Missing required parameter min_sep")
+            self._ro.min_sep = float(self.config.get('min_sep',0))
+            self._ro.max_sep = float(self.config['max_sep'])
+            if self.min_sep >= self.max_sep:
+                raise ValueError("max_sep must be larger than min_sep")
+            self._ro.nbins = int(self.config['nbins'])
+            self._ro.bin_size = None
+        elif self.config.get('max_sep', None) is None:
+            if self.config.get('min_sep', None) is None and self.bin_type != 'TwoD':
+                raise TypeError("Missing required parameter min_sep")
+            self._ro.min_sep = float(self.config.get('min_sep',0))
+            self._ro.nbins = int(self.config['nbins'])
+            self._ro.bin_size = float(self.config['bin_size'])
+            self._ro.max_sep = None
+        else:
+            if self.bin_type == 'TwoD':
+                raise TypeError("Only 2 of max_sep, bin_size, nbins are allowed "
+                                "for bin_type='TwoD'.")
+            if self.config.get('min_sep', None) is not None:
+                raise TypeError("Only 3 of min_sep, max_sep, bin_size, nbins are allowed.")
+            self._ro.max_sep = float(self.config['max_sep'])
+            self._ro.nbins = int(self.config['nbins'])
+            self._ro.bin_size = float(self.config['bin_size'])
+            self._ro.min_sep = None
+
+        if self.bin_type == 'Log':
+            if self.nbins is None:
+                self._ro.nbins = int(math.ceil(math.log(self.max_sep/self.min_sep)/self.bin_size))
+                # Update bin_size given this value of nbins
+                self._ro.bin_size = math.log(self.max_sep/self.min_sep)/self.nbins
+            elif self.bin_size is None:
+                self._ro.bin_size = math.log(self.max_sep/self.min_sep)/self.nbins
+            elif self.max_sep is None:
+                self._ro.max_sep = math.exp(self.nbins*self.bin_size)*self.min_sep
+            else:
+                self._ro.min_sep = self.max_sep*math.exp(-self.nbins*self.bin_size)
+
+            # This makes nbins evenly spaced entries in log(r) starting with 0 with step bin_size
+            self._ro.logr = np.linspace(0, self.nbins*self.bin_size, self.nbins, endpoint=False,
+                                        dtype=float)
+            # Offset by the position of the center of the first bin.
+            self._ro.logr += math.log(self.min_sep) + 0.5*self.bin_size
+            self._ro.rnom = np.exp(self.logr)
+            half_bin = np.exp(0.5*self.bin_size)
+            self._ro.left_edges = self.rnom / half_bin
+            self._ro.right_edges = self.rnom * half_bin
+            self._ro._nbins = self.nbins
+            self._ro._bintype = _lib.Log
+            min_log_bin_size = self.bin_size
+            max_log_bin_size = self.bin_size
+            max_good_slop = 0.1 / self.bin_size
+        elif self.bin_type == 'Linear':
+            if self.nbins is None:
+                self._ro.nbins = int(math.ceil((self.max_sep-self.min_sep)/self.bin_size))
+                # Update bin_size given this value of nbins
+                self._ro.bin_size = (self.max_sep-self.min_sep)/self.nbins
+            elif self.bin_size is None:
+                self._ro.bin_size = (self.max_sep-self.min_sep)/self.nbins
+            elif self.max_sep is None:
+                self._ro.max_sep = self.min_sep + self.nbins*self.bin_size
+            else:
+                self._ro.min_sep = self.max_sep - self.nbins*self.bin_size
+
+            self._ro.rnom = np.linspace(self.min_sep, self.max_sep, self.nbins, endpoint=False,
+                                        dtype=float)
+            # Offset by the position of the center of the first bin.
+            self._ro.rnom += 0.5*self.bin_size
+            self._ro.left_edges = self.rnom - 0.5*self.bin_size
+            self._ro.right_edges = self.rnom + 0.5*self.bin_size
+            self._ro.logr = np.log(self.rnom)
+            self._ro._nbins = self.nbins
+            self._ro._bintype = _lib.Linear
+            min_log_bin_size = self.bin_size / self.max_sep
+            max_log_bin_size = self.bin_size / (self.min_sep + self.bin_size/2)
+            max_good_slop = 0.1 / max_log_bin_size
+        elif self.bin_type == 'TwoD':
+            if self.nbins is None:
+                self._ro.nbins = int(math.ceil(2.*self.max_sep / self.bin_size))
+                self._ro.bin_size = 2.*self.max_sep/self.nbins
+            elif self.bin_size is None:
+                self._ro.bin_size = 2.*self.max_sep/self.nbins
+            else:
+                self._ro.max_sep = self.nbins * self.bin_size / 2.
+
+            sep = np.linspace(-self.max_sep, self.max_sep, self.nbins, endpoint=False,
+                              dtype=float)
+            sep += 0.5 * self.bin_size
+            dx, dy = np.meshgrid(sep, sep)
+            self._ro.left_edges = dx - 0.5*self.bin_size
+            self._ro.right_edges = dx + 0.5*self.bin_size
+            self._ro.bottom_edges = dy - 0.5*self.bin_size
+            self._ro.top_edges = dy + 0.5*self.bin_size
+            self._ro.rnom = np.sqrt(dx**2 + dy**2)
+            self._ro.logr = np.zeros_like(self.rnom)
+            np.log(self.rnom, out=self._ro.logr, where=self.rnom > 0)
+            self._ro.logr[self.rnom==0.] = -np.inf
+            self._ro._nbins = self.nbins**2
+            self._ro._bintype = _lib.TwoD
+            min_log_bin_size = self.bin_size / self.max_sep
+            max_log_bin_size = self.bin_size / (self.min_sep + self.bin_size/2)
+            max_good_slop = 0.1 / max_log_bin_size
+        else:  # pragma: no cover  (Already checked by config layer)
+            raise ValueError("Invalid bin_type %s"%self.bin_type)
+
+        if self.sep_units == '':
+            self.logger.info("nbins = %d, min,max sep = %g..%g, bin_size = %g",
+                             self.nbins, self.min_sep, self.max_sep, self.bin_size)
+        else:
+            self.logger.info("nbins = %d, min,max sep = %g..%g %s, bin_size = %g",
+                             self.nbins, self.min_sep, self.max_sep, self.sep_units,
+                             self.bin_size)
+        # The underscore-prefixed names are in natural units (radians for angles)
+        self._ro._min_sep = self.min_sep * self._sep_units
+        self._ro._max_sep = self.max_sep * self._sep_units
+        if self.bin_type in ['Linear', 'TwoD']:
+            self._ro._bin_size = self.bin_size * self._sep_units
+            min_log_bin_size *= self._sep_units
+        else:
+            self._ro._bin_size = self.bin_size
+
+        self._ro.split_method = self.config.get('split_method','mean')
+        self.logger.debug("Using split_method = %s",self.split_method)
+
+        self._ro.min_top = get(self.config,'min_top',int,None)
+        self._ro.max_top = get(self.config,'max_top',int,10)
+
+        self._ro.bin_slop = get(self.config,'bin_slop',float,-1.0)
+        if self.bin_slop < 0.0:
+            self._ro.bin_slop = min(max_good_slop, 1.0)
+        self._ro.b = min_log_bin_size * self.bin_slop
+        if self.bin_slop > max_good_slop + 0.0001:  # Add some numerical slop
+            self.logger.warning(
+                "Using bin_slop = %g, bin_size = %g, b = %g\n"%(self.bin_slop,self.bin_size,self.b)+
+                "It is recommended to use bin_slop <= %s in this case.\n"%max_good_slop+
+                "Larger values of bin_slop (and hence b) may result in significant inaccuracies.")
+        else:
+            self.logger.debug("Using bin_slop = %g, b = %g",self.bin_slop,self.b)
+
+        self._ro.brute = get(self.config,'brute',bool,False)
+        if self.brute:
+            self.logger.info("Doing brute force calculation%s.",
+                             self.brute is True and "" or
+                             self.brute == 1 and " for first field" or
+                             " for second field")
+        self.coords = None
+        self.metric = None
+        self._ro.min_rpar = get(self.config,'min_rpar',float,-sys.float_info.max)
+        self._ro.max_rpar = get(self.config,'max_rpar',float,sys.float_info.max)
+        if self.min_rpar > self.max_rpar:
+            raise ValueError("min_rpar must be <= max_rpar")
+        period = get(self.config,'period',float,0)
+        self._ro.xperiod = get(self.config,'xperiod',float,period)
+        self._ro.yperiod = get(self.config,'yperiod',float,period)
+        self._ro.zperiod = get(self.config,'zperiod',float,period)
+
+        self._ro.var_method = get(self.config,'var_method',str,'shot')
+        self._ro.num_bootstrap = get(self.config,'num_bootstrap',int,500)
+        self.results = {}  # for jackknife, etc. store the results of each pair of patches.
+        self.npatch1 = self.npatch2 = 1
+        self._rng = rng
+
+    @property
+    def rng(self):
+        if self._rng is None:
+            self._rng = np.random.RandomState()
+        return self._rng
+
+    # Properties for all the read-only attributes ("ro" stands for "read-only")
+    @property
+    def output_dots(self): return self._ro.output_dots
+    @property
+    def bin_type(self): return self._ro.bin_type
+    @property
+    def sep_units(self): return self._ro.sep_units
+    @property
+    def _sep_units(self): return self._ro._sep_units
+    @property
+    def _log_sep_units(self): return self._ro._log_sep_units
+    @property
+    def min_sep(self): return self._ro.min_sep
+    @property
+    def max_sep(self): return self._ro.max_sep
+    @property
+    def bin_size(self): return self._ro.bin_size
+    @property
+    def nbins(self): return self._ro.nbins
+    @property
+    def logr(self): return self._ro.logr
+    @property
+    def rnom(self): return self._ro.rnom
+    @property
+    def left_edges(self): return self._ro.left_edges
+    @property
+    def right_edges(self): return self._ro.right_edges
+    @property
+    def top_edges(self): return self._ro.top_edges
+    @property
+    def bottom_edges(self): return self._ro.bottom_edges
+    @property
+    def _bintype(self): return self._ro._bintype
+    @property
+    def _nbins(self): return self._ro._nbins
+    @property
+    def _min_sep(self): return self._ro._min_sep
+    @property
+    def _max_sep(self): return self._ro._max_sep
+    @property
+    def _bin_size(self): return self._ro._bin_size
+    @property
+    def split_method(self): return self._ro.split_method
+    @property
+    def min_top(self): return self._ro.min_top
+    @property
+    def max_top(self): return self._ro.max_top
+    @property
+    def bin_slop(self): return self._ro.bin_slop
+    @property
+    def b(self): return self._ro.b
+    @property
+    def brute(self): return self._ro.brute
+    @property
+    def min_rpar(self): return self._ro.min_rpar
+    @property
+    def max_rpar(self): return self._ro.max_rpar
+    @property
+    def xperiod(self): return self._ro.xperiod
+    @property
+    def yperiod(self): return self._ro.yperiod
+    @property
+    def zperiod(self): return self._ro.zperiod
+    @property
+    def var_method(self): return self._ro.var_method
+    @property
+    def num_bootstrap(self): return self._ro.num_bootstrap
+    @property
+    def _d1(self): return self._ro._d1
+    @property
+    def _d2(self): return self._ro._d2
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        d.pop('_corr',None)
+        d.pop('_ok',None)     # Remake this as needed.
+        d.pop('logger',None)  # Oh well.  This is just lost in the copy.  Can't be pickled.
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self._corr = None
+        self.logger = setup_logger(get(self.config,'verbose',int,1),
+                                   self.config.get('log_file',None))
+
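+    # Editor's note (not part of the upstream source): a minimal sketch of how the binning
+    # parameters documented in the class docstring are typically supplied via a derived
+    # class.  The catalog file name and column names here are hypothetical.
+    #
+    #     >>> import treecorr
+    #     >>> cat = treecorr.Catalog('cat.fits', ra_col='RA', dec_col='DEC',
+    #     ...                        g1_col='G1', g2_col='G2',
+    #     ...                        ra_units='deg', dec_units='deg')
+    #     >>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
+    #     ...                             sep_units='arcmin', bin_slop=0.1)
+    #     >>> gg.process(cat)
+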
+    def clear(self):
+        """Clear all data vectors, the results dict, and any related values.
+        """
+        self._clear()
+        self.results = {}
+        self.npatch1 = self.npatch2 = 1
+        self.__dict__.pop('_ok',None)
+
+    @property
+    def nonzero(self):
+        """Return if there are any values accumulated yet.  (i.e. npairs > 0)
+        """
+        return np.any(self.npairs)
+
+    def _add_tot(self, i, j, c1, c2):
+        # No op for all but NNCorrelation, which needs to add the tot value
+        pass
+
+    def _trivially_zero(self, c1, c2, metric):
+        # For now, ignore the metric.  Just be conservative about how much space we need.
+        x1,y1,z1,s1 = c1._get_center_size()
+        x2,y2,z2,s2 = c2._get_center_size()
+        return _lib.TriviallyZero(self.corr, self._d1, self._d2, self._bintype,
+                                  self._metric, self._coords,
+                                  x1, y1, z1, s1, x2, y2, z2, s2)
+
+    def _process_all_auto(self, cat1, metric, num_threads, comm, low_mem):
+
+        def is_my_job(my_indices, i, j, n):
+            # Helper function to figure out if a given (i,j) job should be done on the
+            # current process.
+
+            # Always my job if not using MPI.
+            if my_indices is None:
+                return True
+
+            # Now the tricky part.  If using MPI, we need to divide up the jobs smartly.
+            # The first point is to divvy up the auto jobs evenly.  This is where most of the
+            # work is done, so we want those to be spread as evenly as possible across procs.
+            # Therefore, if both indices are mine, then do the job.
+            # This reduces the number of catalogs this machine needs to load up.
+            # If the auto i,i and j,j are both my job, then i and j are already being loaded
+            # on this machine, so also do that job.
+            if i in my_indices and j in my_indices:
+                self.logger.info("Rank %d: Job (%d,%d) is mine.",rank,i,j)
+                return True
+
+            # If neither index is mine, then it's not my job.
+            if i not in my_indices and j not in my_indices:
+                return False
+
+            # For the other jobs, we want to minimize how many other catalogs need to be
+            # loaded.  Unfortunately, the nature of pairs is such that we can't reduce this
+            # too much.  For the set of jobs i,j where i belongs to proc 1 and j belongs to
+            # proc 2, half of these pairs need to be assigned to each proc.
+            # The best I could figure for this is to give even i to proc 1 and odd i to proc 2.
+            # This means proc 1 has to load all the j catalogs, but proc 2 can skip half the i
+            # catalogs.  This would naively have the result that procs with lower indices
+            # have to load more catalogs than those with higher indices, since i < j.
+            # So we reverse the procedure when j-i > n/2 to spread out the I/O more.
+            if j-i < n//2:
+                ret = i % 2 == (0 if i in my_indices else 1)
+            else:
+                ret = j % 2 == (0 if j in my_indices else 1)
+            if ret:
+                self.logger.info("Rank %d: Job (%d,%d) is mine.",rank,i,j)
+            return ret
+
+        if len(cat1) == 1 and cat1[0].npatch == 1:
+            self.process_auto(cat1[0], metric=metric, num_threads=num_threads)
+        else:
+            # When patch processing, keep track of the pair-wise results.
+            if self.npatch1 == 1:
+                self.npatch1 = self.npatch2 = cat1[0].npatch if cat1[0].npatch != 1 else len(cat1)
+            n = self.npatch1
+
+            # Setup for deciding when this is my job.
+            if comm:
+                size = comm.Get_size()
+                rank = comm.Get_rank()
+                my_indices = np.arange(n * rank // size, n * (rank+1) // size)
+                self.logger.info("Rank %d: My indices are %s",rank,my_indices)
+            else:
+                my_indices = None
+
+            self._set_metric(metric, cat1[0].coords)
+            temp = self.copy()
+            temp.results = {}  # Don't mess up the original results
+            for ii,c1 in enumerate(cat1):
+                i = c1.patch if c1.patch is not None else ii
+                if is_my_job(my_indices, i, i, n):
+                    temp._clear()
+                    self.logger.info('Process patch %d auto',i)
+                    temp.process_auto(c1, metric=metric, num_threads=num_threads)
+                    if (i,i) not in self.results:
+                        self.results[(i,i)] = temp.copy()
+                    else:
+                        self.results[(i,i)] += temp
+                    self += temp
+                for jj,c2 in list(enumerate(cat1))[::-1]:
+                    j = c2.patch if c2.patch is not None else jj
+                    if i < j and is_my_job(my_indices, i, j, n):
+                        temp._clear()
+                        if not self._trivially_zero(c1,c2,metric):
+                            self.logger.info('Process patches %d,%d cross',i,j)
+                            temp.process_cross(c1, c2, metric=metric, num_threads=num_threads)
+                        else:
+                            self.logger.info('Skipping %d,%d pair, which are too far apart ' +
+                                             'for this set of separations',i,j)
+                        if temp.nonzero:
+                            if (i,j) not in self.results:
+                                self.results[(i,j)] = temp.copy()
+                            else:
+                                self.results[(i,j)] += temp
+                            self += temp
+                        else:
+                            # NNCorrelation needs to add the tot value
+                            self._add_tot(i, j, c1, c2)
+                        if low_mem and jj != ii+1:
+                            # Don't unload i+1, since that's the next one we'll need.
+                            c2.unload()
+                if low_mem:
+                    c1.unload()
+            if comm is not None:
+                rank = comm.Get_rank()
+                size = comm.Get_size()
+                self.logger.info("Rank %d: Completed jobs %s",rank,list(self.results.keys()))
+                # Send all the results back to rank 0 process.
+                if rank > 0:
+                    comm.send(self, dest=0)
+                else:
+                    for p in range(1,size):
+                        temp = comm.recv(source=p)
+                        self += temp
+                        self.results.update(temp.results)
+
+    def _process_all_cross(self, cat1, cat2, metric, num_threads, comm, low_mem):
+
+        def is_my_job(my_indices, i, j, n1, n2):
+            # Helper function to figure out if a given (i,j) job should be done on the
+            # current process.
+
+            # Always my job if not using MPI.
+            if my_indices is None:
+                return True
+
+            # This is much simpler than in the auto case, since the set of catalogs for
+            # cat1 and cat2 are different, we can just split up one of them among the jobs.
+            if n1 > n2:
+                k = i
+            else:
+                k = j
+            if k in my_indices:
+                self.logger.info("Rank %d: Job (%d,%d) is mine.",rank,i,j)
+                return True
+            else:
+                return False
+
+        if get(self.config,'pairwise',bool,False):
+            import warnings
+            warnings.warn("The pairwise option is slated to be removed in a future version. "+
+                          "If you are actually using this parameter usefully, please "+
+                          "open an issue to describe your use case.", FutureWarning)
+            if len(cat1) != len(cat2):
+                raise ValueError("Number of files for 1 and 2 must be equal for pairwise.")
+            for c1,c2 in zip(cat1,cat2):
+                if c1.ntot != c2.ntot:
+                    raise ValueError("Number of objects must be equal for pairwise.")
+                self.process_pairwise(c1, c2, metric=metric, num_threads=num_threads)
+        elif len(cat1) == 1 and len(cat2) == 1 and cat1[0].npatch == 1 and cat2[0].npatch == 1:
+            self.process_cross(cat1[0], cat2[0], metric=metric, num_threads=num_threads)
+        else:
+            # When patch processing, keep track of the pair-wise results.
+            if self.npatch1 == 1:
+                self.npatch1 = cat1[0].npatch if cat1[0].npatch != 1 else len(cat1)
+            if self.npatch2 == 1:
+                self.npatch2 = cat2[0].npatch if cat2[0].npatch != 1 else len(cat2)
+            if self.npatch1 != self.npatch2 and self.npatch1 != 1 and self.npatch2 != 1:
+                raise RuntimeError("Cross correlation requires both catalogs use the same patches.")
+
+            # Setup for deciding when this is my job.
+            n1 = self.npatch1
+            n2 = self.npatch2
+            if comm:
+                size = comm.Get_size()
+                rank = comm.Get_rank()
+                n = max(n1,n2)
+                my_indices = np.arange(n * rank // size, n * (rank+1) // size)
+                self.logger.info("Rank %d: My indices are %s",rank,my_indices)
+            else:
+                my_indices = None
+
+            self._set_metric(metric, cat1[0].coords, cat2[0].coords)
+            temp = self.copy()
+            temp.results = {}  # Don't mess up the original results
+            for ii,c1 in enumerate(cat1):
+                i = c1.patch if c1.patch is not None else ii
+                for jj,c2 in enumerate(cat2):
+                    j = c2.patch if c2.patch is not None else jj
+                    if is_my_job(my_indices, i, j, n1, n2):
+                        temp._clear()
+                        if not self._trivially_zero(c1,c2,metric):
+                            self.logger.info('Process patches %d,%d cross',i,j)
+                            temp.process_cross(c1, c2, metric=metric, num_threads=num_threads)
+                        else:
+                            self.logger.info('Skipping %d,%d pair, which are too far apart ' +
+                                             'for this set of separations',i,j)
+                        if temp.nonzero or i==j or n1==1 or n2==1:
+                            if (i,j) not in self.results:
+                                self.results[(i,j)] = temp.copy()
+                            else:
+                                self.results[(i,j)] += temp
+                            self += temp
+                        else:
+                            # NNCorrelation needs to add the tot value
+                            self._add_tot(i, j, c1, c2)
+                        if low_mem:
+                            c2.unload()
+                if low_mem:
+                    c1.unload()
+            if comm is not None:
+                rank = comm.Get_rank()
+                size = comm.Get_size()
+                self.logger.info("Rank %d: Completed jobs %s",rank,list(self.results.keys()))
+                # Send all the results back to rank 0 process.
+                if rank > 0:
+                    comm.send(self, dest=0)
+                else:
+                    for p in range(1,size):
+                        temp = comm.recv(source=p)
+                        self += temp
+                        self.results.update(temp.results)
+
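+    # Editor's note (not part of the upstream source): the per-patch job division above is
+    # driven by building the catalog with npatch and, optionally, passing an mpi4py
+    # communicator to process().  A hedged sketch; the file name is hypothetical, and the
+    # comm/low_mem keywords are as documented for the derived classes' process() methods.
+    #
+    #     >>> from mpi4py import MPI
+    #     >>> cat = treecorr.Catalog('cat.fits', ra_col='RA', dec_col='DEC',
+    #     ...                        g1_col='G1', g2_col='G2',
+    #     ...                        ra_units='deg', dec_units='deg', npatch=64)
+    #     >>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
+    ...                                   sep_units='arcmin')
+    #     >>> gg.process(cat, comm=MPI.COMM_WORLD, low_mem=True)
+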
+    def getStat(self):
+        """The standard statistic for the current correlation object as a 1-d array.
+
+        Usually, this is just self.xi.  But if the bin_type is TwoD, this becomes
+        self.xi.ravel().  And for `GGCorrelation`, it is the concatenation of self.xip
+        and self.xim.
+        """
+        return self.xi.ravel()
+
+    def getWeight(self):
+        """The weight array for the current correlation object as a 1-d array.
+
+        This is the weight array corresponding to `getStat`.  Usually just self.weight, but
+        raveled for TwoD and duplicated for GGCorrelation to match what `getStat` does in
+        those cases.
+        """
+        return self.weight.ravel()
+
+    @depr_pos_kwargs
+    def estimate_cov(self, method, *, func=None, comm=None):
+        """Estimate the covariance matrix based on the data
+
+        This function will calculate an estimate of the covariance matrix according to the
+        given method.
+
+        Options for ``method`` include:
+
+        - 'shot' = The variance based on "shot noise" only.  This includes the Poisson
+          counts of points for N statistics, shape noise for G statistics, and the observed
+          scatter in the values for K statistics.  In this case, the returned covariance
+          matrix will be diagonal, since there is no way to estimate the off-diagonal terms.
+        - 'jackknife' = A jackknife estimate of the covariance matrix based on the scatter
+          in the measurement when excluding one patch at a time.
+        - 'sample' = An estimate based on the sample covariance of a set of samples,
+          taken as the patches of the input catalog.
+        - 'bootstrap' = A bootstrap covariance estimate.  It selects patches at random with
+          replacement and then generates the statistic using all the auto-correlations at
+          their selected repetition plus all the cross terms that aren't actually auto terms.
+        - 'marked_bootstrap' = An estimate based on a marked-point bootstrap resampling of
+          the patches.  Similar to bootstrap, but only samples the patches of the first
+          catalog and uses all patches from the second catalog that correspond to each patch
+          selection of the first catalog.  Based on the algorithm presented in Loh (2008).
+          cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/
+
+        Both 'bootstrap' and 'marked_bootstrap' use the num_bootstrap parameter, which can
+        be set on construction.
+
+        .. note::
+
+            For most classes, there is only a single statistic, so this calculates a
+            covariance matrix for that vector.  `GGCorrelation` has two: ``xip`` and ``xim``,
+            so in this case the full data vector is ``xip`` followed by ``xim``, and this
+            calculates the covariance matrix for that full vector including both statistics.
+            The helper function `getStat` returns the relevant statistic in all cases.
+
+        In all cases, the relevant processing needs to already have been completed and
+        finalized.  And for all methods other than 'shot', the processing should have
+        involved an appropriate number of patches -- preferably more patches than the length
+        of the vector for your statistic, although this is not checked.
+
+        The default data vector to use for the covariance matrix is given by the method
+        `getStat`.  As noted above, this is usually just self.xi.  However, there is an
+        option to compute the covariance of some other function of the correlation object by
+        providing an arbitrary function, ``func``, which should act on the current
+        correlation object and return the data vector of interest.
+
+        For instance, for an `NGCorrelation`, you might want to compute the covariance of
+        the imaginary part, ``ng.xi_im``, rather than the real part.  In this case you could
+        use
+
+            >>> func = lambda ng: ng.xi_im
+
+        The return value from this func should be a single numpy array.  (This is not
+        directly checked, but you'll probably get some kind of exception if it doesn't
+        behave as expected.)
+
+        .. note::
+
+            The optional ``func`` parameter is not valid in conjunction with
+            ``method='shot'``.  It only works for the methods that are based on patch
+            combinations.
+
+        This function can be parallelized by passing the comm argument as an mpi4py
+        communicator to parallelize using that.  For MPI, all processes should have the same
+        inputs.  If method == "shot" then parallelization has no effect.
+
+        Parameters:
+            method (str):       Which method to use to estimate the covariance matrix.
+            func (function):    A unary function that acts on the current correlation object
+                                and returns the desired data vector.  [default: None, which
+                                is equivalent to ``lambda corr: corr.getStat()``]
+            comm (mpi comm):    If not None, run under MPI
+
+        Returns:
+            A numpy array with the estimated covariance matrix.
+        """
+        if func is not None:
+            # Need to convert it to a function of the first item in the list.
+            all_func = lambda corrs: func(corrs[0])
+        else:
+            all_func = None
+        return estimate_multi_cov([self], method=method, func=all_func, comm=comm)
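+
+    # Editor's note (not part of the upstream source): typical usage, assuming `gg` was
+    # processed from a catalog built with npatch > 1 so the patch-based methods apply.
+    # The `ng` object in the last line is hypothetical, per the docstring example above.
+    #
+    #     >>> cov = gg.estimate_cov('jackknife')
+    #     >>> sigma = np.sqrt(np.diagonal(cov))  # 1-sigma errors on gg.getStat()
+    #     >>> cov_im = ng.estimate_cov('jackknife', func=lambda ng: ng.xi_im)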
+
+    def build_cov_design_matrix(self, method, *, func=None, comm=None):
+        """Build the design matrix that is used for estimating the covariance matrix.
+
+        The design matrix for patch-based covariance estimates is a matrix where each row
+        corresponds to a different estimate of the data vector, :math:`\\xi_i` (or
+        :math:`f(\\xi_i)` if using the optional ``func`` parameter).
+
+        The different rows in the matrix for each valid ``method`` are:
+
+        - 'shot': This method is not valid here.
+        - 'jackknife': The data vector when excluding a single patch.
+        - 'sample': The data vector using only a single patch for the first catalog.
+        - 'bootstrap': The data vector for a random resampling of the patches keeping the
+          sample total number, but allowing some to repeat.  Cross terms from repeated
+          patches are excluded (since they are really auto terms).
+        - 'marked_bootstrap': The data vector for a random resampling of patches in the
+          first catalog, using all patches for the second catalog.  Based on the algorithm
+          in Loh (2008).
+
+        See `estimate_cov` for more details.
+
+        The return value includes both the design matrix and a vector of weights (the total
+        weight array in the computed correlation functions).  The weights are used for the
+        sample method when estimating the covariance matrix.  The other methods ignore them,
+        but they are provided here in case they are useful.
+
+        Parameters:
+            method (str):       Which method to use to estimate the covariance matrix.
+            func (function):    A unary function that takes the list ``corrs`` and returns
+                                the desired full data vector.  [default: None, which is
+                                equivalent to
+                                ``lambda corrs: np.concatenate([c.getStat() for c in corrs])``]
+            comm (mpi comm):    If not None, run under MPI
+
+        Returns:
+            A, w: numpy arrays with the design matrix and weights respectively.
+        """
+        if func is not None:
+            # Need to convert it to a function of the first item in the list.
+            all_func = lambda corrs: func(corrs[0])
+        else:
+            all_func = None
+        return build_multi_cov_design_matrix([self], method=method, func=all_func, comm=comm)
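+
+    # Editor's note (not part of the upstream source): a sketch of inspecting the design
+    # matrix; each row described above is one resampled estimate of the data vector.
+    #
+    #     >>> A, w = gg.build_cov_design_matrix('jackknife')
+    #     >>> A.shape  # (number of resamplings, len(gg.getStat()))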
+
+    def _set_num_threads(self, num_threads):
+        if num_threads is None:
+            num_threads = self.config.get('num_threads',None)
+        # Recheck.
+        if num_threads is None:
+            self.logger.debug('Set num_threads automatically')
+        else:
+            self.logger.debug('Set num_threads = %d',num_threads)
+        set_omp_threads(num_threads, self.logger)
+
+    def _set_metric(self, metric, coords1, coords2=None):
+        if metric is None:
+            metric = get(self.config,'metric',str,'Euclidean')
+        coords, metric = parse_metric(metric, coords1, coords2)
+        if coords != '3d':
+            if self.min_rpar != -sys.float_info.max:
+                raise ValueError("min_rpar is only valid for 3d coordinates")
+            if self.max_rpar != sys.float_info.max:
+                raise ValueError("max_rpar is only valid for 3d coordinates")
+        if self.sep_units != '' and coords == '3d' and metric != 'Arc':
+            raise ValueError("sep_units is invalid with 3d coordinates. "
+                             "min_sep and max_sep should be in the same units as r (or x,y,z).")
+        if self.coords is not None or self.metric is not None:
+            if coords != self.coords:
+                self.logger.warning("Detected a change in catalog coordinate systems.\n"+
+                                    "This probably doesn't make sense!")
+            if metric != self.metric:
+                self.logger.warning("Detected a change in metric.\n"+
+                                    "This probably doesn't make sense!")
+        if metric == 'Periodic':
+            if self.xperiod == 0 or self.yperiod == 0 or (coords=='3d' and self.zperiod == 0):
+                raise ValueError("Periodic metric requires setting the period to use.")
+        else:
+            if self.xperiod != 0 or self.yperiod != 0 or self.zperiod != 0:
+                raise ValueError("period options are not valid for %s metric."%metric)
+        self.coords = coords  # These are the regular string values
+        self.metric = metric
+        self._coords = coord_enum(coords)  # These are the C++-layer enums
+        self._metric = metric_enum(metric)
+
+    def _apply_units(self, mask):
+        if self.coords == 'spherical' and self.metric == 'Euclidean':
+            # Then our distances are all angles.  Convert from the chord distance to a real angle.
+            # L = 2 sin(theta/2)
+            self.meanr[mask] = 2. * np.arcsin(self.meanr[mask]/2.)
+            self.meanlogr[mask] = np.log( 2. * np.arcsin(np.exp(self.meanlogr[mask])/2.) )
+        self.meanr[mask] /= self._sep_units
+        self.meanlogr[mask] -= self._log_sep_units
+
+    def _get_minmax_size(self):
+        if self.metric == 'Euclidean':
+            # The minimum size cell that will be useful is one where two cells that just barely
+            # don't split have (d + s1 + s2) = minsep
+            # The largest s2 we need to worry about is s2 = 2s1.
+            # i.e. d = minsep - 3s1 and s1 = 0.5 * bd
+            #      d = minsep - 1.5 bd
+            #      d = minsep / (1+1.5 b)
+            #      s = 0.5 * b * minsep / (1+1.5 b)
+            #        = b * minsep / (2+3b)
+            min_size = self._min_sep * self.b / (2.+3.*self.b)
+
+            # The maximum size cell that will be useful is one where a cell of size s will
+            # be split at the maximum separation even if the other size = 0.
+            # i.e. max_size = max_sep * b
+            max_size = self._max_sep * self.b
+            return min_size, max_size
+        else:
+            # For other metrics, the above calculation doesn't really apply, so just skip
+            # this relatively modest optimization and go all the way to the leaves.
+            # (And for the max_size, always split 10 levels for the top-level cells.)
+            return 0., 0.
+
+    @depr_pos_kwargs
+    def sample_pairs(self, n, cat1, cat2, *, min_sep, max_sep, metric=None):
+        """Return a random sample of n pairs whose separations fall between min_sep and max_sep.
+
+        This would typically be used to get some random subset of the indices of pairs that
+        fell into a particular bin of the correlation.  E.g. to get 100 pairs from the third
+        bin of a `BinnedCorr2` instance, corr, you could write::
+
+            >>> min_sep = corr.left_edges[2]   # third bin has i=2
+            >>> max_sep = corr.right_edges[2]
+            >>> i1, i2, sep = corr.sample_pairs(100, cat1, cat2,
+            ...                                 min_sep=min_sep, max_sep=max_sep)
+
+        The min_sep and max_sep should use the same units as were defined when constructing
+        the corr instance.
+
+        The selection process will also use the same bin_slop as specified (either explicitly
+        or implicitly) when constructing the corr instance.  This means that some of the
+        pairs may have actual separations slightly outside of the specified range.  If you
+        want a selection using an exact range without any slop, you should construct a new
+        Correlation instance with bin_slop=0, and call sample_pairs with that.
+
+        The returned separations will likewise correspond to the separation of the cells in
+        the tree that TreeCorr used to place the pairs into the given bin.  Therefore, if
+        these cells were not leaf cells, then they will not typically be equal to the real
+        separations for the given metric.  If you care about the exact separations for each
+        pair, you should either call sample_pairs from a Correlation instance with brute=True
+        or recalculate the distances yourself from the original data.
+
+        Also, note that min_sep and max_sep may be arbitrary.  There is no requirement that
+        they be edges of one of the standard bins for this correlation function.  There is
+        also no requirement that this correlation instance has already accumulated pairs via
+        a call to process with these catalogs.
+
+        Parameters:
+            n (int):            How many samples to return.
+            cat1 (Catalog):     The catalog from which to sample the first object of each pair.
+            cat2 (Catalog):     The catalog from which to sample the second object of each
+                                pair.  (This may be the same as cat1.)
+            min_sep (float):    The minimum separation for the returned pairs (modulo some
+                                slop allowed by the bin_slop parameter).  (Note: keyword name
+                                is required for this parameter: min_sep=min_sep)
+            max_sep (float):    The maximum separation for the returned pairs (modulo some
+                                slop allowed by the bin_slop parameter).  (Note: keyword name
+                                is required for this parameter: max_sep=max_sep)
+            metric (str):       Which metric to use.  See `Metrics` for details.  (default:
+                                self.metric, or 'Euclidean' if not set yet)
+
+        Returns:
+            Tuple containing
+
+            - i1 (array): indices of objects from cat1
+            - i2 (array): indices of objects from cat2
+            - sep (array): separations of the pairs of objects (i1,i2)
+        """
+        from .util import long_ptr as lp
+        from .util import double_ptr as dp
+
+        if metric is None:
+            metric = self.config.get('metric', 'Euclidean')
+
+        self._set_metric(metric, cat1.coords, cat2.coords)
+
+        f1 = cat1.field
+        f2 = cat2.field
+
+        if f1 is None or f1._coords != self._coords:
+            # I don't really know if it's possible to get the coords out of sync,
+            # so the 2nd check might be superfluous.
+            # The first one though is definitely possible, so we need to check that.
+            self.logger.debug("In sample_pairs, making default field for cat1")
+            min_size, max_size = self._get_minmax_size()
+            f1 = cat1.getNField(min_size=min_size, max_size=max_size,
+                                split_method=self.split_method,
+                                brute=self.brute is True or self.brute == 1,
+                                min_top=self.min_top, max_top=self.max_top,
+                                coords=self.coords)
+        if f2 is None or f2._coords != self._coords:
+            self.logger.debug("In sample_pairs, making default field for cat2")
+            min_size, max_size = self._get_minmax_size()
+            f2 = cat2.getNField(min_size=min_size, max_size=max_size,
+                                split_method=self.split_method,
+                                brute=self.brute is True or self.brute == 2,
+                                min_top=self.min_top, max_top=self.max_top,
+                                coords=self.coords)
+
+        # Apply units to min_sep, max_sep:
+        min_sep *= self._sep_units
+        max_sep *= self._sep_units
+
+        i1 = np.zeros(n, dtype=int)
+        i2 = np.zeros(n, dtype=int)
+        sep = np.zeros(n, dtype=float)
+        ntot = _lib.SamplePairs(self.corr, f1.data, f2.data, min_sep, max_sep,
+                                f1._d, f2._d, self._coords, self._bintype, self._metric,
+                                lp(i1), lp(i2), dp(sep), n)
+
+        if ntot < n:
+            n = ntot
+            i1 = i1[:n]
+            i2 = i2[:n]
+            sep = sep[:n]
+        # Convert back to nominal units
+        sep /= self._sep_units
+        self.logger.info("Sampled %d pairs out of a total of %d.", n, ntot)
+
+        return i1, i2, sep
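+
+    # Editor's note (not part of the upstream source): per the docstring above, an exact
+    # selection without slop can be obtained by building a zero-slop instance first.  A
+    # hedged sketch, reusing the hypothetical `cat` from the earlier sketch:
+    #
+    #     >>> corr0 = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
+    #     ...                                sep_units='arcmin', bin_slop=0)
+    #     >>> i1, i2, sep = corr0.sample_pairs(100, cat, cat, min_sep=5., max_sep=10.)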
+ + # Some helper functions that are relevant for doing the covariance stuff below. + # Note: the word "pairs" in many of these is appropriate for 2pt, but in the 3pt case + # (cf. binnedcorr3.py), these actually refer to triples (i,j,k). + + def _get_npatch(self): + return max(self.npatch1, self.npatch2) + + def _calculate_xi_from_pairs(self, pairs): + # Compute the xi data vector for the given list of pairs. + # pairs is input as a list of (i,j) values. + + # This is the normal calculation. It needs to be overridden when there are randoms. + self._sum([self.results[ij] for ij in pairs]) + self._finalize() + + ######################################################################################### + # # + # Important note for the following two functions. # + # These use to have lines like this: ` # + # # + # return [ [(j,k) for j,k in self.results.keys() if j!=i and k!=i] # + # for i in range(self.npatch1) ] # + # # + # This double list comprehension ends up with a list of lists that takes O(npatch^3) # + # memory, which for moderately large npatch values (say 500) can be multip[le GBytes. # + # # + # The straightforward solution was to change this to using generators: # + # # + # return [ ((j,k) for j,k in self.results.keys() if j!=i and k!=i) # + # for i in range(self.npatch1) ] # + # # + # But this doesn't work with the MPI covariance calculation, since generators aren't # + # picklable. So the iterator classes below hold off making the generator until the # + # iteration is actually started, which keeps them picklable. # + # # + ######################################################################################### + + class PairIterator(collections.abc.Iterator): + def __init__(self, results, npatch1, npatch2, index, ok=None): + self.results = results + self.npatch1 = npatch1 + self.npatch2 = npatch2 + self.index = index + self.ok = ok + + def __iter__(self): + self.gen = iter(self.make_gen()) + return self + + def __next__(self): + return next(self.gen) + + class JackknifePairIterator(PairIterator): + def make_gen(self): + if self.npatch2 == 1: + # k=0 here + return ((j,k) for j,k in self.results.keys() if j!=self.index) + elif self.npatch1 == 1: + # j=0 here + return ((j,k) for j,k in self.results.keys() if k!=self.index) + else: + # For each i: + # Select all pairs where neither is i. + assert self.npatch1 == self.npatch2 + return ((j,k) for j,k in self.results.keys() if j!=self.index and k!=self.index) + + def _jackknife_pairs(self): + np = self.npatch1 if self.npatch1 != 1 else self.npatch2 + return [self.JackknifePairIterator(self.results, self.npatch1, self.npatch2, i) + for i in range(np)] + + class SamplePairIterator(PairIterator): + def make_gen(self): + if self.npatch2 == 1: + # k=0 here. + return ((j,k) for j,k in self.results.keys() if j==self.index) + elif self.npatch1 == 1: + # j=0 here. + return ((j,k) for j,k in self.results.keys() if k==self.index) + else: + assert self.npatch1 == self.npatch2 + # Note: It's not obvious to me a priori which of these should be the right choice. + # Empirically, they both underestimate the variance, but the second one + # does so less on the tests I have in test_patch.py. So that's the one I'm + # using. + # For each i: + # Select all pairs where either is i. + #return ((j,k) for j,k in self.results.keys() if j==self.index or k==self.index) + # + # For each i: + # Select all pairs where first is i. 
+                return ((j,k) for j,k in self.results.keys() if j==self.index)
+
+    def _sample_pairs(self):
+        np = self.npatch1 if self.npatch1 != 1 else self.npatch2
+        return [self.SamplePairIterator(self.results, self.npatch1, self.npatch2, i)
+                for i in range(np)]
+
+    @lazy_property
+    def _ok(self):
+        # It's much faster to make the pair lists for bootstrap iterators if we keep track of
+        # which (i,j) pairs are in the results dict using an "ok" matrix for quick access.
+        ok = np.zeros((self.npatch1, self.npatch2), dtype=bool)
+        for (i,j) in self.results:
+            ok[i,j] = True
+        return ok
+
+    class MarkedPairIterator(PairIterator):
+        def make_gen(self):
+            if self.npatch2 == 1:
+                return ( (i,0) for i in self.index if self.ok[i,0] )
+            elif self.npatch1 == 1:
+                return ( (0,i) for i in self.index if self.ok[0,i] )
+            else:
+                assert self.npatch1 == self.npatch2
+                # Select all pairs where first point is in index (repeating i as appropriate)
+                return ( (i,j) for i in self.index for j in range(self.npatch2) if self.ok[i,j] )
+
+    def _marked_pairs(self, index):
+        return self.MarkedPairIterator(self.results, self.npatch1, self.npatch2, index, self._ok)
+
+    class BootstrapPairIterator(PairIterator):
+        def make_gen(self):
+            if self.npatch2 == 1:
+                return ( (i,0) for i in self.index if self.ok[i,0] )
+            elif self.npatch1 == 1:
+                return ( (0,i) for i in self.index if self.ok[0,i] )
+            else:
+                assert self.npatch1 == self.npatch2
+                # Include all represented auto-correlations once, repeating as appropriate.
+                # This needs to be done separately from the below step to avoid extra pairs (i,i)
+                # that you would get by looping i in index and j in index for cases where i=j at
+                # different places in the index list.  E.g. if i=3 shows up 3 times in index, then
+                # the naive way would get 9 instances of (3,3), whereas we only want 3 instances.
+                ret1 = ( (i,i) for i in self.index if self.ok[i,i] )
+
+                # And all other pairs that aren't really auto-correlations.
+                # These can happen at their natural multiplicity from i and j loops.
+                # Note: This is way faster with the precomputed ok matrix.
+                #       Like 0.005 seconds per call rather than 1.2 seconds for 128 patches!
+                ret2 = ( (i,j) for i in self.index for j in self.index if self.ok[i,j] and i!=j )
+
+                return itertools.chain(ret1, ret2)
+
+    def _bootstrap_pairs(self, index):
+        return self.BootstrapPairIterator(self.results, self.npatch1, self.npatch2, index, self._ok)
+
+    def _write(self, writer, name, write_patch_results, zero_tot=False):
+        # These helper properties define what to write for each class.
+        col_names = self._write_col_names
+        data = self._write_data
+        params = self._write_params
+
+        if write_patch_results:
+            # Note: Only include npatch1, npatch2 in serialization if we are also serializing
+            # results.  Otherwise, the corr that is read in will behave oddly.
+ params['npatch1'] = self.npatch1 + params['npatch2'] = self.npatch2 + params['num_rows'] = len(self.rnom.ravel()) + num_patch_pairs = len(self.results) + if zero_tot: + i = 0 + for key, corr in self.results.items(): + if not corr._nonzero: + zp_name = name + '_zp_%d'%i + params[zp_name] = repr((key, corr.tot)) + num_patch_pairs -= 1 + i += 1 + params['num_zero_patch'] = i + params['num_patch_pairs'] = num_patch_pairs + + writer.write(col_names, data, params=params, ext=name) + if write_patch_results: + writer.set_precision(16) + i = 0 + for key, corr in self.results.items(): + if zero_tot and not corr._nonzero: continue + col_names = corr._write_col_names + data = corr._write_data + params = corr._write_params + params['key'] = repr(key) + pp_name = name + '_pp_%d'%i + writer.write(col_names, data, params=params, ext=pp_name) + i += 1 + assert i == num_patch_pairs + + def _read(self, reader, name=None): + name = 'main' if 'main' in reader and name is None else name + params = reader.read_params(ext=name) + num_rows = params.get('num_rows', None) + num_patch_pairs = params.get('num_patch_pairs', 0) + num_zero_patch = params.get('num_zero_patch', 0) + name = 'main' if num_patch_pairs and name is None else name + data = reader.read_data(max_rows=num_rows, ext=name) + + # This helper function defines how to set the attributes for each class + # based on what was read in. + self._read_from_data(data, params) + + self.results = {} + for i in range(num_zero_patch): + zp_name = name + '_zp_%d'%i + key, tot = eval(params[zp_name]) + self.results[key] = self._zero_copy(tot) + for i in range(num_patch_pairs): + pp_name = name + '_pp_%d'%i + corr = self.copy() + params = reader.read_params(ext=pp_name) + data = reader.read_data(max_rows=num_rows, ext=pp_name) + corr._read_from_data(data, params) + key = eval(params['key']) + self.results[key] = corr
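+    # A hedged round-trip sketch of _write/_read via the public write/read methods
+    # (the file name and correlation type here are arbitrary):
+    #
+    #     >>> gg.write('gg.fits', write_patch_results=True)
+    #     >>> gg2 = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=10)
+    #     >>> gg2.read('gg.fits')
+    #
+    # After reading, gg2.results is repopulated, so patch-based covariance
+    # estimates should still work on the restored object.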
+ +
[docs]@depr_pos_kwargs
+def estimate_multi_cov(corrs, method, *, func=None, comm=None):
+    """Estimate the covariance matrix of multiple statistics.
+
+    This is like the method `BinnedCorr2.estimate_cov`, except that it will accommodate
+    multiple statistics from a list ``corrs`` of `BinnedCorr2` objects.
+
+    Options for ``method`` include:
+
+        - 'shot' = The variance based on "shot noise" only.  This includes the Poisson
+          counts of points for N statistics, shape noise for G statistics, and the observed
+          scatter in the values for K statistics.  In this case, the returned covariance
+          matrix will be diagonal, since there is no way to estimate the off-diagonal terms.
+        - 'jackknife' = A jackknife estimate of the covariance matrix based on the scatter
+          in the measurement when excluding one patch at a time.
+        - 'sample' = An estimate based on the sample covariance of a set of samples,
+          taken as the patches of the input catalog.
+        - 'bootstrap' = A bootstrap covariance estimate.  It selects patches at random with
+          replacement and then generates the statistic using all the auto-correlations at
+          their selected repetition plus all the cross terms that aren't actually auto terms.
+        - 'marked_bootstrap' = An estimate based on a marked-point bootstrap resampling of the
+          patches.  Similar to bootstrap, but only samples the patches of the first catalog and
+          uses all patches from the second catalog that correspond to each patch selection of
+          the first catalog.  Based on the algorithm presented in Loh (2008).
+          cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/
+
+    Both 'bootstrap' and 'marked_bootstrap' use the num_bootstrap parameter, which can be set on
+    construction.
+
+    For example, to find the combined covariance matrix for an NG tangential shear statistic,
+    along with the GG xi+ and xi- from the same area, using jackknife covariance estimation,
+    you would write::
+
+        >>> cov = treecorr.estimate_multi_cov([ng,gg], method='jackknife')
+
+    In all cases, the relevant processing needs to already have been completed and finalized.
+    And for all methods other than 'shot', the processing should have involved an appropriate
+    number of patches -- preferably more patches than the length of the vector for your
+    statistic, although this is not checked.
+
+    The default order of the covariance matrix is to simply concatenate the data vectors
+    for each corr in the list ``corrs``.  However, if you want to do something more complicated,
+    you may provide an arbitrary function, ``func``, which should act on the list of correlations.
+    For instance, if you have several `GGCorrelation` objects and would like to order the
+    covariance such that all xi+ results come first, and then all xi- results, you could use
+
+        >>> func = lambda corrs: np.concatenate([c.xip for c in corrs] + [c.xim for c in corrs])
+
+    Or if you want to compute the covariance matrix of some derived quantity like the ratio
+    of two correlations, you could use
+
+        >>> func = lambda corrs: corrs[0].xi / corrs[1].xi
+
+    This function can be parallelized by passing an mpi4py communicator as the ``comm``
+    argument.  For MPI, all processes should have the same inputs.
+    If method == "shot" then parallelization has no effect.
+
+    The return value from this func should be a single numpy array. (This is not directly
+    checked, but you'll probably get some kind of exception if it doesn't behave as expected.)
+
+    .. note::
+
+        The optional ``func`` parameter is not valid in conjunction with ``method='shot'``.
+        It only works for the methods that are based on patch combinations.
+
+    Parameters:
+        corrs (list):   A list of `BinnedCorr2` instances.
+        method (str):   Which method to use to estimate the covariance matrix.
+        func (function): A unary function that takes the list ``corrs`` and returns the
+                        desired full data vector. [default: None, which is equivalent to
+                        ``lambda corrs: np.concatenate([c.getStat() for c in corrs])``]
+        comm (mpi comm): If not None, run under MPI
+
+    Returns:
+        A numpy array with the estimated covariance matrix.
+    """
+    if method == 'shot':
+        if func is not None:
+            raise ValueError("func is invalid with method='shot'")
+        return _cov_shot(corrs)
+    elif method == 'jackknife':
+        return _cov_jackknife(corrs, func, comm=comm)
+    elif method == 'bootstrap':
+        return _cov_bootstrap(corrs, func, comm=comm)
+    elif method == 'marked_bootstrap':
+        return _cov_marked(corrs, func, comm=comm)
+    elif method == 'sample':
+        return _cov_sample(corrs, func, comm=comm)
+    else:
+        raise ValueError("Invalid method: %s"%method)
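+
+# A short sketch of the ``func`` option above (hedged; assumes gg1 and gg2 are
+# GGCorrelation objects that were processed with matching patches):
+#
+#     >>> func = lambda corrs: corrs[0].xip / corrs[1].xip
+#     >>> cov = treecorr.estimate_multi_cov([gg1, gg2], method='jackknife', func=func)
+#
+# cov is then the jackknife covariance of the xi+ ratio, not of the raw vectors.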
+ +
[docs]def build_multi_cov_design_matrix(corrs, method, *, func=None, comm=None):
+    """Build the design matrix that is used for estimating the covariance matrix.
+
+    The design matrix for patch-based covariance estimates is a matrix where each row
+    corresponds to a different estimate of the data vector, :math:`\\xi_i` (or
+    :math:`f(\\xi_i)` if using the optional ``func`` parameter).
+
+    The rows of the matrix for each valid ``method`` are:
+
+        - 'shot': This method is not valid here.
+        - 'jackknife': The data vector when excluding a single patch.
+        - 'sample': The data vector using only a single patch for the first catalog.
+        - 'bootstrap': The data vector for a random resampling of the patches keeping the
+          same total number, but allowing some to repeat.  Cross terms from repeated patches
+          are excluded (since they are really auto terms).
+        - 'marked_bootstrap': The data vector for a random resampling of patches in the first
+          catalog, using all patches for the second catalog.  Based on the algorithm in
+          Loh (2008).
+
+    See `estimate_multi_cov` for more details.
+
+    The return value includes both the design matrix and a vector of weights (the total weight
+    array in the computed correlation functions).  The weights are used for the sample method
+    when estimating the covariance matrix.  The other methods ignore them, but they are provided
+    here in case they are useful.
+
+    Parameters:
+        corrs (list):   A list of `BinnedCorr2` instances.
+        method (str):   Which method to use to estimate the covariance matrix.
+        func (function): A unary function that takes the list ``corrs`` and returns the
+                        desired full data vector. [default: None, which is equivalent to
+                        ``lambda corrs: np.concatenate([c.getStat() for c in corrs])``]
+        comm (mpi comm): If not None, run under MPI
+
+    Returns:
+        A, w: numpy arrays with the design matrix and weights respectively.
+    """
+    if method == 'shot':
+        raise ValueError("There is no design matrix for method='shot'")
+    elif method == 'jackknife':
+        return _design_jackknife(corrs, func, comm=comm)
+    elif method == 'bootstrap':
+        return _design_bootstrap(corrs, func, comm=comm)
+    elif method == 'marked_bootstrap':
+        return _design_marked(corrs, func, comm=comm)
+    elif method == 'sample':
+        return _design_sample(corrs, func, comm=comm)
+    else:
+        raise ValueError("Invalid method: %s"%method)
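+
+# A hedged sketch relating the design matrix to the jackknife covariance computed
+# in _cov_jackknife below (gg is a hypothetical patch-processed correlation):
+#
+#     >>> A, w = treecorr.build_multi_cov_design_matrix([gg], method='jackknife')
+#     >>> v = A - np.mean(A, axis=0)
+#     >>> C = (1. - 1./v.shape[0]) * v.conj().T.dot(v)
+#
+# C should match treecorr.estimate_multi_cov([gg], method='jackknife').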
+
+def _make_cov_design_matrix_core(corrs, plist, func, name, rank=0, size=1):
+    # plist has the pairs to use for each row in the design matrix for each correlation fn.
+    # It is a list by row, each element is a list by corr fn of tuples (i,j), being the indices
+    # to use from the results dict.
+    # We aggregate and finalize each correlation function based on those pairs, and then call
+    # the function func on that list of correlation objects.  This is the data vector for
+    # each row in the design matrix.
+    # We also make a parallel array of the total weight in each row in case the calling routine
+    # needs it.  So far, only sample uses the returned w, but it's very little overhead to compute
+    # it, and only a small memory overhead to build that array and return it.
+
+    # Make a copy of the correlation objects, so we can overwrite things without breaking
+    # the original.
+    corrs = [c.copy() for c in corrs]
+
+    # We can't pickle functions to send via MPI, so have to do this here.
+    if func is None:
+        func = lambda corrs: np.concatenate([c.getStat() for c in corrs])
+
+    # Figure out the shape of the design matrix.
+    v1 = func(corrs)
+    dt = v1.dtype
+    vsize = len(v1)
+    nrows = len(plist)
+
+    # Make the empty return arrays.  They are filled with zeros
+    # because we will sum them over processes later.
+    v = np.zeros((nrows,vsize), dtype=dt)
+    w = np.zeros(nrows, dtype=float)
+
+    for row, pairs in enumerate(plist):
+        if row % size != rank:
+            continue
+        for c, cpairs in zip(corrs, pairs):
+            cpairs = list(cpairs)
+            if len(cpairs) == 0:
+                # This will cause problems downstream if we let it go.
+                # It probably indicates user error, using an inappropriate covariance estimator.
+                # So warn about it, and then do something not too crazy.
+                c.logger.error("WARNING: A xi for calculating the %s covariance has no "%name +
+                               "patch pairs.  This probably means these patch specifications "
+                               "are inappropriate for these data.")
+                c._clear()
+            else:
+                c._calculate_xi_from_pairs(cpairs)
+        v[row] = func(corrs)
+        w[row] = np.sum([np.sum(c.getWeight()) for c in corrs])
+    return v,w
+
+def _make_cov_design_matrix(corrs, plist, func, name, comm=None):
+    if comm is not None:
+        from mpi4py import MPI
+        v, w = _make_cov_design_matrix_core(corrs, plist, func, name, comm.rank, comm.size)
+        # These two calls collect the v and w arrays from all the processes,
+        # sum them all together, and then send them back to each process where they
+        # are put back in-place, overwriting the original v and w array contents.
+        # Each process has an array which is zeros except for the rows it is responsible
+        # for, so the sum fills in the entire array.
+        # Using "Allreduce" instead of "Reduce" means that all processes get a copy of the
+        # final arrays.  This may or may not be needed depending on what users subsequently
+        # do with the matrix, but is fast since this matrix isn't large.
+        comm.Allreduce(MPI.IN_PLACE, v)
+        comm.Allreduce(MPI.IN_PLACE, w)
+    # Otherwise we just use the regular version, which implicitly does the whole matrix
+    else:
+        v, w = _make_cov_design_matrix_core(corrs, plist, func, name)
+    return v, w
+
+def _cov_shot(corrs):
+    # Shot noise "covariance" is just 1/RR or var(g)/weight or var(k)/weight, etc.
+    # Except for NN, the denominator is always corr.weight.
+    # For NN, the denominator is set by calculateXi to be RR.weight.
+    # The numerators are set appropriately for each kind of correlation function as _var_num
+    # when doing finalize, or for NN also in calculateXi.
+ # We return it as a covariance matrix for consistency with the other cov functions, + # but the off diagonal terms are all zero. + vlist = [] + for c in corrs: + v = c.getWeight().copy() + mask1 = v != 0 + # Note: if w=0 anywhere, leave v=0 there, rather than divide by zero. + v[mask1] = c._var_num / v[mask1] + vlist.append(v) + return np.diag(np.concatenate(vlist)) # Return as a covariance matrix + +def _check_patch_nums(corrs, name): + # Figure out what pairs (i,j) are possible for these correlation functions. + # Check that the patches used are compatible, and return the npatch to use. + + for c in corrs: + if len(c.results) == 0: + raise ValueError("Using %s covariance requires using patches."%name) + npatch = corrs[0]._get_npatch() + for c in corrs[1:]: + if c._get_npatch() != npatch: + raise RuntimeError("All correlations must use the same number of patches") + return npatch + +def _design_jackknife(corrs, func, comm=None): + npatch = _check_patch_nums(corrs, 'jackknife') + plist = [c._jackknife_pairs() for c in corrs] + # Swap order of plist. Right now it's a list for each corr of a list for each row. + # We want a list by row with a list for each corr. + plist = list(zip(*plist)) + + return _make_cov_design_matrix(corrs, plist, func, 'jackknife', comm=comm) + +def _cov_jackknife(corrs, func, comm=None): + # Calculate the jackknife covariance for the given statistics + + # The basic jackknife formula is: + # C = (1-1/npatch) Sum_i (v_i - v_mean) (v_i - v_mean)^T + # where v_i is the vector when excluding patch i, and v_mean is the mean of all {v_i}. + # v_i = Sum_jk!=i num_jk / Sum_jk!=i denom_jk + + v,w = _design_jackknife(corrs, func, comm) + npatch = v.shape[0] + vmean = np.mean(v, axis=0) + v -= vmean + C = (1.-1./npatch) * v.conj().T.dot(v) + return C + +def _design_sample(corrs, func, comm=None): + npatch = _check_patch_nums(corrs, 'sample') + plist = [c._sample_pairs() for c in corrs] + # Swap order of plist. Right now it's a list for each corr of a list for each row. + # We want a list by row with a list for each corr. + plist = list(zip(*plist)) + + return _make_cov_design_matrix(corrs, plist, func, 'sample', comm=comm) + +def _cov_sample(corrs, func, comm=None): + # Calculate the sample covariance. + + # This is kind of the converse of the jackknife. We take each patch and use any + # correlations of it with any other patch. The sample variance of these is the estimate + # of the overall variance. + + # C = 1/(npatch-1) Sum_i w_i (v_i - v_mean) (v_i - v_mean)^T + # where v_i = Sum_j num_ij / Sum_j denom_ij + # and w_i is the fraction of the total weight in each patch + + v,w = _design_sample(corrs, func, comm) + npatch = v.shape[0] + + if np.any(w == 0): + raise RuntimeError("Cannot compute sample variance when some patches have no data.") + + w /= np.sum(w) # Now w is the fractional weight for each patch + + vmean = np.mean(v, axis=0) + v -= vmean + C = 1./(npatch-1) * (w * v.conj().T).dot(v) + return C + +def _design_marked(corrs, func, comm=None): + npatch = _check_patch_nums(corrs, 'marked_bootstrap') + nboot = np.max([c.num_bootstrap for c in corrs]) # use the maximum if they differ. + + plist = [] + for k in range(nboot): + # Select a random set of indices to use. (Will have repeats.) 
+        index = corrs[0].rng.randint(npatch, size=npatch)
+        vpairs = [c._marked_pairs(index) for c in corrs]
+        plist.append(vpairs)
+
+    return _make_cov_design_matrix(corrs, plist, func, 'marked_bootstrap', comm=comm)
+
+def _cov_marked(corrs, func, comm=None):
+    # Calculate the marked-point bootstrap covariance
+
+    # This is based on the article A Valid and Fast Spatial Bootstrap for Correlation Functions
+    # by Ji Meng Loh, 2008, cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/abstract
+
+    # We do a bootstrap sampling of the patches.  For each patch selected, we include
+    # all pairs that have the sampled patch in the first position.  In the Loh prescription,
+    # the sums of pairs with a given choice of first patch would be the marks.  Here, we
+    # don't quite do that, since the marks would involve a ratio, so the division is biased
+    # when somewhat noisy.  Rather, we sum the numerators and denominators of the marks
+    # separately and divide the sums.
+
+    # From the bootstrap totals, v_i, the estimated covariance matrix is
+
+    # C = 1/(nboot-1) Sum_i (v_i - v_mean) (v_i - v_mean)^T
+
+    v,w = _design_marked(corrs, func, comm)
+    nboot = v.shape[0]
+    vmean = np.mean(v, axis=0)
+    v -= vmean
+    C = 1./(nboot-1) * v.conj().T.dot(v)
+    return C
+
+def _design_bootstrap(corrs, func, comm=None):
+    npatch = _check_patch_nums(corrs, 'bootstrap')
+    nboot = np.max([c.num_bootstrap for c in corrs])  # use the maximum if they differ.
+
+    plist = []
+    for k in range(nboot):
+        index = corrs[0].rng.randint(npatch, size=npatch)
+        vpairs = [c._bootstrap_pairs(index) for c in corrs]
+        plist.append(vpairs)
+
+    return _make_cov_design_matrix(corrs, plist, func, 'bootstrap', comm=comm)
+
+def _cov_bootstrap(corrs, func, comm=None):
+    # Calculate the 2-patch bootstrap covariance estimate.
+
+    # This is a different version of the bootstrap idea.  It selects patches at random with
+    # replacement, and then generates the statistic using all the auto-correlations at their
+    # selected repetition plus all the cross terms, which aren't actually auto terms.
+    # It seems to do a slightly better job than the marked-point bootstrap above from the
+    # tests done in the test suite.  But the difference is generally pretty small.
+
+    v,w = _design_bootstrap(corrs, func, comm)
+    nboot = v.shape[0]
+    vmean = np.mean(v, axis=0)
+    v -= vmean
+    C = 1./(nboot-1) * v.conj().T.dot(v)
+    return C
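+
+# A self-contained numpy sketch of the bootstrap normalization used in
+# _cov_marked and _cov_bootstrap above (mock data, illustrative only):
+#
+#     >>> rng = np.random.RandomState(1234)
+#     >>> v = rng.normal(size=(500, 20))    # plays the role of the design matrix
+#     >>> v -= np.mean(v, axis=0)
+#     >>> C = 1./(v.shape[0]-1) * v.conj().T.dot(v)   # ~ identity for this mock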
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/binnedcorr3.html b/docs/_build/html/_modules/treecorr/binnedcorr3.html
new file mode 100644
index 00000000..e147e268
--- /dev/null
+++ b/docs/_build/html/_modules/treecorr/binnedcorr3.html
@@ -0,0 +1,1555 @@
+treecorr.binnedcorr3 — TreeCorr 4.3.0 documentation

Source code for treecorr.binnedcorr3

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: binnedcorr3
+"""
+
+import math
+import numpy as np
+import sys
+import coord
+
+from . import _lib
+from .config import merge_config, setup_logger, get
+from .util import parse_metric, metric_enum, coord_enum, set_omp_threads, lazy_property
+from .util import make_reader
+from .util import depr_pos_kwargs
+from .binnedcorr2 import estimate_multi_cov, build_multi_cov_design_matrix
+
+class Namespace(object):
+    pass
+
+
[docs]class BinnedCorr3(object):
+    """This class stores the results of a 3-point correlation calculation, along with some
+    ancillary data.
+
+    This is a base class that is not intended to be constructed directly.  But it has a few
+    helper functions that derived classes can use to help perform their calculations.  See
+    the derived classes for more details:
+
+    - `NNNCorrelation` handles count-count-count correlation functions
+    - `KKKCorrelation` handles kappa-kappa-kappa correlation functions
+    - `GGGCorrelation` handles gamma-gamma-gamma correlation functions
+
+    Three-point correlations are a bit more complicated than two-point, since the data need
+    to be binned in triangles, not just the separation between two points.  We characterize the
+    triangles according to the following three parameters based on the three side lengths
+    of the triangle with d1 >= d2 >= d3.
+
+    .. math::
+        r &= d2 \\\\
+        u &= \\frac{d3}{d2} \\\\
+        v &= \\pm \\frac{(d1 - d2)}{d3} \\\\
+
+    The orientation of the triangle is specified by the sign of v.
+    Positive v triangles have the three sides d1,d2,d3 in counter-clockwise orientation.
+    Negative v triangles have the three sides d1,d2,d3 in clockwise orientation.
+
+    .. note::
+
+        We always bin the same way for positive and negative v values, and the binning
+        specification for v should just be for the positive values.  E.g. if you specify
+        min_v=0.2, max_v=0.6, then TreeCorr will also accumulate triangles with
+        -0.6 < v < -0.2 in addition to those with 0.2 < v < 0.6.
+
+    The constructor for all derived classes takes a config dict as the first argument,
+    since this is often how we keep track of parameters, but if you don't want to
+    use one or if you want to change some parameters from what are in a config dict,
+    then you can use normal kwargs, which take precedence over anything in the config dict.
+
+    There are three implemented definitions for the ``metric``, which defines how to calculate
+    the distance between two points, for three-point correlations:
+
+    - 'Euclidean' = straight line Euclidean distance between two points.  For spherical
+      coordinates (ra,dec without r), this is the chord distance between points on the
+      unit sphere.
+    - 'Arc' = the true great circle distance for spherical coordinates.
+    - 'Periodic' = Like Euclidean, but with periodic boundaries.
+
+      .. note::
+
+        The triangles for three-point correlations can become ambiguous if d1 > period/2,
+        which means the maximum d2 (max_sep) should be less than period/4.
+        This is not enforced.
+
+    So far, there is only one allowed value for the ``bin_type`` for three-point correlations.
+
+    - 'LogRUV' - The bin steps will be uniform in log(r) from log(min_sep) .. log(max_sep).
+      The u and v values are binned linearly from min_u .. max_u and min_v .. max_v.
+
+
+    Parameters:
+        config (dict):  A configuration dict that can be used to pass in the below kwargs if
+                        desired.  This dict is allowed to have additional entries besides
+                        those listed below, which are ignored here. (default: None)
+        logger:         If desired, a logger object for logging. (default: None, in which case
+                        one will be built according to the config dict's verbose level.)
+
+    Keyword Arguments:
+
+        nbins (int):    How many bins to use. (Exactly three of nbins, bin_size, min_sep,
+                        max_sep are required.  If nbins is not given or set to None, it will be
+                        calculated from the values of the other three, rounding up to the next
+                        highest integer.  In this case, bin_size will be readjusted to account
+                        for this rounding up.)
+        bin_size (float): The width of the bins in log(separation). (Exactly three of nbins,
+                        bin_size, min_sep, max_sep are required.  If bin_size is not given or
+                        set to None, it will be calculated from the values of the other three.)
+        min_sep (float): The minimum separation in units of sep_units, if relevant. (Exactly
+                        three of nbins, bin_size, min_sep, max_sep are required.  If min_sep is
+                        not given or set to None, it will be calculated from the values of the
+                        other three.)
+        max_sep (float): The maximum separation in units of sep_units, if relevant. (Exactly
+                        three of nbins, bin_size, min_sep, max_sep are required.  If max_sep is
+                        not given or set to None, it will be calculated from the values of the
+                        other three.)
+
+        sep_units (str): The units to use for the separation values, given as a string.  This
+                        includes both min_sep and max_sep above, as well as the units of the
+                        output distance values.  Valid options are arcsec, arcmin, degrees,
+                        hours, radians. (default: radians if angular units make sense, but for
+                        3-d or flat 2-d positions, the default will just match the units of
+                        x,y[,z] coordinates)
+        bin_slop (float): How much slop to allow in the placement of triangles in the bins.
+                        If bin_slop = 1, then the bin into which a particular triangle is placed
+                        may be incorrect by at most 1.0 bin widths. (default: None, which
+                        means to use a bin_slop that gives a maximum error of 10% on any bin,
+                        which has been found to yield good results for most applications.)
+
+        nubins (int):   Analogous to nbins for the u values. (The default is to calculate from
+                        ubin_size = bin_size, min_u = 0, max_u = 1, but this can be overridden
+                        by specifying up to 3 of these four parameters.)
+        ubin_size (float): Analogous to bin_size for the u values. (default: bin_size)
+        min_u (float):  Analogous to min_sep for the u values. (default: 0)
+        max_u (float):  Analogous to max_sep for the u values. (default: 1)
+
+        nvbins (int):   Analogous to nbins for the positive v values. (The default is to
+                        calculate from vbin_size = bin_size, min_v = 0, max_v = 1, but this can
+                        be overridden by specifying up to 3 of these four parameters.)
+        vbin_size (float): Analogous to bin_size for the v values. (default: bin_size)
+        min_v (float):  Analogous to min_sep for the positive v values. (default: 0)
+        max_v (float):  Analogous to max_sep for the positive v values. (default: 1)
+
+        brute (bool):   Whether to use the "brute force" algorithm. (default: False) Options
+                        are:
+
+                        - False (the default): Stop at non-leaf cells whenever the error in
+                          the separation is compatible with the given bin_slop.
+                        - True: Go to the leaves for both catalogs.
+                        - 1: Always go to the leaves for cat1, but stop at non-leaf cells of
+                          cat2 when the error is compatible with the given bin_slop.
+                        - 2: Always go to the leaves for cat2, but stop at non-leaf cells of
+                          cat1 when the error is compatible with the given bin_slop.
+
+        verbose (int):  If no logger is provided, this will optionally specify a logging level
+                        to use:
+
+                        - 0 means no logging output
+                        - 1 means to output warnings only (default)
+                        - 2 means to output various progress information
+                        - 3 means to output extensive debugging information
+
+        log_file (str): If no logger is provided, this will specify a file to write the logging
+                        output. (default: None; i.e. output to standard output)
+        output_dots (bool): Whether to output progress dots during the calculation of the
+                        correlation function.
(default: False unless verbose is given and >= 2, + in which case True) + + split_method (str): How to split the cells in the tree when building the tree structure. + Options are: + + - mean = Use the arithmetic mean of the coordinate being split. + (default) + - median = Use the median of the coordinate being split. + - middle = Use the middle of the range; i.e. the average of the minimum + and maximum value. + - random: Use a random point somewhere in the middle two quartiles of + the range. + + min_top (int): The minimum number of top layers to use when setting up the field. + (default: :math:`\\max(3, \\log_2(N_{\\rm cpu}))`) + max_top (int): The maximum number of top layers to use when setting up the field. + The top-level cells are where each calculation job starts. There will + typically be of order :math:`2^{\\rm max\\_top}` top-level cells. + (default: 10) + precision (int): The precision to use for the output values. This specifies how many + digits to write. (default: 4) + + metric (str): Which metric to use for distance measurements. Options are listed + above. (default: 'Euclidean') + bin_type (str): What type of binning should be used. Only one option currently. + (default: 'LogRUV') + period (float): For the 'Periodic' metric, the period to use in all directions. + (default: None) + xperiod (float): For the 'Periodic' metric, the period to use in the x direction. + (default: period) + yperiod (float): For the 'Periodic' metric, the period to use in the y direction. + (default: period) + zperiod (float): For the 'Periodic' metric, the period to use in the z direction. + (default: period) + + var_method (str): Which method to use for estimating the variance. Options are: + 'shot', 'jackknife', 'sample', 'bootstrap', 'marked_bootstrap'. + (default: 'shot') + num_bootstrap (int): How many bootstrap samples to use for the 'bootstrap' and + 'marked_bootstrap' var_methods. (default: 500) + rng (RandomState): If desired, a numpy.random.RandomState instance to use for bootstrap + random number generation. (default: None) + + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given in + the constructor in the config dict.) + + .. note:: + + This won't work if the system's C compiler cannot use OpenMP + (e.g. clang prior to version 3.7.) + """ + _valid_params = { + 'nbins' : (int, False, None, None, + 'The number of output bins to use for sep dimension.'), + 'bin_size' : (float, False, None, None, + 'The size of the output bins in log(sep).'), + 'min_sep' : (float, False, None, None, + 'The minimum separation to include in the output.'), + 'max_sep' : (float, False, None, None, + 'The maximum separation to include in the output.'), + 'sep_units' : (str, False, None, coord.AngleUnit.valid_names, + 'The units to use for min_sep and max_sep. 
Also the units of the output ' + 'distances'), + 'bin_slop' : (float, False, None, None, + 'The fraction of a bin width by which it is ok to let the triangles miss the ' + 'correct bin.', + 'The default is to use 1 if bin_size <= 0.1, or 0.1/bin_size if bin_size > 0.1.'), + 'nubins' : (int, False, None, None, + 'The number of output bins to use for u dimension.'), + 'ubin_size' : (float, False, None, None, + 'The size of the output bins in u.'), + 'min_u' : (float, False, None, None, + 'The minimum u to include in the output.'), + 'max_u' : (float, False, None, None, + 'The maximum u to include in the output.'), + 'nvbins' : (int, False, None, None, + 'The number of output bins to use for positive v values.'), + 'vbin_size' : (float, False, None, None, + 'The size of the output bins in v.'), + 'min_v' : (float, False, None, None, + 'The minimum |v| to include in the output.'), + 'max_v' : (float, False, None, None, + 'The maximum |v| to include in the output.'), + 'brute' : (bool, False, False, [False, True], + 'Whether to use brute-force algorithm'), + 'verbose' : (int, False, 1, [0, 1, 2, 3], + 'How verbose the code should be during processing. ', + '0 = Errors Only, 1 = Warnings, 2 = Progress, 3 = Debugging'), + 'log_file' : (str, False, None, None, + 'If desired, an output file for the logging output.', + 'The default is to write the output to stdout.'), + 'output_dots' : (bool, False, None, None, + 'Whether to output dots to the stdout during the C++-level computation.', + 'The default is True if verbose >= 2 and there is no log_file. Else False.'), + 'split_method' : (str, False, 'mean', ['mean', 'median', 'middle', 'random'], + 'Which method to use for splitting cells.'), + 'min_top' : (int, False, None, None, + 'The minimum number of top layers to use when setting up the field.'), + 'max_top' : (int, False, 10, None, + 'The maximum number of top layers to use when setting up the field.'), + 'precision' : (int, False, 4, None, + 'The number of digits after the decimal in the output.'), + 'metric': (str, False, 'Euclidean', ['Euclidean', 'Arc', 'Periodic'], + 'Which metric to use for the distance measurements'), + 'bin_type': (str, False, 'LogRUV', ['LogRUV'], + 'Which type of binning should be used'), + 'period': (float, False, None, None, + 'The period to use for all directions for the Periodic metric'), + 'xperiod': (float, False, None, None, + 'The period to use for the x direction for the Periodic metric'), + 'yperiod': (float, False, None, None, + 'The period to use for the y direction for the Periodic metric'), + 'zperiod': (float, False, None, None, + 'The period to use for the z direction for the Periodic metric'), + + 'var_method': (str, False, 'shot', + ['shot', 'jackknife', 'sample', 'bootstrap', 'marked_bootstrap'], + 'The method to use for estimating the variance'), + 'num_bootstrap': (int, False, 500, None, + 'How many bootstrap samples to use for the var_method=bootstrap and ' + 'marked_bootstrap'), + 'num_threads' : (int, False, None, None, + 'How many threads should be used. 
num_threads <= 0 means auto based on num cores.'),
+    }
+
+    @depr_pos_kwargs
+    def __init__(self, config=None, *, logger=None, rng=None, **kwargs):
+        self._corr = None  # Do this first to make sure we always have it for __del__
+        self.config = merge_config(config,kwargs,BinnedCorr3._valid_params)
+        if logger is None:
+            self.logger = setup_logger(get(self.config,'verbose',int,1),
+                                       self.config.get('log_file',None))
+        else:
+            self.logger = logger
+
+        # We'll make a bunch of attributes here, which we put into a namespace called _ro.
+        # These are the core attributes that won't ever be changed after construction.
+        # This is an efficiency optimization (both memory and flops), since it will allow
+        # copy() to just copy a pointer to the _ro namespace without having to copy each
+        # individual attribute separately.
+        # Access to these attributes is all via read-only properties.
+        self._ro = Namespace()
+
+        if 'output_dots' in self.config:
+            self._ro.output_dots = get(self.config,'output_dots',bool)
+        else:
+            self._ro.output_dots = get(self.config,'verbose',int,1) >= 2
+
+        self._ro.bin_type = self.config.get('bin_type', None)
+        self._ro._bintype = _lib.Log
+
+        self._ro.sep_units = self.config.get('sep_units','')
+        self._ro._sep_units = get(self.config,'sep_units',str,'radians')
+        self._ro._log_sep_units = math.log(self._sep_units)
+        if self.config.get('nbins', None) is None:
+            if self.config.get('max_sep', None) is None:
+                raise TypeError("Missing required parameter max_sep")
+            if self.config.get('min_sep', None) is None:
+                raise TypeError("Missing required parameter min_sep")
+            if self.config.get('bin_size', None) is None:
+                raise TypeError("Missing required parameter bin_size")
+            self._ro.min_sep = float(self.config['min_sep'])
+            self._ro.max_sep = float(self.config['max_sep'])
+            if self.min_sep >= self.max_sep:
+                raise ValueError("max_sep must be larger than min_sep")
+            bin_size = float(self.config['bin_size'])
+            self._ro.nbins = int(math.ceil(math.log(self.max_sep/self.min_sep)/bin_size))
+            # Update self.bin_size given this value of nbins
+            self._ro.bin_size = math.log(self.max_sep/self.min_sep)/self.nbins
+            # Note in this case, bin_size is saved as the nominal bin_size from the config
+            # file, and self.bin_size is the one for the radial bins.  We'll use the nominal
+            # bin_size as the default bin_size for u and v below.
+        elif self.config.get('bin_size', None) is None:
+            if self.config.get('max_sep', None) is None:
+                raise TypeError("Missing required parameter max_sep")
+            if self.config.get('min_sep', None) is None:
+                raise TypeError("Missing required parameter min_sep")
+            self._ro.min_sep = float(self.config['min_sep'])
+            self._ro.max_sep = float(self.config['max_sep'])
+            if self.min_sep >= self.max_sep:
+                raise ValueError("max_sep must be larger than min_sep")
+            self._ro.nbins = int(self.config['nbins'])
+            bin_size = self._ro.bin_size = math.log(self.max_sep/self.min_sep)/self.nbins
+        elif self.config.get('max_sep', None) is None:
+            if self.config.get('min_sep', None) is None:
+                raise TypeError("Missing required parameter min_sep")
+            self._ro.min_sep = float(self.config['min_sep'])
+            self._ro.nbins = int(self.config['nbins'])
+            bin_size = self._ro.bin_size = float(self.config['bin_size'])
+            self._ro.max_sep = math.exp(self.nbins*bin_size)*self.min_sep
+        else:
+            if self.config.get('min_sep', None) is not None:
+                raise TypeError("Only 3 of min_sep, max_sep, bin_size, nbins are allowed.")
+            self._ro.max_sep = float(self.config['max_sep'])
+            self._ro.nbins = int(self.config['nbins'])
+            bin_size = self._ro.bin_size = float(self.config['bin_size'])
+            self._ro.min_sep = self.max_sep*math.exp(-self.nbins*bin_size)
+        if self.sep_units == '':
+            self.logger.info("r: nbins = %d, min,max sep = %g..%g, bin_size = %g",
+                             self.nbins, self.min_sep, self.max_sep, self.bin_size)
+        else:
+            self.logger.info("r: nbins = %d, min,max sep = %g..%g %s, bin_size = %g",
+                             self.nbins, self.min_sep, self.max_sep, self.sep_units,
+                             self.bin_size)
+        # The underscore-prefixed names are in natural units (radians for angles)
+        self._ro._min_sep = self.min_sep * self._sep_units
+        self._ro._max_sep = self.max_sep * self._sep_units
+        self._ro._bin_size = self.bin_size  # There is no Linear bin_type yet, but if one is
+                                            # added, units will need to be applied to _bin_size
+                                            # in that case as well.
+
+        self._ro.min_u = float(self.config.get('min_u', 0.))
+        self._ro.max_u = float(self.config.get('max_u', 1.))
+        if self.min_u >= self.max_u:
+            raise ValueError("max_u must be larger than min_u")
+        if self.min_u < 0. or self.max_u > 1.:
+            raise ValueError("Invalid range for u: %f - %f"%(self.min_u, self.max_u))
+        self._ro.ubin_size = float(self.config.get('ubin_size', bin_size))
+        if 'nubins' not in self.config:
+            self._ro.nubins = int(math.ceil((self.max_u-self.min_u-1.e-10)/self.ubin_size))
+        elif 'max_u' in self.config and 'min_u' in self.config and 'ubin_size' in self.config:
+            raise TypeError("Only 3 of min_u, max_u, ubin_size, nubins are allowed.")
+        else:
+            self._ro.nubins = self.config['nubins']
+            # Allow min or max u to be implicit from nubins and ubin_size
+            if 'ubin_size' in self.config:
+                if 'min_u' not in self.config:
+                    self._ro.min_u = max(self.max_u - self.nubins * self.ubin_size, 0.)
+                if 'max_u' not in self.config:
+                    self._ro.max_u = min(self.min_u + self.nubins * self.ubin_size, 1.)
+ # Adjust ubin_size given the other values + self._ro.ubin_size = (self.max_u-self.min_u)/self.nubins + self.logger.info("u: nbins = %d, min,max = %g..%g, bin_size = %g", + self.nubins,self.min_u,self.max_u,self.ubin_size) + + self._ro.min_v = float(self.config.get('min_v', 0.)) + self._ro.max_v = float(self.config.get('max_v', 1.)) + if self.min_v >= self.max_v: + raise ValueError("max_v must be larger than min_v") + if self.min_v < 0 or self.max_v > 1.: + raise ValueError("Invalid range for |v|: %f - %f"%(self.min_v, self.max_v)) + self._ro.vbin_size = float(self.config.get('vbin_size', bin_size)) + if 'nvbins' not in self.config: + self._ro.nvbins = int(math.ceil((self.max_v-self.min_v-1.e-10)/self.vbin_size)) + elif 'max_v' in self.config and 'min_v' in self.config and 'vbin_size' in self.config: + raise TypeError("Only 3 of min_v, max_v, vbin_size, nvbins are allowed.") + else: + self._ro.nvbins = self.config['nvbins'] + # Allow min or max v to be implicit from nvbins and vbin_size + if 'vbin_size' in self.config: + if 'max_v' not in self.config: + self._ro.max_v = min(self.min_v + self.nvbins * self.vbin_size, 1.) + else: # min_v not in config + self._ro.min_v = max(self.max_v - self.nvbins * self.vbin_size, -1.) + # Adjust vbin_size given the other values + self._ro.vbin_size = (self.max_v-self.min_v)/self.nvbins + self.logger.info("v: nbins = %d, min,max = %g..%g, bin_size = %g", + self.nvbins,self.min_v,self.max_v,self.vbin_size) + + self._ro.split_method = self.config.get('split_method','mean') + self.logger.debug("Using split_method = %s",self.split_method) + + self._ro.min_top = get(self.config,'min_top',int,None) + self._ro.max_top = get(self.config,'max_top',int,10) + + self._ro.bin_slop = get(self.config,'bin_slop',float,-1.0) + if self.bin_slop < 0.0: + if self.bin_size <= 0.1: + self._ro.bin_slop = 1.0 + self._ro.b = self.bin_size + else: + self._ro.bin_slop = 0.1/self.bin_size # The stored bin_slop corresponds to lnr bins. + self._ro.b = 0.1 + if self.ubin_size <= 0.1: + self._ro.bu = self.ubin_size + else: + self._ro.bu = 0.1 + if self.vbin_size <= 0.1: + self._ro.bv = self.vbin_size + else: + self._ro.bv = 0.1 + else: + self._ro.b = self.bin_size * self.bin_slop + self._ro.bu = self.ubin_size * self.bin_slop + self._ro.bv = self.vbin_size * self.bin_slop + + if self.b > 0.100001: # Add some numerical slop + self.logger.warning( + "Using bin_slop = %g, bin_size = %g\n"%(self.bin_slop,self.bin_size)+ + "The b parameter is bin_slop * bin_size = %g"%(self.b)+ + " bu = %g, bv = %g\n"%(self.bu,self.bv)+ + "It is generally recommended to use b <= 0.1 for most applications.\n"+ + "Larger values of this b parameter may result in significant inaccuracies.") + else: + self.logger.debug("Using bin_slop = %g, b = %g, bu = %g, bv = %g", + self.bin_slop,self.b,self.bu,self.bv) + + # This makes nbins evenly spaced entries in log(r) starting with 0 with step bin_size + self._ro.logr1d = np.linspace(start=0, stop=self.nbins*self.bin_size, + num=self.nbins, endpoint=False) + # Offset by the position of the center of the first bin. 
+ self._ro.logr1d += math.log(self.min_sep) + 0.5*self.bin_size + + self._ro.u1d = np.linspace(start=0, stop=self.nubins*self.ubin_size, + num=self.nubins, endpoint=False) + self._ro.u1d += self.min_u + 0.5*self.ubin_size + + self._ro.v1d = np.linspace(start=0, stop=self.nvbins*self.vbin_size, + num=self.nvbins, endpoint=False) + self._ro.v1d += self.min_v + 0.5*self.vbin_size + self._ro.v1d = np.concatenate([-self.v1d[::-1],self.v1d]) + + self._ro.logr = np.tile(self.logr1d[:, np.newaxis, np.newaxis], + (1, self.nubins, 2*self.nvbins)) + self._ro.u = np.tile(self.u1d[np.newaxis, :, np.newaxis], + (self.nbins, 1, 2*self.nvbins)) + self._ro.v = np.tile(self.v1d[np.newaxis, np.newaxis, :], + (self.nbins, self.nubins, 1)) + self._ro.rnom = np.exp(self.logr) + self._ro.rnom1d = np.exp(self.logr1d) + self._ro.brute = get(self.config,'brute',bool,False) + if self.brute: + self.logger.info("Doing brute force calculation.",) + self.coords = None + self.metric = None + period = get(self.config,'period',float,0) + self._ro.xperiod = get(self.config,'xperiod',float,period) + self._ro.yperiod = get(self.config,'yperiod',float,period) + self._ro.zperiod = get(self.config,'zperiod',float,period) + self._ro._nbins = len(self._ro.logr.ravel()) + + self._ro.var_method = get(self.config,'var_method',str,'shot') + self._ro.num_bootstrap = get(self.config,'num_bootstrap',int,500) + self.results = {} # for jackknife, etc. store the results of each pair of patches. + self.npatch1 = self.npatch2 = self.npatch3 = 1 + self._rng = rng + + @property + def rng(self): + if self._rng is None: + self._rng = np.random.RandomState() + return self._rng + + # Properties for all the read-only attributes ("ro" stands for "read-only") + @property + def output_dots(self): return self._ro.output_dots + @property + def bin_type(self): return self._ro.bin_type + @property + def sep_units(self): return self._ro.sep_units + @property + def _sep_units(self): return self._ro._sep_units + @property + def _log_sep_units(self): return self._ro._log_sep_units + @property + def min_sep(self): return self._ro.min_sep + @property + def max_sep(self): return self._ro.max_sep + @property + def min_u(self): return self._ro.min_u + @property + def max_u(self): return self._ro.max_u + @property + def min_v(self): return self._ro.min_v + @property + def max_v(self): return self._ro.max_v + @property + def bin_size(self): return self._ro.bin_size + @property + def ubin_size(self): return self._ro.ubin_size + @property + def vbin_size(self): return self._ro.vbin_size + @property + def nbins(self): return self._ro.nbins + @property + def nubins(self): return self._ro.nubins + @property + def nvbins(self): return self._ro.nvbins + @property + def logr1d(self): return self._ro.logr1d + @property + def u1d(self): return self._ro.u1d + @property + def v1d(self): return self._ro.v1d + @property + def logr(self): return self._ro.logr + @property + def u(self): return self._ro.u + @property + def v(self): return self._ro.v + @property + def rnom(self): return self._ro.rnom + @property + def rnom1d(self): return self._ro.rnom1d + @property + def _bintype(self): return self._ro._bintype + @property + def _nbins(self): return self._ro._nbins + @property + def _min_sep(self): return self._ro._min_sep + @property + def _max_sep(self): return self._ro._max_sep + @property + def _bin_size(self): return self._ro._bin_size + @property + def split_method(self): return self._ro.split_method + @property + def min_top(self): return self._ro.min_top + @property + def 
max_top(self): return self._ro.max_top + @property + def bin_slop(self): return self._ro.bin_slop + @property + def b(self): return self._ro.b + @property + def bu(self): return self._ro.bu + @property + def bv(self): return self._ro.bv + @property + def brute(self): return self._ro.brute + @property + def xperiod(self): return self._ro.xperiod + @property + def yperiod(self): return self._ro.yperiod + @property + def zperiod(self): return self._ro.zperiod + @property + def var_method(self): return self._ro.var_method + @property + def num_bootstrap(self): return self._ro.num_bootstrap + @property + def _d1(self): return self._ro._d1 + @property + def _d2(self): return self._ro._d2 + @property + def _d3(self): return self._ro._d3 + + def __getstate__(self): + d = self.__dict__.copy() + d.pop('_corr',None) + d.pop('_ok',None) # Remake this as needed. + d.pop('logger',None) # Oh well. This is just lost in the copy. Can't be pickled. + return d + + def __setstate__(self, d): + self.__dict__ = d + self._corr = None + self.logger = setup_logger(get(self.config,'verbose',int,1), + self.config.get('log_file',None)) + +
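+    # Pickling note (a hedged sketch): __getstate__ above drops _corr (the C++
+    # object), the cached _ok matrix, and the logger.  __setstate__ resets _corr
+    # to None (it appears to be rebuilt on demand), recreates the logger, and _ok
+    # is remade lazily when next needed, so a simple round trip should work:
+    #
+    #     >>> import pickle
+    #     >>> kkk2 = pickle.loads(pickle.dumps(kkk))   # kkk: any BinnedCorr3 subclass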
[docs] def clear(self): + """Clear all data vectors, the results dict, and any related values. + """ + self._clear() + self.results = {} + self.npatch1 = self.npatch2 = self.npatch3 = 1 + self.__dict__.pop('_ok',None)
+
+    @property
+    def nonzero(self):
+        """Return if there are any values accumulated yet.  (i.e. ntri > 0)
+        """
+        return np.any(self.ntri)
+
+    def _add_tot(self, i, j, k, c1, c2, c3):
+        # No op for all but NNCorrelation, which needs to add the tot value
+        pass
+
+    def _trivially_zero(self, c1, c2, c3, metric):
+        # For now, ignore the metric.  Just be conservative about how much space we need.
+        x1,y1,z1,s1 = c1._get_center_size()
+        x2,y2,z2,s2 = c2._get_center_size()
+        x3,y3,z3,s3 = c3._get_center_size()
+        d3 = ((x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2)**0.5
+        d1 = ((x2-x3)**2 + (y2-y3)**2 + (z2-z3)**2)**0.5
+        d2 = ((x3-x1)**2 + (y3-y1)**2 + (z3-z1)**2)**0.5
+        d3, d2, d1 = sorted([d1,d2,d3])
+        return (d2 > s1 + s2 + s3 + 2*self._max_sep)  # The 2* is where we are being conservative.
+
+    def _process_all_auto(self, cat1, metric, num_threads, comm=None, low_mem=False):
+
+        def is_my_job(my_indices, i, j, k, n):
+            # Helper function to figure out if a given (i,j,k) job should be done on the
+            # current process.
+
+            # Always my job if not using MPI.
+            if my_indices is None:
+                return True
+
+            # Now the tricky part.  If using MPI, we need to divide up the jobs smartly.
+            # The first point is to divvy up the auto jobs evenly.  This is where most of the
+            # work is done, so we want those to be spread as evenly as possible across procs.
+            # Therefore, if all indices are mine, then do the job.
+            # This reduces the number of catalogs this machine needs to load up.
+            n1 = np.sum([i in my_indices, j in my_indices, k in my_indices])
+            if n1 == 3:
+                self.logger.info("Rank %d: Job (%d,%d,%d) is mine.",rank,i,j,k)
+                return True
+
+            # If none of the indices are mine, then it's not my job.
+            if n1 == 0:
+                return False
+
+            # When only one or two of the indices are mine, then we follow the same kind of
+            # procedure as we did in 2pt.  There, we decided based on the parity of i.
+            # Here that turns into i mod 3.
+            if ( (i % 3 == 0 and i in my_indices) or
+                 (i % 3 == 1 and j in my_indices) or
+                 (i % 3 == 2 and k in my_indices) ):
+                self.logger.info("Rank %d: Job (%d,%d,%d) is mine.",rank,i,j,k)
+                return True
+            else:
+                return False
+
+        if len(cat1) == 1 and cat1[0].npatch == 1:
+            self.process_auto(cat1[0], metric=metric, num_threads=num_threads)
+
+        else:
+            # When patch processing, keep track of the pair-wise results.
+            if self.npatch1 == 1:
+                self.npatch1 = cat1[0].npatch if cat1[0].npatch != 1 else len(cat1)
+                self.npatch2 = self.npatch3 = self.npatch1
+            n = self.npatch1
+
+            # Setup for deciding when this is my job.
+            if comm:
+                size = comm.Get_size()
+                rank = comm.Get_rank()
+                my_indices = np.arange(n * rank // size, n * (rank+1) // size)
+                self.logger.info("Rank %d: My indices are %s",rank,my_indices)
+            else:
+                my_indices = None
+
+            temp = self.copy()
+            for ii,c1 in enumerate(cat1):
+                i = c1.patch if c1.patch is not None else ii
+                if is_my_job(my_indices, i, i, i, n):
+                    temp.clear()
+                    self.logger.info('Process patch %d auto',i)
+                    temp.process_auto(c1, metric=metric, num_threads=num_threads)
+                    if (i,i,i) in self.results and self.results[(i,i,i)].nonzero:
+                        self.results[(i,i,i)] += temp
+                    else:
+                        self.results[(i,i,i)] = temp.copy()
+                    self += temp
+
+                for jj,c2 in list(enumerate(cat1))[::-1]:
+                    j = c2.patch if c2.patch is not None else jj
+                    if i < j:
+                        if is_my_job(my_indices, i, j, j, n):
+                            temp.clear()
+                            # One point in c1, 2 in c2.
+ if not self._trivially_zero(c1,c2,c2,metric): + self.logger.info('Process patches %d,%d cross12',i,j) + temp.process_cross12(c1, c2, metric=metric, num_threads=num_threads) + else: + self.logger.info('Skipping %d,%d pair, which are too far apart ' + + 'for this set of separations',i,j) + if temp.nonzero: + if (i,j,j) in self.results and self.results[(i,j,j)].nonzero: + self.results[(i,j,j)] += temp + else: + self.results[(i,j,j)] = temp.copy() + self += temp + else: + # NNNCorrelation needs to add the tot value + self._add_tot(i, j, j, c1, c2, c2) + + temp.clear() + # One point in c2, 2 in c1. + if not self._trivially_zero(c1,c1,c2,metric): + self.logger.info('Process patches %d,%d cross12',j,i) + temp.process_cross12(c2, c1, metric=metric, num_threads=num_threads) + if temp.nonzero: + if (i,i,j) in self.results and self.results[(i,i,j)].nonzero: + self.results[(i,i,j)] += temp + else: + self.results[(i,i,j)] = temp.copy() + self += temp + else: + # NNNCorrelation needs to add the tot value + self._add_tot(i, i, j, c2, c1, c1) + + # One point in each of c1, c2, c3 + for kk,c3 in enumerate(cat1): + k = c3.patch if c3.patch is not None else kk + if j < k and is_my_job(my_indices, i, j, k, n): + temp.clear() + + if not self._trivially_zero(c1,c2,c3,metric): + self.logger.info('Process patches %d,%d,%d cross',i,j,k) + temp.process_cross(c1, c2, c3, metric=metric, + num_threads=num_threads) + else: + self.logger.info('Skipping %d,%d,%d, which are too far apart ' + + 'for this set of separations',i,j,k) + if temp.nonzero: + if (i,j,k) in self.results and self.results[(i,j,k)].nonzero: + self.results[(i,j,k)] += temp + else: + self.results[(i,j,k)] = temp.copy() + self += temp + else: + # NNNCorrelation needs to add the tot value + self._add_tot(i, j, k, c1, c2, c3) + if low_mem: + c3.unload() + + if low_mem and jj != ii+1: + # Don't unload i+1, since that's the next one we'll need. + c2.unload() + if low_mem: + c1.unload() + if comm is not None: + rank = comm.Get_rank() + size = comm.Get_size() + self.logger.info("Rank %d: Completed jobs %s",rank,list(self.results.keys())) + # Send all the results back to rank 0 process. + if rank > 0: + comm.send(self, dest=0) + else: + for p in range(1,size): + temp = comm.recv(source=p) + self += temp + self.results.update(temp.results) + + def _process_all_cross12(self, cat1, cat2, metric, num_threads, comm=None, low_mem=False): + + def is_my_job(my_indices, i, j, k, n1, n2): + # Helper function to figure out if a given (i,j,k) job should be done on the + # current process. + + # Always my job if not using MPI. + if my_indices is None: + return True + + # If n1 is n, then this can be simple. Just split according to i. + n = max(n1,n2) + if n1 == n: + if i in my_indices: + self.logger.info("Rank %d: Job (%d,%d,%d) is mine.",rank,i,j,k) + return True + else: + return False + + # If not, then this looks like the decision for 2pt auto using j,k. 
+ if j in my_indices and k in my_indices: + self.logger.info("Rank %d: Job (%d,%d,%d) is mine.",rank,i,j,k) + return True + + if j not in my_indices and k not in my_indices: + return False + + if k-j < n//2: + ret = j % 2 == (0 if j in my_indices else 1) + else: + ret = k % 2 == (0 if k in my_indices else 1) + if ret: + self.logger.info("Rank %d: Job (%d,%d,%d) is mine.",rank,i,j,k) + return ret + + if len(cat1) == 1 and len(cat2) == 1 and cat1[0].npatch == 1 and cat2[0].npatch == 1: + self.process_cross12(cat1[0], cat2[0], metric=metric, num_threads=num_threads) + else: + # When patch processing, keep track of the pair-wise results. + if self.npatch1 == 1: + self.npatch1 = cat1[0].npatch if cat1[0].npatch != 1 else len(cat1) + if self.npatch2 == 1: + self.npatch2 = cat2[0].npatch if cat2[0].npatch != 1 else len(cat2) + self.npatch3 = self.npatch2 + if self.npatch1 != self.npatch2 and self.npatch1 != 1 and self.npatch2 != 1: + raise RuntimeError("Cross correlation requires both catalogs use the same patches.") + + # Setup for deciding when this is my job. + n1 = self.npatch1 + n2 = self.npatch2 + if comm: + size = comm.Get_size() + rank = comm.Get_rank() + n = max(n1,n2) + my_indices = np.arange(n * rank // size, n * (rank+1) // size) + self.logger.info("Rank %d: My indices are %s",rank,my_indices) + else: + my_indices = None + + temp = self.copy() + for ii,c1 in enumerate(cat1): + i = c1.patch if c1.patch is not None else ii + for jj,c2 in enumerate(cat2): + j = c2.patch if c2.patch is not None else jj + if is_my_job(my_indices, i, i, j, n1, n2): + temp.clear() + # One point in c1, 2 in c2. + if not self._trivially_zero(c1,c2,c2,metric): + self.logger.info('Process patches %d,%d cross12',i,j) + temp.process_cross12(c1, c2, metric=metric, num_threads=num_threads) + else: + self.logger.info('Skipping %d,%d pair, which are too far apart ' + + 'for this set of separations',i,j) + if temp.nonzero or i==j or n1==1 or n2==1: + if (i,j,j) in self.results and self.results[(i,j,j)].nonzero: + self.results[(i,j,j)] += temp + else: + self.results[(i,j,j)] = temp.copy() + self += temp + else: + # NNNCorrelation needs to add the tot value + self._add_tot(i, j, j, c1, c2, c2) + + # One point in each of c1, c2, c3 + for kk,c3 in list(enumerate(cat2))[::-1]: + k = c3.patch if c3.patch is not None else kk + if j < k and is_my_job(my_indices, i, j, k, n1, n2): + temp.clear() + + if not self._trivially_zero(c1,c2,c3,metric): + self.logger.info('Process patches %d,%d,%d cross',i,j,k) + temp.process_cross(c1, c2, c3, metric=metric, + num_threads=num_threads) + else: + self.logger.info('Skipping %d,%d,%d, which are too far apart ' + + 'for this set of separations',i,j,k) + if temp.nonzero: + if (i,j,k) in self.results and self.results[(i,j,k)].nonzero: + self.results[(i,j,k)] += temp + else: + self.results[(i,j,k)] = temp.copy() + self += temp + else: + # NNNCorrelation needs to add the tot value + self._add_tot(i, j, k, c1, c2, c3) + if low_mem: + c3.unload() + + if low_mem and jj != ii+1: + # Don't unload i+1, since that's the next one we'll need. + c2.unload() + if low_mem: + c1.unload() + if comm is not None: + rank = comm.Get_rank() + size = comm.Get_size() + self.logger.info("Rank %d: Completed jobs %s",rank,list(self.results.keys())) + # Send all the results back to rank 0 process. 
+ if rank > 0: + comm.send(self, dest=0) + else: + for p in range(1,size): + temp = comm.recv(source=p) + self += temp + self.results.update(temp.results) + + def _process_all_cross(self, cat1, cat2, cat3, metric, num_threads, comm=None, low_mem=False): + + def is_my_job(my_indices, i, j, k, n1, n2, n3): + # Helper function to figure out if a given (i,j,k) job should be done on the + # current process. + + # Always my job if not using MPI. + if my_indices is None: + return True + + # Just split up according to one of the catalogs. + n = max(n1,n2,n3) + if n1 == n: + m = i + elif n2 == n: + m = j + else: + m = k + if m in my_indices: + self.logger.info("Rank %d: Job (%d,%d,%d) is mine.",rank,i,j,k) + return True + else: + return False + + if (len(cat1) == 1 and len(cat2) == 1 and len(cat3) == 1 and + cat1[0].npatch == 1 and cat2[0].npatch == 1 and cat3[0].npatch == 1): + self.process_cross(cat1[0], cat2[0], cat3[0], metric=metric, num_threads=num_threads) + else: + # When patch processing, keep track of the pair-wise results. + if self.npatch1 == 1: + self.npatch1 = cat1[0].npatch if cat1[0].npatch != 1 else len(cat1) + if self.npatch2 == 1: + self.npatch2 = cat2[0].npatch if cat2[0].npatch != 1 else len(cat2) + if self.npatch3 == 1: + self.npatch3 = cat3[0].npatch if cat3[0].npatch != 1 else len(cat3) + if self.npatch1 != self.npatch2 and self.npatch1 != 1 and self.npatch2 != 1: + raise RuntimeError("Cross correlation requires all catalogs use the same patches.") + if self.npatch1 != self.npatch3 and self.npatch1 != 1 and self.npatch3 != 1: + raise RuntimeError("Cross correlation requires all catalogs use the same patches.") + + # Setup for deciding when this is my job. + n1 = self.npatch1 + n2 = self.npatch2 + n3 = self.npatch3 + if comm: + size = comm.Get_size() + rank = comm.Get_rank() + n = max(n1,n2,n3) + my_indices = np.arange(n * rank // size, n * (rank+1) // size) + self.logger.info("Rank %d: My indices are %s",rank,my_indices) + else: + my_indices = None + + temp = self.copy() + for ii,c1 in enumerate(cat1): + i = c1.patch if c1.patch is not None else ii + for jj,c2 in enumerate(cat2): + j = c2.patch if c2.patch is not None else jj + for kk,c3 in enumerate(cat3): + k = c3.patch if c3.patch is not None else kk + if is_my_job(my_indices, i, j, k, n1, n2, n3): + temp.clear() + if not self._trivially_zero(c1,c2,c3,metric): + self.logger.info('Process patches %d,%d,%d cross',i,j,k) + temp.process_cross(c1, c2, c3, metric=metric, + num_threads=num_threads) + else: + self.logger.info('Skipping %d,%d,%d, which are too far apart ' + + 'for this set of separations',i,j,k) + if (temp.nonzero or (i==j==k) + or (i==j and n3==1) or (i==k and n2==1) or (j==k and n1==1) + or (n1==n2==1) or (n1==n3==1) or (n2==n3==1)): + if (i,j,k) in self.results and self.results[(i,j,k)].nonzero: + self.results[(i,j,k)] += temp + else: + self.results[(i,j,k)] = temp.copy() + self += temp + else: + # NNNCorrelation needs to add the tot value + self._add_tot(i, j, k, c1, c2, c3) + if low_mem: + c3.unload() + if low_mem and jj != ii+1: + # Don't unload i+1, since that's the next one we'll need. + c2.unload() + if low_mem: + c1.unload() + if comm is not None: + rank = comm.Get_rank() + size = comm.Get_size() + self.logger.info("Rank %d: Completed jobs %s",rank,list(self.results.keys())) + # Send all the results back to rank 0 process. + if rank > 0: + comm.send(self, dest=0) + else: + for p in range(1,size): + temp = comm.recv(source=p) + self += temp + self.results.update(temp.results) + +
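+    # A minimal sketch of the MPI work division used above: each rank gets a
+    # contiguous block of patch indices via np.arange(n*rank//size, n*(rank+1)//size).
+    # With hypothetical values n=10 patches over size=4 ranks, the blocks come out as:
+    #
+    #     >>> n, size = 10, 4
+    #     >>> [list(range(n*rank//size, n*(rank+1)//size)) for rank in range(size)]
+    #     [[0, 1], [2, 3, 4], [5, 6], [7, 8, 9]]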
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + Usually, this is just self.zeta. But if the metric is TwoD, this becomes + self.zeta.ravel(). + + And for `GGGCorrelation`, it is the concatenation of the four different correlations + [gam0.ravel(), gam1.ravel(), gam2.ravel(), gam3.ravel()]. + """ + return self.zeta.ravel()
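+    # For example, per the docstring above, the `GGGCorrelation` override amounts
+    # to the following concatenation (a sketch; the actual override lives in
+    # GGGCorrelation, not here):
+    #
+    #     >>> stat = np.concatenate([ggg.gam0.ravel(), ggg.gam1.ravel(),
+    #     ...                        ggg.gam2.ravel(), ggg.gam3.ravel()])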
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + This is the weight array corresponding to `getStat`. Usually just self.weight, but + raveled for TwoD and duplicated for GGGCorrelation to match what `getStat` does in + those cases. + """ + return self.weight.ravel()
+ +
[docs] @depr_pos_kwargs
+    def estimate_cov(self, method, *, func=None, comm=None):
+        """Estimate the covariance matrix based on the data
+
+        This function will calculate an estimate of the covariance matrix according to the
+        given method.
+
+        Options for ``method`` include:
+
+        - 'shot' = The variance based on "shot noise" only.  This includes the Poisson
+          counts of points for N statistics, shape noise for G statistics, and the observed
+          scatter in the values for K statistics.  In this case, the returned covariance
+          matrix will be diagonal, since there is no way to estimate the off-diagonal terms.
+        - 'jackknife' = A jackknife estimate of the covariance matrix based on the scatter
+          in the measurement when excluding one patch at a time.
+        - 'sample' = An estimate based on the sample covariance of a set of samples,
+          taken as the patches of the input catalog.
+        - 'bootstrap' = A bootstrap covariance estimate.  It selects patches at random with
+          replacement and then generates the statistic using all the auto-correlations at
+          their selected repetition plus all the cross terms that aren't actually auto terms.
+        - 'marked_bootstrap' = An estimate based on a marked-point bootstrap resampling of the
+          patches.  Similar to bootstrap, but only samples the patches of the first catalog and
+          uses all patches from the second catalog that correspond to each patch selection of
+          the first catalog.  cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/
+
+        Both 'bootstrap' and 'marked_bootstrap' use the num_bootstrap parameter, which can be
+        set on construction.
+
+        .. note::
+
+            For most classes, there is only a single statistic, ``zeta``, so this calculates a
+            covariance matrix for that vector.  `GGGCorrelation` has four: ``gam0``, ``gam1``,
+            ``gam2``, and ``gam3``, so in this case the full data vector is ``gam0`` followed by
+            ``gam1``, then ``gam2``, then ``gam3``, and this calculates the covariance matrix
+            for that full vector including all four statistics.  The helper function `getStat`
+            returns the relevant statistic in all cases.
+
+        In all cases, the relevant processing needs to already have been completed and finalized.
+        And for all methods other than 'shot', the processing should have involved an appropriate
+        number of patches -- preferably more patches than the length of the vector for your
+        statistic, although this is not checked.
+
+        The default data vector to use for the covariance matrix is given by the method
+        `getStat`.  As noted above, this is usually just self.zeta.  However, there is an option
+        to compute the covariance of some other function of the correlation object by providing
+        an arbitrary function, ``func``, which should act on the current correlation object
+        and return the data vector of interest.
+
+        For instance, for a `GGGCorrelation`, you might want to compute the covariance of just
+        gam0 and ignore the others.  In this case you could use
+
+        >>> func = lambda ggg: ggg.gam0
+
+        The return value from this func should be a single numpy array. (This is not directly
+        checked, but you'll probably get some kind of exception if it doesn't behave as expected.)
+
+        .. note::
+
+            The optional ``func`` parameter is not valid in conjunction with ``method='shot'``.
+            It only works for the methods that are based on patch combinations.
+
+        This function can be parallelized by passing an mpi4py communicator as the ``comm``
+        argument.  For MPI, all processes should have the same inputs.
+        If method == "shot", then parallelization has no effect.
+
+        Parameters:
+            method (str):       Which method to use to estimate the covariance matrix.
+            func (function):    A unary function that acts on the current correlation object and
+                                returns the desired data vector.  [default: None, which is
+                                equivalent to ``lambda corr: corr.getStat()``]
+            comm (mpi comm):    If not None, run under MPI.
+
+        Returns:
+            A numpy array with the estimated covariance matrix.
+        """
+        if func is not None:
+            # Need to convert it to a function of the first item in the list.
+            all_func = lambda corrs: func(corrs[0])
+        else:
+            all_func = None
+        return estimate_multi_cov([self], method, func=all_func, comm=comm)
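+    # A minimal usage sketch (assuming a finalized `GGGCorrelation` ``ggg`` that
+    # was processed from catalogs built with npatch > 1):
+    #
+    #     >>> cov = ggg.estimate_cov('jackknife')
+    #     >>> cov0 = ggg.estimate_cov('jackknife', func=lambda c: c.gam0.ravel())
+    #
+    # The first call uses the full getStat() data vector; the second restricts
+    # the covariance to gam0, as described in the docstring above.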
+ +
[docs] def build_cov_design_matrix(self, method, *, func=None, comm=None):
+        """Build the design matrix that is used for estimating the covariance matrix.
+
+        The design matrix for patch-based covariance estimates is a matrix where each row
+        corresponds to a different estimate of the data vector, :math:`\\zeta_i` (or
+        :math:`f(\\zeta_i)` if using the optional ``func`` parameter).
+
+        The rows in the matrix for each valid ``method`` are:
+
+        - 'shot': This method is not valid here.
+        - 'jackknife': The data vector when excluding a single patch.
+        - 'sample': The data vector using only a single patch for the first catalog.
+        - 'bootstrap': The data vector for a random resampling of the patches keeping the
+          same total number, but allowing some to repeat.  Cross terms from repeated patches
+          are excluded (since they are really auto terms).
+        - 'marked_bootstrap': The data vector for a random resampling of patches in the first
+          catalog, using all patches for the second catalog.  Based on the algorithm in
+          Loh(2008).
+
+        See `estimate_cov` for more details.
+
+        The return value includes both the design matrix and a vector of weights (the total
+        weight array in the computed correlation functions).  The weights are used for the
+        sample method when estimating the covariance matrix.  The other methods ignore them,
+        but they are provided here in case they are useful.
+
+        Parameters:
+            method (str):       Which method to use to estimate the covariance matrix.
+            func (function):    A unary function that acts on the current correlation object and
+                                returns the desired data vector.  [default: None, which is
+                                equivalent to ``lambda corr: corr.getStat()``]
+            comm (mpi comm):    If not None, run under MPI.
+
+        Returns:
+            A, w: numpy arrays with the design matrix and weights respectively.
+        """
+        if func is not None:
+            # Need to convert it to a function of the first item in the list.
+            all_func = lambda corrs: func(corrs[0])
+        else:
+            all_func = None
+        return build_multi_cov_design_matrix([self], method=method, func=all_func, comm=comm)
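+    # A minimal usage sketch (same hypothetical ``ggg`` as above): for 'jackknife',
+    # the design matrix has one row per excluded patch, so A has shape
+    # (npatch, len(ggg.getStat())):
+    #
+    #     >>> A, w = ggg.build_cov_design_matrix('jackknife')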
+
+    def _set_num_threads(self, num_threads):
+        if num_threads is None:
+            num_threads = self.config.get('num_threads',None)
+        if num_threads is None:
+            self.logger.debug('Set num_threads automatically from ncpu')
+        else:
+            self.logger.debug('Set num_threads = %d',num_threads)
+        set_omp_threads(num_threads, self.logger)
+
+    def _set_metric(self, metric, coords1, coords2=None, coords3=None):
+        if metric is None:
+            metric = get(self.config,'metric',str,'Euclidean')
+        coords, metric = parse_metric(metric, coords1, coords2, coords3)
+        if self.coords is not None or self.metric is not None:
+            if coords != self.coords:
+                self.logger.warning("Detected a change in catalog coordinate systems. "+
+                                    "This probably doesn't make sense!")
+            if metric != self.metric:
+                self.logger.warning("Detected a change in metric. "+
+                                    "This probably doesn't make sense!")
+        if metric == 'Periodic':
+            if self.xperiod == 0 or self.yperiod == 0 or (coords=='3d' and self.zperiod == 0):
+                raise ValueError("Periodic metric requires setting the period to use.")
+        else:
+            if self.xperiod != 0 or self.yperiod != 0 or self.zperiod != 0:
+                raise ValueError("period options are not valid for %s metric."%metric)
+        self.coords = coords
+        self.metric = metric
+        self._coords = coord_enum(coords)
+        self._metric = metric_enum(metric)
+
+    def _apply_units(self, mask):
+        if self.coords == 'spherical' and self.metric == 'Euclidean':
+            # Then our distances are all angles.  Convert from the chord distance to a real angle.
+            # L = 2 sin(theta/2)
+            self.meand1[mask] = 2. * np.arcsin(self.meand1[mask]/2.)
+            self.meanlogd1[mask] = np.log(2.*np.arcsin(np.exp(self.meanlogd1[mask])/2.))
+            self.meand2[mask] = 2. * np.arcsin(self.meand2[mask]/2.)
+            self.meanlogd2[mask] = np.log(2.*np.arcsin(np.exp(self.meanlogd2[mask])/2.))
+            self.meand3[mask] = 2. * np.arcsin(self.meand3[mask]/2.)
+            self.meanlogd3[mask] = np.log(2.*np.arcsin(np.exp(self.meanlogd3[mask])/2.))
+
+        self.meand1[mask] /= self._sep_units
+        self.meanlogd1[mask] -= self._log_sep_units
+        self.meand2[mask] /= self._sep_units
+        self.meanlogd2[mask] -= self._log_sep_units
+        self.meand3[mask] /= self._sep_units
+        self.meanlogd3[mask] -= self._log_sep_units
+
+    def _get_minmax_size(self):
+        if self.metric == 'Euclidean':
+            # The minimum separation we care about is that of the smallest size, which is
+            # min_sep * min_u.  Do the same calculation as for 2pt to get to min_size.
+            b1 = min(self.b, self.bu, self.bv)
+            min_size = self._min_sep * self.min_u * b1 / (2.+3.*b1)
+
+            # This time, the maximum size is d1 * b.  d1 can be as high as 2*max_sep.
+            b2 = max(self.b, self.bu, self.bv)
+            max_size = 2. * self._max_sep * b2
+            return min_size, max_size
+        else:
+            return 0., 0.
+
+    # The three-point versions of the covariance helpers.
+    # Note: the word "pairs" in many of these was appropriate for 2pt, but in the 3pt case
+    # these actually refer to triples (i,j,k).
+
+    def _get_npatch(self):
+        return max(self.npatch1, self.npatch2, self.npatch3)
+
+    def _calculate_xi_from_pairs(self, pairs):
+        # Compute the xi data vector for the given list of pairs.
+        # pairs is input as a list of (i,j,k) triples.
+
+        # This is the normal calculation.  It needs to be overridden when there are randoms.
+ self._sum([self.results[ij] for ij in pairs]) + self._finalize() + + def _jackknife_pairs(self): + if self.npatch3 == 1: + if self.npatch2 == 1: + # k=m=0 + return [ [(j,k,m) for j,k,m in self.results.keys() if j!=i] + for i in range(self.npatch1) ] + elif self.npatch1 == 1: + # j=m=0 + return [ [(j,k,m) for j,k,m in self.results.keys() if k!=i] + for i in range(self.npatch2) ] + else: + # m=0 + assert self.npatch1 == self.npatch2 + return [ [(j,k,m) for j,k,m in self.results.keys() if j!=i and k!=i] + for i in range(self.npatch1) ] + elif self.npatch2 == 1: + if self.npatch1 == 1: + # j=k=0 + return [ [(j,k,m) for j,k,m in self.results.keys() if m!=i] + for i in range(self.npatch3) ] + else: + # k=0 + assert self.npatch1 == self.npatch3 + return [ [(j,k,m) for j,k,m in self.results.keys() if j!=i and m!=i] + for i in range(self.npatch1) ] + elif self.npatch1 == 1: + # j=0 + assert self.npatch2 == self.npatch3 + return [ [(j,k,m) for j,k,m in self.results.keys() if k!=i and m!=i] + for i in range(self.npatch2) ] + else: + assert self.npatch1 == self.npatch2 == self.npatch3 + return [ [(j,k,m) for j,k,m in self.results.keys() if j!=i and k!=i and m!=i] + for i in range(self.npatch1) ] + + def _sample_pairs(self): + if self.npatch3 == 1: + if self.npatch2 == 1: + # k=m=0 + return [ [(j,k,m) for j,k,m in self.results.keys() if j==i] + for i in range(self.npatch1) ] + elif self.npatch1 == 1: + # j=m=0 + return [ [(j,k,m) for j,k,m in self.results.keys() if k==i] + for i in range(self.npatch2) ] + else: + # m=0 + assert self.npatch1 == self.npatch2 + return [ [(j,k,m) for j,k,m in self.results.keys() if j==i] + for i in range(self.npatch1) ] + elif self.npatch2 == 1: + if self.npatch1 == 1: + # j=k=0 + return [ [(j,k,m) for j,k,m in self.results.keys() if m==i] + for i in range(self.npatch3) ] + else: + # k=0 + assert self.npatch1 == self.npatch3 + return [ [(j,k,m) for j,k,m in self.results.keys() if j==i] + for i in range(self.npatch1) ] + elif self.npatch1 == 1: + # j=0 + assert self.npatch2 == self.npatch3 + return [ [(j,k,m) for j,k,m in self.results.keys() if k==i] + for i in range(self.npatch2) ] + else: + assert self.npatch1 == self.npatch2 == self.npatch3 + return [ [(j,k,m) for j,k,m in self.results.keys() if j==i] + for i in range(self.npatch1) ] + + @lazy_property + def _ok(self): + ok = np.zeros((self.npatch1, self.npatch2, self.npatch3), dtype=bool) + for (i,j,k) in self.results: + ok[i,j,k] = True + return ok + + def _marked_pairs(self, indx): + if self.npatch3 == 1: + if self.npatch2 == 1: + return [ (i,0,0) for i in indx if self._ok[i,0,0] ] + elif self.npatch1 == 1: + return [ (0,i,0) for i in indx if self._ok[0,i,0] ] + else: + assert self.npatch1 == self.npatch2 + # Select all pairs where first point is in indx (repeating i as appropriate) + return [ (i,j,0) for i in indx for j in range(self.npatch2) if self._ok[i,j,0] ] + elif self.npatch2 == 1: + if self.npatch1 == 1: + return [ (0,0,i) for i in indx if self._ok[0,0,i] ] + else: + assert self.npatch1 == self.npatch3 + # Select all pairs where first point is in indx (repeating i as appropriate) + return [ (i,0,j) for i in indx for j in range(self.npatch3) if self._ok[i,0,j] ] + elif self.npatch1 == 1: + assert self.npatch2 == self.npatch3 + # Select all pairs where first point is in indx (repeating i as appropriate) + return [ (0,i,j) for i in indx for j in range(self.npatch3) if self._ok[0,i,j] ] + else: + assert self.npatch1 == self.npatch2 == self.npatch3 + # Select all pairs where first point is in indx (repeating i 
as appropriate) + return [ (i,j,k) for i in indx for j in range(self.npatch2) + for k in range(self.npatch3) if self._ok[i,j,k] ] + + def _bootstrap_pairs(self, indx): + if self.npatch3 == 1: + if self.npatch2 == 1: + return [ (i,0,0) for i in indx if self._ok[i,0,0] ] + elif self.npatch1 == 1: + return [ (0,i,0) for i in indx if self._ok[0,i,0] ] + else: + assert self.npatch1 == self.npatch2 + return ([ (i,i,0) for i in indx if self._ok[i,i,0] ] + + [ (i,j,0) for i in indx for j in indx if self._ok[i,j,0] and i!=j ]) + elif self.npatch2 == 1: + if self.npatch1 == 1: + return [ (0,0,i) for i in indx if self._ok[0,0,i] ] + else: + assert self.npatch1 == self.npatch3 + return ([ (i,0,i) for i in indx if self._ok[i,0,i] ] + + [ (i,0,j) for i in indx for j in indx if self._ok[i,0,j] and i!=j ]) + elif self.npatch1 == 1: + assert self.npatch2 == self.npatch3 + return ([ (0,i,i) for i in indx if self._ok[0,i,i] ] + + [ (0,i,j) for i in indx for j in indx if self._ok[0,i,j] and i!=j ]) + else: + # Like for 2pt we want to avoid getting extra copies of what are actually + # auto-correlations coming from two indices equalling each other in (i,j,k). + # This time, get each (i,i,i) once. + # Then get (i,i,j), (i,j,i), and (j,i,i) once per each (i,j) pair with i!=j + # repeated as often as they show up in the double for loop. + # Finally get all triples (i,j,k) where they are all different repeated as often + # as they show up in the triple for loop. + assert self.npatch1 == self.npatch2 == self.npatch3 + return ([ (i,i,i) for i in indx if self._ok[i,i,i] ] + + [ (i,i,j) for i in indx for j in indx if self._ok[i,i,j] and i!=j ] + + [ (i,j,i) for i in indx for j in indx if self._ok[i,j,i] and i!=j ] + + [ (j,i,i) for i in indx for j in indx if self._ok[j,i,i] and i!=j ] + + [ (i,j,k) for i in indx for j in indx if i!=j + for k in indx if self._ok[i,j,k] and (i!=k and j!=k) ]) + + def _write(self, writer, name, write_patch_results, zero_tot=False): + # These helper properties define what to write for each class. + col_names = self._write_col_names + data = self._write_data + params = self._write_params + params['num_rows'] = len(self.rnom.ravel()) + + if write_patch_results: + # Note: Only include npatch1, npatch2 in serialization if we are also serializing + # results. Otherwise, the corr that is read in will behave oddly. 
+ params['npatch1'] = self.npatch1 + params['npatch2'] = self.npatch2 + params['npatch3'] = self.npatch3 + num_patch_tri = len(self.results) + if zero_tot: + i = 0 + for key, corr in self.results.items(): + if not corr._nonzero: + zp_name = name + '_zp_%d'%i + params[zp_name] = repr((key, corr.tot)) + num_patch_tri -= 1 + i += 1 + params['num_zero_patch'] = i + params['num_patch_tri'] = num_patch_tri + + writer.write(col_names, data, params=params, ext=name) + if write_patch_results: + writer.set_precision(16) + i = 0 + for key, corr in self.results.items(): + if zero_tot and not corr._nonzero: continue + col_names = corr._write_col_names + data = corr._write_data + params = corr._write_params + params['key'] = repr(key) + pp_name = name + '_pp_%d'%i + writer.write(col_names, data, params=params, ext=pp_name) + i += 1 + assert i == num_patch_tri + + def _read(self, reader, name=None): + name = 'main' if 'main' in reader and name is None else name + params = reader.read_params(ext=name) + num_rows = params.get('num_rows', None) + num_patch_tri = params.get('num_patch_tri', 0) + num_zero_patch = params.get('num_zero_patch', 0) + name = 'main' if num_patch_tri and name is None else name + data = reader.read_data(max_rows=num_rows, ext=name) + + # This helper function defines how to set the attributes for each class + # based on what was read in. + self._read_from_data(data, params) + + self.results = {} + for i in range(num_zero_patch): + zp_name = name + '_zp_%d'%i + key, tot = eval(params[zp_name]) + self.results[key] = self._zero_copy(tot) + for i in range(num_patch_tri): + pp_name = name + '_pp_%d'%i + corr = self.copy() + params = reader.read_params(ext=pp_name) + data = reader.read_data(max_rows=num_rows, ext=pp_name) + corr._read_from_data(data, params) + key = eval(params['key']) + self.results[key] = corr
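+    # A round-trip sketch (hypothetical file name; assumes the public write/read
+    # methods expose these helpers' write_patch_results option):
+    #
+    #     >>> ggg.write('ggg.fits', write_patch_results=True)
+    #     >>> ggg2 = treecorr.GGGCorrelation(config)  # same binning config
+    #     >>> ggg2.read('ggg.fits')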
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/catalog.html b/docs/_build/html/_modules/treecorr/catalog.html
new file mode 100644
index 00000000..97aa2316
--- /dev/null
+++ b/docs/_build/html/_modules/treecorr/catalog.html
@@ -0,0 +1,2613 @@
+ treecorr.catalog — TreeCorr 4.3.0 documentation

Source code for treecorr.catalog

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: catalog
+"""
+
+import numpy as np
+import coord
+import weakref
+import copy
+import os
+
+from . import _lib
+from .reader import FitsReader, HdfReader, AsciiReader, PandasReader, ParquetReader
+from .config import merge_config, setup_logger, get, get_from_list
+from .util import parse_file_type, LRU_Cache, make_writer, make_reader, set_omp_threads
+from .util import double_ptr as dp
+from .util import long_ptr as lp
+from .util import depr_pos_kwargs
+from .field import NField, KField, GField, NSimpleField, KSimpleField, GSimpleField
+
+
[docs]class Catalog(object):
+    """A set of input data (positions and other quantities) to be correlated.
+
+    A Catalog object keeps track of the relevant information for a number of objects to
+    be correlated.  The objects each have some kind of position (for instance (x,y), (ra,dec),
+    (x,y,z), etc.), and possibly some extra information such as weights (w), shear values (g1,g2),
+    or kappa values (k).
+
+    The simplest way to build a Catalog is to simply pass in numpy arrays for each
+    piece of information you want included.  For instance::
+
+        >>> cat = treecorr.Catalog(x=x, y=y, k=k, w=w)
+
+    Each of these input parameters should be a numpy array, where each corresponding element
+    is the value for that object.  Of course, all the arrays should be the same size.
+
+    In some cases, there are additional required parameters.  For instance, with RA and Dec
+    positions, you need to declare what units the given input values use::
+
+        >>> cat = treecorr.Catalog(ra=ra, dec=dec, g1=g1, g2=g2,
+        ...                        ra_units='hour', dec_units='deg')
+
+    For (ra,dec) positions, these units fields are required to specify the units of the angular
+    values.  For (x,y) positions, the units are optional (and usually unnecessary).
+
+    You can also initialize a Catalog by reading in columns from a file.  For instance::
+
+        >>> cat = treecorr.Catalog('data.fits', ra_col='ALPHA2000', dec_col='DELTA2000',
+        ...                        g1_col='E1', g2_col='E2', ra_units='deg', dec_units='deg')
+
+    This reads the given columns from the input file.  The input file may be a FITS file,
+    an HDF5 file, a Parquet file, or an ASCII file.  Normally the file type is determined
+    according to the file's extension (e.g. '.fits' here), but it can also be set explicitly
+    with ``file_type``.
+
+    For FITS, HDF5, and Parquet files, the column names should be strings as shown above.
+    For ASCII files, they may be strings if the input file has column names.  But you may
+    also use integer values giving the index of which column to use.  We use a 1-based convention
+    for these, so x_col=1 would mean to use the first column as the x value. (0 means don't
+    read that column.)
+
+    Finally, you may store all the various parameters in a configuration dict
+    and just pass the dict as an argument after the file name::
+
+        >>> config = { 'ra_col' : 'ALPHA2000',
+        ...            'dec_col' : 'DELTA2000',
+        ...            'g1_col' : 'E1',
+        ...            'g2_col' : 'E2',
+        ...            'ra_units' : 'deg',
+        ...            'dec_units' : 'deg' }
+        >>> cat = treecorr.Catalog(file_name, config)
+
+    This can be useful for encapsulating all the TreeCorr options in a single place in your
+    code, which might be used multiple times.  Notably, this syntax ignores any dict keys
+    that are not relevant to the Catalog construction, so you can use the same config dict
+    for the Catalog and your correlation objects, which can be convenient.
+
+    See also `Configuration Parameters` for complete descriptions of all of the relevant
+    configuration parameters, particularly the first section `Parameters about the input file(s)`.
+
+    You may also override any configuration parameters or add additional parameters as kwargs
+    after the config dict.  For instance, to flip the sign of the g1 values after reading
+    from the input file, you could write::
+
+        >>> cat1 = treecorr.Catalog(file_name, config, flip_g1=True)
+
+    After construction, a Catalog object will have the following attributes:
+
+    Attributes:
+
+        x:      The x positions, if defined, as a numpy array (converted to radians if x_units
+                was given).
(None otherwise)
+        y:      The y positions, if defined, as a numpy array (converted to radians if y_units
+                was given). (None otherwise)
+        z:      The z positions, if defined, as a numpy array. (None otherwise)
+        ra:     The right ascension, if defined, as a numpy array (in radians). (None otherwise)
+        dec:    The declination, if defined, as a numpy array (in radians). (None otherwise)
+        r:      The distance, if defined, as a numpy array. (None otherwise)
+        w:      The weights, as a numpy array. (All 1's if no weight column provided.)
+        wpos:   The weights for position centroiding, as a numpy array, if given. (None otherwise,
+                which means that implicitly wpos = w.)
+        g1:     The g1 component of the shear, if defined, as a numpy array. (None otherwise)
+        g2:     The g2 component of the shear, if defined, as a numpy array. (None otherwise)
+        k:      The convergence, kappa, if defined, as a numpy array. (None otherwise)
+        patch:  The patch number of each object, if patches are being used. (None otherwise)
+                If the entire catalog is a single patch, then ``patch`` may be an int.
+        ntot:   The total number of objects (including those with zero weight if
+                ``keep_zero_weight`` is set to True)
+        nobj:   The number of objects with non-zero weight
+        sumw:   The sum of the weights
+        varg:   The shear variance (aka shape noise) (0 if g1,g2 are not defined)
+
+                .. note::
+
+                    If there are weights, this is really :math:`\\sum(w^2 |g|^2)/\\sum(w)`,
+                    which is more like :math:`\\langle w \\rangle \\mathrm{Var}(g)`.
+                    It is only used for ``var_method='shot'``, where the noise estimate is this
+                    value divided by the total weight per bin, so this is the right quantity
+                    to use for that.
+
+        vark:   The kappa variance (0 if k is not defined)
+
+                .. note::
+
+                    If there are weights, this is really :math:`\\sum(w^2 \\kappa^2)/\\sum(w)`.
+                    As for ``varg``, this is the right quantity to use for the ``'shot'``
+                    noise estimate.
+
+        name:   When constructed from a file, this will be the file_name.  It is only used as
+                a reference name in logging output after construction, so if you construct it
+                from data vectors directly, it will be ``''``.  You may assign to it if you want
+                to give this catalog a specific name.
+
+        coords: Which kind of coordinate system is defined for this catalog.
+                The possibilities for this attribute are:
+
+                - 'flat' = 2-dimensional flat coordinates.  Set when x,y are given.
+                - 'spherical' = spherical coordinates.  Set when ra,dec are given.
+                - '3d' = 3-dimensional coordinates.  Set when x,y,z or ra,dec,r are given.
+
+        field:  If any of the `get?Field <Catalog.getNField>` methods have been called to
+                construct a field from this catalog (either explicitly or implicitly via a
+                `corr.process() <NNCorrelation.process>` command), then this attribute will
+                hold the most recent field to have been constructed.
+
+                .. note::
+
+                    It holds this field as a weakref, so if caching is turned off with
+                    ``resize_cache(0)``, and the field has been garbage collected, then this
+                    attribute will be None.
+
+    Parameters:
+        file_name (str):    The name of the catalog file to be read in. (default: None, in which
+                            case the columns need to be entered directly with ``x``, ``y``, etc.)
+
+        config (dict):      A configuration dict which defines attributes about how to read the
+                            file.  Any optional kwargs may be given here in the config dict if
+                            desired.  Invalid keys in the config dict are ignored. (default: None)
+
+    Keyword Arguments:
+
+        num (int):          Which number catalog are we reading.  e.g. for NG correlations the
+                            catalog for the N has num=0, the one for G has num=1.
This is only
+                            necessary if you are using a config dict where things like ``x_col``
+                            have multiple values. (default: 0)
+        logger:             If desired, a Logger object for logging. (default: None, in which case
+                            one will be built according to the config dict's verbose level.)
+        is_rand (bool):     If this is a random file, then setting is_rand to True will let it
+                            skip k_col, g1_col, and g2_col if they were set for the main catalog.
+                            (default: False)
+
+        x (array):          The x values. (default: None; When providing values directly, either
+                            x,y are required or ra,dec are required.)
+        y (array):          The y values. (default: None; When providing values directly, either
+                            x,y are required or ra,dec are required.)
+        z (array):          The z values, if doing 3d positions. (default: None; invalid in
+                            conjunction with ra, dec.)
+        ra (array):         The RA values. (default: None; When providing values directly, either
+                            x,y are required or ra,dec are required.)
+        dec (array):        The Dec values. (default: None; When providing values directly, either
+                            x,y are required or ra,dec are required.)
+        r (array):          The r values (the distances of each source from Earth). (default: None;
+                            invalid in conjunction with x, y.)
+        w (array):          The weights to apply when computing the correlations. (default: None)
+        wpos (array):       The weights to use for position centroiding. (default: None, which
+                            means to use the value weights, w, to weight the positions as well.)
+        flag (array):       An optional array of flags, indicating objects to skip.  Rows with
+                            flag != 0 (or technically flag & ~ok_flag != 0) will be given a weight
+                            of 0. (default: None)
+        g1 (array):         The g1 values to use for shear correlations. (g1,g2 may represent any
+                            spinor field.) (default: None)
+        g2 (array):         The g2 values to use for shear correlations. (g1,g2 may represent any
+                            spinor field.) (default: None)
+        k (array):          The kappa values to use for scalar correlations. (This may represent
+                            any scalar field.) (default: None)
+        patch (array or int): Optionally, patch numbers to use for each object. (default: None)
+
+                            .. note::
+
+                                This may also be an int if the entire catalog represents a
+                                single patch.  If ``patch_centers`` is given this will select
+                                those items from the full input that correspond to the given
+                                patch number.
+
+        patch_centers (array or str): Alternative to setting patch by hand or using kmeans, you
+                            may instead give patch_centers either as a file name or an array
+                            from which the patches will be determined. (default: None)
+
+        file_type (str):    What kind of file is the input file.  Valid options are 'ASCII',
+                            'FITS', 'HDF', or 'Parquet'. (default: if the file_name extension
+                            starts with .fit, then use 'FITS', or with .hdf, then use 'HDF', or
+                            with '.par', then use 'Parquet', else 'ASCII')
+        delimiter (str):    For ASCII files, what delimiter to use between values. (default: None,
+                            which means any whitespace)
+        comment_marker (str): For ASCII files, what token indicates a comment line. (default: '#')
+        first_row (int):    Which row to take as the first row to be used. (default: 1)
+        last_row (int):     Which row to take as the last row to be used. (default: -1, which
+                            means the last row in the file)
+        every_nth (int):    Only use every nth row of the input catalog. (default: 1)
+
+        npatch (int):       How many patches to split the catalog into (using kmeans if no other
+                            patch information is provided) for the purpose of jackknife variance
+                            or other options that involve running via patches. (default: 1)
+
+                            ..
note:: + + If the catalog has ra,dec,r positions, the patches will + be made using just ra,dec. + + kmeans_init (str): If using kmeans to make patches, which init method to use. + cf. `Field.run_kmeans` (default: 'tree') + kmeans_alt (bool): If using kmeans to make patches, whether to use the alternate kmeans + algorithm. cf. `Field.run_kmeans` (default: False) + + x_col (str or int): The column to use for the x values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column. + When reading from a file, either x_col and y_col are required or ra_col + and dec_col are required.) + y_col (str or int): The column to use for the y values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column. + When reading from a file, either x_col and y_col are required or ra_col + and dec_col are required.) + z_col (str or int): The column to use for the z values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column; + invalid in conjunction with ra_col, dec_col.) + ra_col (str or int): The column to use for the ra values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column. + When reading from a file, either x_col and y_col are required or ra_col + and dec_col are required.) + dec_col (str or int): The column to use for the dec values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column. + When reading from a file, either x_col and y_col are required or ra_col + and dec_col are required.) + r_col (str or int): The column to use for the r values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column; + invalid in conjunction with x_col, y_col.) + + x_units (str): The units to use for the x values, given as a string. Valid options are + arcsec, arcmin, degrees, hours, radians. (default: radians, although + with (x,y) positions, you can often just ignore the units, and the + output separations will be in whatever units x and y are in.) + y_units (str): The units to use for the y values, given as a string. Valid options are + arcsec, arcmin, degrees, hours, radians. (default: radians, although + with (x,y) positions, you can often just ignore the units, and the + output separations will be in whatever units x and y are in.) + ra_units (str): The units to use for the ra values, given as a string. Valid options + are arcsec, arcmin, degrees, hours, radians. (required when using + ra_col or providing ra directly) + dec_units (str): The units to use for the dec values, given as a string. Valid options + are arcsec, arcmin, degrees, hours, radians. (required when using + dec_col or providing dec directly) + + g1_col (str or int): The column to use for the g1 values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column.) + g2_col (str or int): The column to use for the g2 values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column.) + k_col (str or int): The column to use for the kappa values. An integer is only allowed for + ASCII files. (default: '0', which means not to read in this column.) + patch_col (str or int): The column to use for the patch numbers. An integer is only allowed + for ASCII files. (default: '0', which means not to read in this column.) + w_col (str or int): The column to use for the weight values. 
An integer is only allowed for
+                            ASCII files. (default: '0', which means not to read in this column.)
+        wpos_col (str or int): The column to use for the position weight values.  An integer is
+                            only allowed for ASCII files. (default: '0', which means not to read
+                            in this column, in which case wpos=w.)
+        flag_col (str or int): The column to use for the flag values.  An integer is only allowed
+                            for ASCII files.  Any row with flag != 0 (or technically flag &
+                            ~ok_flag != 0) will be given a weight of 0. (default: '0', which
+                            means not to read in this column.)
+        ignore_flag (int):  Which flags should be ignored. (default: all non-zero flags are
+                            ignored.  Equivalent to ignore_flag = ~0.)
+        ok_flag (int):      Which flags should be considered ok. (default: 0.  i.e. all non-zero
+                            flags are ignored.)
+        allow_xyz (bool):   Whether to allow x,y,z values in conjunction with ra,dec.  Normally,
+                            it is an error to have both kinds of positions, but if you know that
+                            the x,y,z values are consistent with the given ra,dec values, it
+                            can save time to input them, rather than calculate them using trig
+                            functions. (default: False)
+
+        flip_g1 (bool):     Whether to flip the sign of the input g1 values. (default: False)
+        flip_g2 (bool):     Whether to flip the sign of the input g2 values. (default: False)
+        keep_zero_weight (bool): Whether to keep objects with wpos=0 in the catalog (including
+                            any objects that indirectly get wpos=0 due to NaN or flags), so they
+                            would be included in ntot and also in npairs calculations that use
+                            this Catalog, although of course they do not contribute to the
+                            accumulated weight of pairs. (default: False)
+        save_patch_dir (str): If desired, when building patches from this Catalog, save them
+                            as FITS files in the given directory for more efficient loading when
+                            doing cross-patch correlations with the ``low_mem`` option.
+
+        ext (int/str):      For FITS/HDF files, which extension to read. (default: 1 for fits,
+                            root for HDF)
+        x_ext (int/str):    Which extension to use for the x values. (default: ext)
+        y_ext (int/str):    Which extension to use for the y values. (default: ext)
+        z_ext (int/str):    Which extension to use for the z values. (default: ext)
+        ra_ext (int/str):   Which extension to use for the ra values. (default: ext)
+        dec_ext (int/str):  Which extension to use for the dec values. (default: ext)
+        r_ext (int/str):    Which extension to use for the r values. (default: ext)
+        g1_ext (int/str):   Which extension to use for the g1 values. (default: ext)
+        g2_ext (int/str):   Which extension to use for the g2 values. (default: ext)
+        k_ext (int/str):    Which extension to use for the k values. (default: ext)
+        patch_ext (int/str): Which extension to use for the patch numbers. (default: ext)
+        w_ext (int/str):    Which extension to use for the w values. (default: ext)
+        wpos_ext (int/str): Which extension to use for the wpos values. (default: ext)
+        flag_ext (int/str): Which extension to use for the flag values. (default: ext)
+
+        verbose (int):      If no logger is provided, this will optionally specify a logging
+                            level to use.
+
+                            - 0 means no logging output
+                            - 1 means to output warnings only (default)
+                            - 2 means to output various progress information
+                            - 3 means to output extensive debugging information
+
+        log_file (str):     If no logger is provided, this will specify a file to write the
+                            logging output. (default: None; i.e. output to standard output)
+
+        split_method (str): How to split the cells in the tree when building the tree structure.
+ Options are: + + - mean: Use the arithmetic mean of the coordinate being split. + (default) + - median: Use the median of the coordinate being split. + - middle: Use the middle of the range; i.e. the average of the + minimum and maximum value. + - random: Use a random point somewhere in the middle two quartiles + of the range. + + cat_precision (int): The precision to use when writing a Catalog to an ASCII file. This + should be an integer, which specifies how many digits to write. + (default: 16) + + rng (RandomState): If desired, a numpy.random.RandomState instance to use for any random + number generation (e.g. kmeans patches). (default: None) + + num_threads (int): How many OpenMP threads to use during the catalog load steps. + (default: use the number of cpu cores) + + .. note:: + + This won't work if the system's C compiler cannot use OpenMP + (e.g. clang prior to version 3.7.) + """ + # Dict describing the valid kwarg parameters, what types they are, and a description: + # Each value is a tuple with the following elements: + # type + # may_be_list + # default value + # list of valid values + # description + _valid_params = { + 'file_type' : (str, True, None, ['ASCII', 'FITS', 'HDF', 'Parquet'], + 'The file type of the input files. The default is to use the file name extension.'), + 'delimiter' : (str, True, None, None, + 'The delimiter between values in an ASCII catalog. The default is any whitespace.'), + 'comment_marker' : (str, True, '#', None, + 'The first (non-whitespace) character of comment lines in an input ASCII catalog.'), + 'first_row' : (int, True, 1, None, + 'The first row to use from the input catalog'), + 'last_row' : (int, True, -1, None, + 'The last row to use from the input catalog. The default is to use all of them.'), + 'every_nth' : (int, True, 1, None, + 'Only use every nth row of the input catalog. The default is to use all of them.'), + 'x_col' : (str, True, '0', None, + 'Which column to use for x. Should be an integer for ASCII catalogs.'), + 'y_col' : (str, True, '0', None, + 'Which column to use for y. Should be an integer for ASCII catalogs.'), + 'z_col' : (str, True, '0', None, + 'Which column to use for z. Should be an integer for ASCII catalogs.'), + 'ra_col' : (str, True, '0', None, + 'Which column to use for ra. Should be an integer for ASCII catalogs.'), + 'dec_col' : (str, True, '0', None, + 'Which column to use for dec. Should be an integer for ASCII catalogs.'), + 'r_col' : (str, True, '0', None, + 'Which column to use for r. Only valid with ra,dec. ', + 'Should be an integer for ASCII catalogs.'), + 'x_units' : (str, True, None, coord.AngleUnit.valid_names, + 'The units of x values.'), + 'y_units' : (str, True, None, coord.AngleUnit.valid_names, + 'The units of y values.'), + 'ra_units' : (str, True, None, coord.AngleUnit.valid_names, + 'The units of ra values. Required when using ra_col.'), + 'dec_units' : (str, True, None, coord.AngleUnit.valid_names, + 'The units of dec values. Required when using dec_col.'), + 'g1_col' : (str, True, '0', None, + 'Which column to use for g1. Should be an integer for ASCII catalogs.'), + 'g2_col' : (str, True, '0', None, + 'Which column to use for g2. Should be an integer for ASCII catalogs.'), + 'k_col' : (str, True, '0', None, + 'Which column to use for kappa. Should be an integer for ASCII catalogs. '), + 'patch_col' : (str, True, '0', None, + 'Which column to use for patch numbers. Should be an integer for ASCII catalogs. '), + 'w_col' : (str, True, '0', None, + 'Which column to use for weight. 
Should be an integer for ASCII catalogs.'), + 'wpos_col' : (str, True, '0', None, + 'Which column to use for position weight. Should be an integer for ASCII ' + 'catalogs.'), + 'flag_col' : (str, True, '0', None, + 'Which column to use for flag. Should be an integer for ASCII catalogs.'), + 'ignore_flag': (int, True, None, None, + 'Ignore objects with flag & ignore_flag != 0 (bitwise &)'), + 'ok_flag': (int, True, 0, None, + 'Ignore objects with flag & ~ok_flag != 0 (bitwise &, ~)'), + 'allow_xyz': (bool, True, False, None, + 'Whether to allow x,y,z inputs in conjunction with ra,dec'), + 'ext': (str, True, None, None, + 'Which extension/group in a fits/hdf file to use. Default=1 (fits), root (hdf)'), + 'x_ext': (str, True, None, None, + 'Which extension to use for the x_col. default is the global ext value.'), + 'y_ext': (str, True, None, None, + 'Which extension to use for the y_col. default is the global ext value.'), + 'z_ext': (str, True, None, None, + 'Which extension to use for the z_col. default is the global ext value.'), + 'ra_ext': (str, True, None, None, + 'Which extension to use for the ra_col. default is the global ext value.'), + 'dec_ext': (str, True, None, None, + 'Which extension to use for the dec_col. default is the global ext value.'), + 'r_ext': (str, True, None, None, + 'Which extension to use for the r_col. default is the global ext value.'), + 'g1_ext': (str, True, None, None, + 'Which extension to use for the g1_col. default is the global ext value.'), + 'g2_ext': (str, True, None, None, + 'Which extension to use for the g2_col. default is the global ext value.'), + 'k_ext': (str, True, None, None, + 'Which extension to use for the k_col. default is the global ext value.'), + 'patch_ext': (str, True, None, None, + 'Which extension to use for the patch_col. default is the global ext value.'), + 'w_ext': (str, True, None, None, + 'Which extension to use for the w_col. default is the global ext value.'), + 'wpos_ext': (str, True, None, None, + 'Which extension to use for the wpos_col. default is the global ext value.'), + 'flag_ext': (str, True, None, None, + 'Which extension to use for the flag_col. default is the global ext value.'), + 'flip_g1' : (bool, True, False, None, + 'Whether to flip the sign of g1'), + 'flip_g2' : (bool, True, False, None, + 'Whether to flip the sign of g2'), + + 'keep_zero_weight' : (bool, False, False, None, + 'Whether to keep objects with zero weight in the catalog'), + 'npatch' : (int, False, 1, None, + 'Number of patches to split the catalog into'), + 'kmeans_init' : (str, False, 'tree', ['tree','random','kmeans++'], + 'Which initialization method to use for kmeans when making patches'), + 'kmeans_alt' : (bool, False, False, None, + 'Whether to use the alternate kmeans algorithm when making patches'), + 'patch_centers' : (str, False, None, None, + 'File with patch centers to use to determine patches'), + 'save_patch_dir' : (str, False, None, None, + 'If desired, save the patches as FITS files in this directory.'), + 'verbose' : (int, False, 1, [0, 1, 2, 3], + 'How verbose the code should be during processing. 
', + '0 = Errors Only, 1 = Warnings, 2 = Progress, 3 = Debugging'), + 'log_file' : (str, False, None, None, + 'If desired, an output file for the logging output.', + 'The default is to write the output to stdout.'), + 'split_method' : (str, False, 'mean', ['mean', 'median', 'middle', 'random'], + 'Which method to use for splitting cells.'), + 'cat_precision' : (int, False, 16, None, + 'The number of digits after the decimal in the output.'), + } + _aliases = { + 'hdu' : 'ext', + 'x_hdu' : 'x_ext', + 'y_hdu' : 'y_ext', + 'z_hdu' : 'z_ext', + 'ra_hdu' : 'ra_ext', + 'dec_hdu' : 'dec_ext', + 'r_hdu' : 'r_ext', + 'g1_hdu' : 'g1_ext', + 'g2_hdu' : 'g2_ext', + 'k_hdu' : 'k_ext', + 'w_hdu' : 'w_ext', + 'wpos_hdu' : 'wpos_ext', + 'flag_hdu' : 'flag_ext', + 'patch_hdu' : 'patch_ext', + } + _emitted_pandas_warning = False # Only emit the warning once. Set to True once we have. + + @depr_pos_kwargs + def __init__(self, file_name=None, config=None, *, num=0, logger=None, is_rand=False, + x=None, y=None, z=None, ra=None, dec=None, r=None, w=None, wpos=None, flag=None, + g1=None, g2=None, k=None, patch=None, patch_centers=None, rng=None, **kwargs): + + self.config = merge_config(config, kwargs, Catalog._valid_params, Catalog._aliases) + self.orig_config = config.copy() if config is not None else {} + if config and kwargs: + self.orig_config.update(kwargs) + self._num = num + self._is_rand = is_rand + + if logger is not None: + self.logger = logger + else: + self.logger = setup_logger(get(self.config,'verbose',int,1), + self.config.get('log_file',None)) + + # Start with everything set to None. Overwrite as appropriate. + self._x = None + self._y = None + self._z = None + self._ra = None + self._dec = None + self._r = None + self._w = None + self._wpos = None + self._flag = None + self._g1 = None + self._g2 = None + self._k = None + self._patch = None + self._field = lambda : None + + self._nontrivial_w = None + self._single_patch = None + self._nobj = None + self._sumw = None + self._sumw2 = None + self._varg = None + self._vark = None + self._patches = None + self._centers = None + self._rng = rng + + first_row = get_from_list(self.config,'first_row',num,int,1) + if first_row < 1: + raise ValueError("first_row should be >= 1") + last_row = get_from_list(self.config,'last_row',num,int,-1) + if last_row > 0 and last_row < first_row: + raise ValueError("last_row should be >= first_row") + if last_row > 0: + self.end = last_row + else: + self.end = None + if first_row > 1: + self.start = first_row-1 + else: + self.start = 0 + self.every_nth = get_from_list(self.config,'every_nth',num,int,1) + if self.every_nth < 1: + raise ValueError("every_nth should be >= 1") + + if 'npatch' in self.config and self.config['npatch'] != 1: + self._npatch = get(self.config,'npatch',int) + if self._npatch < 1: + raise ValueError("npatch must be >= 1") + elif self.config.get('patch_col',0) not in (0,'0'): + self._npatch = None # Mark that we need to finish loading to figure out npatch. + else: + self._npatch = 1 # We might yet change this, but it will be correct at end of init. + + try: + self._single_patch = int(patch) + except TypeError: + pass + else: + patch = None + + if patch_centers is None and 'patch_centers' in self.config: + # file name version may be in a config dict, rather than kwarg. 
+ patch_centers = get(self.config,'patch_centers',str) + + if patch_centers is not None: + if patch is not None or self.config.get('patch_col',0) not in (0,'0'): + raise ValueError("Cannot provide both patch and patch_centers") + if isinstance(patch_centers, np.ndarray): + self._centers = patch_centers + else: + self._centers = self.read_patch_centers(patch_centers) + if self._npatch not in [None, 1, self._centers.shape[0]]: + raise ValueError("npatch is incompatible with provided centers") + self._npatch = self._centers.shape[0] + + self.save_patch_dir = self.config.get('save_patch_dir',None) + allow_xyz = self.config.get('allow_xyz', False) + + # First style -- read from a file + if file_name is not None: + if any([v is not None for v in [x,y,z,ra,dec,r,g1,g2,k,patch,w,wpos,flag]]): + raise TypeError("Vectors may not be provided when file_name is provided.") + self.file_name = file_name + self.name = file_name + if self._single_patch is not None: + self.name += " patch " + str(self._single_patch) + + # Figure out which file type the catalog is + file_type = get_from_list(self.config,'file_type',num) + file_type = parse_file_type(file_type, file_name, output=False, logger=self.logger) + if file_type == 'FITS': + self.reader = FitsReader(file_name) + self._check_file(file_name, self.reader, num, is_rand) + elif file_type == 'HDF': + self.reader = HdfReader(file_name) + self._check_file(file_name, self.reader, num, is_rand) + elif file_type == 'PARQUET': + self.reader = ParquetReader(file_name) + self._check_file(file_name, self.reader, num, is_rand) + else: + delimiter = self.config.get('delimiter',None) + comment_marker = self.config.get('comment_marker','#') + try: + self.reader = PandasReader(file_name, delimiter, comment_marker) + except ImportError: + self._pandas_warning() + self.reader = AsciiReader(file_name, delimiter, comment_marker) + self._check_file(file_name, self.reader, num, is_rand) + + self.file_type = file_type + + # Second style -- pass in the vectors directly + else: + self.file_type = None + if x is not None or y is not None: + if x is None or y is None: + raise TypeError("x and y must both be provided") + if (ra is not None or dec is not None) and not allow_xyz: + raise TypeError("ra and dec may not be provided with x,y") + if r is not None and not allow_xyz: + raise TypeError("r may not be provided with x,y") + if ra is not None or dec is not None: + if ra is None or dec is None: + raise TypeError("ra and dec must both be provided") + if g1 is not None or g2 is not None: + if g1 is None or g2 is None: + raise TypeError("g1 and g2 must both be provided") + self.file_name = None + self.name = '' + if self._single_patch is not None: + self.name = "patch " + str(self._single_patch) + self._x = self.makeArray(x,'x') + self._y = self.makeArray(y,'y') + self._z = self.makeArray(z,'z') + self._ra = self.makeArray(ra,'ra') + self._dec = self.makeArray(dec,'dec') + self._r = self.makeArray(r,'r') + self._w = self.makeArray(w,'w') + self._wpos = self.makeArray(wpos,'wpos') + self._flag = self.makeArray(flag,'flag',int) + self._g1 = self.makeArray(g1,'g1') + self._g2 = self.makeArray(g2,'g2') + self._k = self.makeArray(k,'k') + self._patch = self.makeArray(patch,'patch',int) + if self._patch is not None: + self._set_npatch() + if self._x is not None: + self._apply_xyz_units() + if self._ra is not None: + self._apply_radec_units() + + # Check that all columns have the same length. 
(Such a mismatch is impossible with file input.)
+            if self._x is not None:
+                ntot = len(self._x)
+                if len(self._y) != ntot:
+                    raise ValueError("x and y have different numbers of elements")
+            else:
+                ntot = len(self._ra)
+                if len(self._dec) != ntot:
+                    raise ValueError("ra and dec have different numbers of elements")
+            if self._z is not None and len(self._z) != ntot:
+                raise ValueError("z has the wrong number of elements")
+            if self._r is not None and len(self._r) != ntot:
+                raise ValueError("r has the wrong number of elements")
+            if self._w is not None and len(self._w) != ntot:
+                raise ValueError("w has the wrong number of elements")
+            if self._wpos is not None and len(self._wpos) != ntot:
+                raise ValueError("wpos has the wrong number of elements")
+            if self._g1 is not None and len(self._g1) != ntot:
+                raise ValueError("g1 has the wrong number of elements")
+            if self._g2 is not None and len(self._g2) != ntot:
+                raise ValueError("g2 has the wrong number of elements")
+            if self._k is not None and len(self._k) != ntot:
+                raise ValueError("k has the wrong number of elements")
+            if self._patch is not None and len(self._patch) != ntot:
+                raise ValueError("patch has the wrong number of elements")
+            if ntot == 0:
+                raise ValueError("Input arrays have zero length")
+
+        if x is not None or self.config.get('x_col','0') not in [0,'0']:
+            if 'x_units' in self.config and 'y_units' not in self.config:
+                raise TypeError("x_units specified without specifying y_units")
+            if 'y_units' in self.config and 'x_units' not in self.config:
+                raise TypeError("y_units specified without specifying x_units")
+        else:
+            if 'x_units' in self.config:
+                raise TypeError("x_units is invalid without x")
+            if 'y_units' in self.config:
+                raise TypeError("y_units is invalid without y")
+        if ra is not None or self.config.get('ra_col','0') not in [0,'0']:
+            if not self.config.get('ra_units',None):
+                raise TypeError("ra_units is required when using ra, dec")
+            if not self.config.get('dec_units',None):
+                raise TypeError("dec_units is required when using ra, dec")
+        else:
+            if 'ra_units' in self.config:
+                raise TypeError("ra_units is invalid without ra")
+            if 'dec_units' in self.config:
+                raise TypeError("dec_units is invalid without dec")
+
+        if file_name is None:
+            # For vector input option, can finish up now.
+            if self._single_patch is not None:
+                self._select_patch(self._single_patch)
+            self._finish_input()
+
+    @property
+    def loaded(self):
+        # _x gets set regardless of whether input used x,y or ra,dec, so the state of this
+        # attribute is a good sentinel for whether the file has been loaded yet.
+        return self._x is not None
+
+    @property
+    def x(self):
+        self.load()
+        return self._x
+
+    @property
+    def y(self):
+        self.load()
+        return self._y
+
+    @property
+    def z(self):
+        self.load()
+        return self._z
+
+    @property
+    def ra(self):
+        self.load()
+        return self._ra
+
+    @property
+    def dec(self):
+        self.load()
+        return self._dec
+
+    @property
+    def r(self):
+        self.load()
+        return self._r
+
+    @property
+    def w(self):
+        self.load()
+        return self._w
+
+    @property
+    def wpos(self):
+        self.load()
+        return self._wpos
+
+    @property
+    def g1(self):
+        self.load()
+        return self._g1
+
+    @property
+    def g2(self):
+        self.load()
+        return self._g2
+
+    @property
+    def k(self):
+        self.load()
+        return self._k
+
+    @property
+    def npatch(self):
+        if self._npatch is None:
+            self.load()
+        return self._npatch
+
+    @property
+    def patch(self):
+        if self._single_patch is not None:
+            return self._single_patch
+        else:
+            self.load()
+            return self._patch
+
+    @property
+    def patches(self):
+        return self.get_patches()
+
+    @property
+    def patch_centers(self):
+        return self.get_patch_centers()
+
+    @property
+    def varg(self):
+        if self._varg is None:
+            if self.nontrivial_w:
+                if self.g1 is not None:
+                    use = self.w != 0
+                    self._varg = np.sum(self.w[use]**2 * (self.g1[use]**2 + self.g2[use]**2))
+                    # The 2 is because we need the variance _per component_.
+                    self._varg /= 2.*self.sumw
+                else:
+                    self._varg = 0.
+            else:
+                if self.g1 is not None:
+                    self._varg = np.sum(self.g1**2 + self.g2**2) / (2.*self.nobj)
+                else:
+                    self._varg = 0.
+        return self._varg
+
+    @property
+    def vark(self):
+        if self._vark is None:
+            if self.nontrivial_w:
+                if self.k is not None:
+                    use = self.w != 0
+                    self._meank = np.sum(self.w[use] * self.k[use]) / self.sumw
+                    self._meank2 = np.sum(self.w[use]**2 * self.k[use]) / self.sumw2
+                    self._vark = np.sum(self.w[use]**2 * (self.k[use]-self._meank)**2) / self.sumw
+                else:
+                    self._meank = self._meank2 = 0.
+                    self._vark = 0.
+            else:
+                if self.k is not None:
+                    self._meank = self._meank2 = np.mean(self.k)
+                    self._vark = np.sum((self.k-self._meank)**2) / self.nobj
+                else:
+                    self._meank = self._meank2 = 0.
+                    self._vark = 0.
+        return self._vark
+
+    @property
+    def nontrivial_w(self):
+        if self._nontrivial_w is None: self.load()
+        return self._nontrivial_w
+
+    @property
+    def ntot(self):
+        return len(self.x)
+
+    @property
+    def nobj(self):
+        if self._nobj is None:
+            if self.nontrivial_w:
+                use = self._w != 0
+                self._nobj = np.sum(use)
+            else:
+                self._nobj = self.ntot
+        return self._nobj
+
+    @property
+    def sumw(self):
+        if self._sumw is None: self.load()
+        return self._sumw
+
+    @property
+    def sumw2(self):
+        if self._sumw2 is None:
+            if self.nontrivial_w:
+                self._sumw2 = np.sum(self.w**2)
+            else:
+                self._sumw2 = self.ntot
+        return self._sumw2
+
+    @property
+    def coords(self):
+        if self.ra is not None:
+            if self.r is None:
+                return 'spherical'
+            else:
+                return '3d'
+        else:
+            if self.z is None:
+                return 'flat'
+            else:
+                return '3d'
+
+    def _get_center_size(self):
+        if not hasattr(self, '_cen_s'):
+            mx = np.mean(self.x)
+            my = np.mean(self.y)
+            mz = 0
+            dsq = (self.x - mx)**2 + (self.y - my)**2
+            if self.z is not None:
+                mz = np.mean(self.z)
+                dsq += (self.z - mz)**2
+            s = np.max(dsq)**0.5
+            self._cen_s = (mx, my, mz, s)
+        return self._cen_s
+
+    def _finish_input(self):
+        # Finish processing the data based on given inputs.
+ + # Apply flips if requested + flip_g1 = get_from_list(self.config,'flip_g1',self._num,bool,False) + flip_g2 = get_from_list(self.config,'flip_g2',self._num,bool,False) + if flip_g1: + self.logger.info(" Flipping sign of g1.") + self._g1 = -self._g1 + if flip_g2: + self.logger.info(" Flipping sign of g2.") + self._g2 = -self._g2 + + # Convert the flag to a weight + if self._flag is not None: + if 'ignore_flag' in self.config: + ignore_flag = get_from_list(self.config,'ignore_flag',self._num,int) + else: + ok_flag = get_from_list(self.config,'ok_flag',self._num,int,0) + ignore_flag = ~ok_flag + # If we don't already have a weight column, make one with all values = 1. + if self._w is None: + self._w = np.ones_like(self._flag, dtype=float) + self._w[(self._flag & ignore_flag)!=0] = 0 + if self._wpos is not None: + self._wpos[(self._flag & ignore_flag)!=0] = 0 + self.logger.debug('Applied flag') + + # Check for NaN's: + self.checkForNaN(self._x,'x') + self.checkForNaN(self._y,'y') + self.checkForNaN(self._z,'z') + self.checkForNaN(self._ra,'ra') + self.checkForNaN(self._dec,'dec') + self.checkForNaN(self._r,'r') + self.checkForNaN(self._g1,'g1') + self.checkForNaN(self._g2,'g2') + self.checkForNaN(self._k,'k') + self.checkForNaN(self._w,'w') + self.checkForNaN(self._wpos,'wpos') + + # If using ra/dec, generate x,y,z + # Note: This also makes self.ntot work properly. + self._generate_xyz() + + # Copy w to wpos if necessary (Do this after checkForNaN's, since this may set some + # entries to have w=0.) + if self._wpos is None: + self.logger.debug('Using w for wpos') + else: + # Check that any wpos == 0 points also have w == 0 + if np.any(self._wpos == 0.): + if self._w is None: + self.logger.warning('Some wpos values are zero, setting w=0 for these points.') + self._w = np.ones((self.ntot), dtype=float) + else: + if np.any(self._w[self._wpos == 0.] != 0.): + self.logger.error('Some wpos values = 0 but have w!=0. This is invalid.\n' + 'Setting w=0 for these points.') + self._w[self._wpos == 0.] = 0. + + if self._w is not None: + self._nontrivial_w = True + self._sumw = np.sum(self._w) + if self._sumw == 0: + raise ValueError("Catalog has invalid sumw == 0") + else: + self._nontrivial_w = False + self._sumw = self.ntot + # Make w all 1s to simplify the use of w later in code. + self._w = np.ones((self.ntot), dtype=float) + + keep_zero_weight = get(self.config,'keep_zero_weight',bool,False) + if self._nontrivial_w and not keep_zero_weight: + wpos = self._wpos if self._wpos is not None else self._w + if np.any(wpos == 0): + self.select(np.where(wpos != 0)[0]) + + if self._single_patch is not None or self._patch is not None: + # Easier to get these options out of the way first. 
+            pass
+        elif self._centers is not None:
+            if ((self.coords == 'flat' and self._centers.shape[1] != 2) or
+                (self.coords != 'flat' and self._centers.shape[1] != 3)):
+                raise ValueError("Centers array has wrong shape.")
+            self._assign_patches()
+            self.logger.info("Assigned patch numbers according to %d centers",self._npatch)
+        elif self._npatch is not None and self._npatch != 1:
+            init = get(self.config,'kmeans_init',str,'tree')
+            alt = get(self.config,'kmeans_alt',bool,False)
+            max_top = int.bit_length(self._npatch)-1
+            c = 'spherical' if self._ra is not None else self.coords
+            field = self.getNField(max_top=max_top, coords=c)
+            self.logger.info("Finding %d patches using kmeans.",self._npatch)
+            self._patch, self._centers = field.run_kmeans(self._npatch, init=init, alt=alt)
+            # Clear the cached NField, since we will almost certainly not want this
+            # particular one again, even if doing N-based correlations (since max_top, etc.
+            # is almost certainly going to be different).
+            self.nfields.clear()
+
+        self.logger.info("   nobj = %d",self.nobj)
+
+    def _assign_patches(self):
+        # This is equivalent to the following:
+        #     field = self.getNField()
+        #     self._patch = field.kmeans_assign_patches(self._centers)
+        # However, when the field is not already created, it's faster to just run through
+        # all the points directly and assign which one is closest.
+        self._patch = np.empty(self.ntot, dtype=int)
+        centers = np.ascontiguousarray(self._centers)
+        set_omp_threads(self.config.get('num_threads',None))
+        _lib.QuickAssign(dp(centers), self._npatch,
+                         dp(self.x), dp(self.y), dp(self.z), lp(self._patch), self.ntot)
+
+    def _set_npatch(self):
+        npatch = max(self._patch) + 1
+        if self._npatch not in [None, 1] and npatch > self._npatch:
+            # Note: it's permissible for self._npatch to be larger, but not smaller.
+            raise ValueError("npatch is incompatible with provided patch numbers")
+        self._npatch = npatch
+        self.logger.info("Assigned patch numbers 0..%d",self._npatch-1)
+
+    def _get_patch_index(self, single_patch):
+        if self._patch is not None:
+            # This is straightforward.  Just select the rows with patch == single_patch.
+            use = np.where(self._patch == single_patch)[0]
+        elif self._centers is not None:
+            self._generate_xyz()
+            use = np.empty(self.ntot, dtype=int)
+            from .util import double_ptr as dp
+            from .util import long_ptr as lp
+            npatch = self._centers.shape[0]
+            centers = np.ascontiguousarray(self._centers)
+            if self._z is None:
+                assert centers.shape[1] == 2
+            else:
+                assert centers.shape[1] == 3
+            set_omp_threads(self.config.get('num_threads',None))
+            _lib.SelectPatch(single_patch, dp(centers), npatch,
+                             dp(self._x), dp(self._y), dp(self._z),
+                             lp(use), self.ntot)
+            use = np.where(use)[0]
+        else:
+            use = slice(None)  # Which ironically means use all. :)
+            return use
+
+    def _apply_radec_units(self):
+        self.ra_units = get_from_list(self.config,'ra_units',self._num)
+        self.dec_units = get_from_list(self.config,'dec_units',self._num)
+        self._ra *= self.ra_units
+        self._dec *= self.dec_units
+
+    def _apply_xyz_units(self):
+        self.x_units = get_from_list(self.config,'x_units',self._num,str,'radians')
+        self.y_units = get_from_list(self.config,'y_units',self._num,str,'radians')
+        self._x *= self.x_units
+        self._y *= self.y_units
+
+    def _generate_xyz(self):
+        if self._x is None:
+            assert self._y is None
+            assert self._z is None
+            assert self._ra is not None
+            assert self._dec is not None
+            ntot = len(self._ra)
+            self._x = np.empty(ntot, dtype=float)
+            self._y = np.empty(ntot, dtype=float)
+            self._z = np.empty(ntot, dtype=float)
+            from .util import double_ptr as dp
+            set_omp_threads(self.config.get('num_threads',None))
+            _lib.GenerateXYZ(dp(self._x), dp(self._y), dp(self._z),
+                             dp(self._ra), dp(self._dec), dp(self._r), ntot)
+            self.x_units = self.y_units = 1.
+
+    def _select_patch(self, single_patch):
+        # Trim the catalog to only include a single patch.
+        # Note: This is slightly inefficient in that it reads the whole catalog first
+        # and then removes all but one patch.  But that's easier for now than figuring out
+        # which items to remove along the way based on the patch_centers.
+        indx = self._get_patch_index(single_patch)
+        self._patch = None
+        self.select(indx)
+
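+    # Illustrative sketch (not part of the TreeCorr source): the _lib.GenerateXYZ
+    # call above presumably performs the standard spherical-to-Cartesian
+    # conversion, which in pure NumPy would look like:
+    #
+    #     >>> x = np.cos(dec) * np.cos(ra)     # ra, dec already in radians here
+    #     >>> y = np.cos(dec) * np.sin(ra)
+    #     >>> z = np.sin(dec)
+    #     >>> if r is not None:
+    #     ...     x, y, z = x*r, y*r, z*r      # scale to 3d positions if r given
+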
[docs]    def select(self, indx):
+        """Trim the catalog to only include those objects with the given indices.
+
+        Parameters:
+            indx:   A numpy array of index values to keep.
+        """
+        if type(indx) == slice and indx == slice(None):
+            return
+        self._x = self._x[indx] if self._x is not None else None
+        self._y = self._y[indx] if self._y is not None else None
+        self._z = self._z[indx] if self._z is not None else None
+        self._ra = self._ra[indx] if self._ra is not None else None
+        self._dec = self._dec[indx] if self._dec is not None else None
+        self._r = self._r[indx] if self._r is not None else None
+        self._w = self._w[indx] if self._w is not None else None
+        self._wpos = self._wpos[indx] if self._wpos is not None else None
+        self._g1 = self._g1[indx] if self._g1 is not None else None
+        self._g2 = self._g2[indx] if self._g2 is not None else None
+        self._k = self._k[indx] if self._k is not None else None
+        self._patch = self._patch[indx] if self._patch is not None else None
+ +
[docs] def makeArray(self, col, col_str, dtype=float): + """Turn the input column into a numpy array if it wasn't already. + Also make sure the input is 1-d. + + Parameters: + col (array-like): The input column to be converted into a numpy array. + col_str (str): The name of the column. Used only as information in logging output. + dtype (type): The dtype for the returned array. (default: float) + + Returns: + The column converted to a 1-d numpy array. + """ + if col is not None: + col = np.array(col,dtype=dtype) + if len(col.shape) != 1: + s = col.shape + col = col.reshape(-1) + self.logger.warning("Warning: Input %s column was not 1-d.\n"%col_str + + " Reshaping from %s to %s"%(s,col.shape)) + col = np.ascontiguousarray(col[self.start:self.end:self.every_nth]) + return col
+ +
[docs]    def checkForNaN(self, col, col_str):
+        """Check if the column has any NaNs.  If so, set those rows to have w=0.
+
+        Parameters:
+            col (array):    The input column to check.
+            col_str (str):  The name of the column.  Used only as information in logging output.
+        """
+        if col is not None and np.any(np.isnan(col)):
+            index = np.where(np.isnan(col))[0]
+            s = 's' if len(index) > 1 else ''
+            self.logger.warning("Warning: %d NaN%s found in %s column.",len(index),s,col_str)
+            if len(index) < 20:
+                self.logger.info("Skipping row%s %s.",s,index.tolist())
+            else:
+                self.logger.info("Skipping rows starting %s",
+                                 str(index[:10].tolist()).replace(']',' ...]'))
+            if self._w is None:
+                self._w = np.ones_like(col, dtype=float)
+            self._w[index] = 0
+            col[index] = 0  # Don't leave the nans there.
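+
+    # A minimal sketch of the effect (not in the source; uses keep_zero_weight so
+    # the zero-weight row is not dropped afterward):
+    #
+    #     >>> import numpy as np, treecorr
+    #     >>> k = np.array([0.1, np.nan, 0.3])
+    #     >>> cat = treecorr.Catalog(x=np.zeros(3), y=np.arange(3.), k=k,
+    #     ...                        keep_zero_weight=True)
+    #     >>> cat.w
+    #     array([1., 0., 1.])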
+
+    def _check_file(self, file_name, reader, num=0, is_rand=False):
+        # Just check the consistency of the various column numbers so we can fail fast.
+
+        # Get the column names
+        x_col = get_from_list(self.config,'x_col',num,str,'0')
+        y_col = get_from_list(self.config,'y_col',num,str,'0')
+        z_col = get_from_list(self.config,'z_col',num,str,'0')
+        ra_col = get_from_list(self.config,'ra_col',num,str,'0')
+        dec_col = get_from_list(self.config,'dec_col',num,str,'0')
+        r_col = get_from_list(self.config,'r_col',num,str,'0')
+        w_col = get_from_list(self.config,'w_col',num,str,'0')
+        wpos_col = get_from_list(self.config,'wpos_col',num,str,'0')
+        flag_col = get_from_list(self.config,'flag_col',num,str,'0')
+        g1_col = get_from_list(self.config,'g1_col',num,str,'0')
+        g2_col = get_from_list(self.config,'g2_col',num,str,'0')
+        k_col = get_from_list(self.config,'k_col',num,str,'0')
+        patch_col = get_from_list(self.config,'patch_col',num,str,'0')
+        allow_xyz = self.config.get('allow_xyz', False)
+
+        if x_col != '0' or y_col != '0':
+            if x_col == '0':
+                raise ValueError("x_col missing for file %s"%file_name)
+            if y_col == '0':
+                raise ValueError("y_col missing for file %s"%file_name)
+            if ra_col != '0' and not allow_xyz:
+                raise ValueError("ra_col not allowed in conjunction with x/y cols")
+            if dec_col != '0' and not allow_xyz:
+                raise ValueError("dec_col not allowed in conjunction with x/y cols")
+            if r_col != '0' and not allow_xyz:
+                raise ValueError("r_col not allowed in conjunction with x/y cols")
+        elif ra_col != '0' or dec_col != '0':
+            if ra_col == '0':
+                raise ValueError("ra_col missing for file %s"%file_name)
+            if dec_col == '0':
+                raise ValueError("dec_col missing for file %s"%file_name)
+            if z_col != '0' and not allow_xyz:
+                raise ValueError("z_col not allowed in conjunction with ra/dec cols")
+        else:
+            raise ValueError("No valid position columns specified for file %s"%file_name)
+
+        if g1_col == '0' and isGColRequired(self.orig_config,num):
+            raise ValueError("g1_col is missing for file %s"%file_name)
+        if g2_col == '0' and isGColRequired(self.orig_config,num):
+            raise ValueError("g2_col is missing for file %s"%file_name)
+        if k_col == '0' and isKColRequired(self.orig_config,num):
+            raise ValueError("k_col is missing for file %s"%file_name)
+
+        # Either both should be 0 or both != 0.
+        if (g1_col == '0') != (g2_col == '0'):
+            raise ValueError("g1_col, g2_col=(%s, %s) are invalid for file %s"%(
+                             g1_col,g2_col,file_name))
+
+        # This opens the file enough to read things inside.  The full read doesn't happen here.
+        with reader:
+
+            # Get the vanilla "ext" parameter.
+            ext = get_from_list(self.config, 'ext', num, str, reader.default_ext)
+
+            # Technically, this doesn't catch all possible errors.  If someone specifies
+            # an invalid flag_ext or something, then they'll get the fitsio error message.
+            # But this should probably catch the majority of error cases.
+            reader.check_valid_ext(ext)
+
+            if x_col != '0':
+                x_ext = get_from_list(self.config, 'x_ext', num, str, ext)
+                y_ext = get_from_list(self.config, 'y_ext', num, str, ext)
+                if x_col not in reader.names(x_ext):
+                    raise ValueError("x_col=%s is invalid for file %s"%(x_col,file_name))
+                if y_col not in reader.names(y_ext):
+                    raise ValueError("y_col=%s is invalid for file %s"%(y_col, file_name))
+                if z_col != '0':
+                    z_ext = get_from_list(self.config, 'z_ext', num, str, ext)
+                    if z_col not in reader.names(z_ext):
+                        raise ValueError("z_col=%s is invalid for file %s"%(z_col, file_name))
+            else:
+                ra_ext = get_from_list(self.config, 'ra_ext', num, str, ext)
+                dec_ext = get_from_list(self.config, 'dec_ext', num, str, ext)
+                if ra_col not in reader.names(ra_ext):
+                    raise ValueError("ra_col=%s is invalid for file %s"%(ra_col, file_name))
+                if dec_col not in reader.names(dec_ext):
+                    raise ValueError("dec_col=%s is invalid for file %s"%(dec_col, file_name))
+                if r_col != '0':
+                    r_ext = get_from_list(self.config, 'r_ext', num, str, ext)
+                    if r_col not in reader.names(r_ext):
+                        raise ValueError("r_col=%s is invalid for file %s"%(r_col, file_name))
+
+            if w_col != '0':
+                w_ext = get_from_list(self.config, 'w_ext', num, str, ext)
+                if w_col not in reader.names(w_ext):
+                    raise ValueError("w_col=%s is invalid for file %s"%(w_col, file_name))
+
+            if wpos_col != '0':
+                wpos_ext = get_from_list(self.config, 'wpos_ext', num, str, ext)
+                if wpos_col not in reader.names(wpos_ext):
+                    raise ValueError("wpos_col=%s is invalid for file %s"%(wpos_col, file_name))
+
+            if flag_col != '0':
+                flag_ext = get_from_list(self.config, 'flag_ext', num, str, ext)
+                if flag_col not in reader.names(flag_ext):
+                    raise ValueError("flag_col=%s is invalid for file %s"%(flag_col, file_name))
+
+            if patch_col != '0':
+                patch_ext = get_from_list(self.config, 'patch_ext', num, str, ext)
+                if patch_col not in reader.names(patch_ext):
+                    raise ValueError("patch_col=%s is invalid for file %s"%(patch_col, file_name))
+
+            if is_rand: return
+
+            if g1_col != '0':
+                g1_ext = get_from_list(self.config, 'g1_ext', num, str, ext)
+                g2_ext = get_from_list(self.config, 'g2_ext', num, str, ext)
+                if (g1_col not in reader.names(g1_ext) or
+                    g2_col not in reader.names(g2_ext)):
+                    if isGColRequired(self.orig_config,num):
+                        raise ValueError(
+                            "g1_col, g2_col=(%s, %s) are invalid for file %s"%(
+                            g1_col, g2_col, file_name))
+                    else:
+                        self.logger.warning(
+                            "Warning: skipping g1_col, g2_col=(%s, %s) for %s, num=%d "%(
+                            g1_col, g2_col, file_name, num) +
+                            "because they are invalid, but unneeded.")
+
+            if k_col != '0':
+                k_ext = get_from_list(self.config, 'k_ext', num, str, ext)
+                if k_col not in reader.names(k_ext):
+                    if isKColRequired(self.orig_config,num):
+                        raise ValueError("k_col=%s is invalid for file %s"%(k_col, file_name))
+                    else:
+                        self.logger.warning(
+                            "Warning: skipping k_col=%s for %s, num=%d "%(
+                            k_col, file_name, num) +
+                            "because it is invalid, but unneeded.")
+
+    def _pandas_warning(self):
+        if Catalog._emitted_pandas_warning:
+            return
+        self.logger.warning(
+            "Unable to import pandas.  Using np.genfromtxt instead.\n"+
+            "Installing pandas is recommended for increased speed when "+
+            "reading ASCII catalogs.")
+        Catalog._emitted_pandas_warning = True
+
+    def _read_file(self, file_name, reader, num, is_rand):
+        # Helper functions for things we might do in one of two places.
+ def set_pos(data, x_col, y_col, z_col, ra_col, dec_col, r_col): + if x_col != '0' and x_col in data: + self._x = data[x_col].astype(float) + self.logger.debug('read x') + self._y = data[y_col].astype(float) + self.logger.debug('read y') + if z_col != '0': + self._z = data[z_col].astype(float) + self.logger.debug('read z') + self._apply_xyz_units() + if ra_col != '0' and ra_col in data: + self._ra = data[ra_col].astype(float) + self.logger.debug('read ra') + self._dec = data[dec_col].astype(float) + self.logger.debug('read dec') + if r_col != '0': + self._r = data[r_col].astype(float) + self.logger.debug('read r') + self._apply_radec_units() + + def set_patch(data, patch_col): + if patch_col != '0' and patch_col in data: + self._patch = data[patch_col].astype(int) + self.logger.debug('read patch') + self._set_npatch() + + # Get the column names + x_col = get_from_list(self.config,'x_col',num,str,'0') + y_col = get_from_list(self.config,'y_col',num,str,'0') + z_col = get_from_list(self.config,'z_col',num,str,'0') + ra_col = get_from_list(self.config,'ra_col',num,str,'0') + dec_col = get_from_list(self.config,'dec_col',num,str,'0') + r_col = get_from_list(self.config,'r_col',num,str,'0') + w_col = get_from_list(self.config,'w_col',num,str,'0') + wpos_col = get_from_list(self.config,'wpos_col',num,str,'0') + flag_col = get_from_list(self.config,'flag_col',num,str,'0') + g1_col = get_from_list(self.config,'g1_col',num,str,'0') + g2_col = get_from_list(self.config,'g2_col',num,str,'0') + k_col = get_from_list(self.config,'k_col',num,str,'0') + patch_col = get_from_list(self.config,'patch_col',num,str,'0') + + with reader: + + ext = get_from_list(self.config, 'ext', num, str, reader.default_ext) + + # Figure out what slice to use. If all rows, then None is faster, + # otherwise give the range explicitly. + if self.start == 0 and self.end is None and self.every_nth == 1: + s = slice(None) + # fancy indexing in h5py is incredibly slow, so we explicitly + # check if we can slice or not. This checks for the fitsio version in + # the fits case + elif reader.can_slice: + s = slice(self.start, self.end, self.every_nth) + else: + # Note: this is a workaround for a bug in fitsio <= 1.0.6. + # cf. https://github.com/esheldon/fitsio/pull/286 + # We should be able to always use s = slice(self.start, self.end, self.every_nth) + if x_col != '0': + x_ext = get_from_list(self.config, 'x_ext', num, str, ext) + col = x_col + else: + x_ext = get_from_list(self.config, 'ra_ext', num, str, ext) + col = ra_col + end = self.end if self.end is not None else reader.row_count(col, x_ext) + s = np.arange(self.start, end, self.every_nth) + + all_cols = [x_col, y_col, z_col, + ra_col, dec_col, r_col, + patch_col, + w_col, wpos_col, flag_col, + g1_col, g2_col, k_col] + + # It's faster in FITS to read in all the columns in one read, rather than individually. + # Typically (very close to always!), all the columns are in the same extension. + # Thus, the following would normally work fine. + # use_cols = [c for c in all_cols if c != '0'] + # data = fits[ext][use_cols][:] + # However, we allow the option to have different columns read from different extensions. + # So this is slightly more complicated. 
+ x_ext = get_from_list(self.config, 'x_ext', num, str, ext) + y_ext = get_from_list(self.config, 'y_ext', num, str, ext) + z_ext = get_from_list(self.config, 'z_ext', num, str, ext) + ra_ext = get_from_list(self.config, 'ra_ext', num, str, ext) + dec_ext = get_from_list(self.config, 'dec_ext', num, str, ext) + r_ext = get_from_list(self.config, 'r_ext', num, str, ext) + patch_ext = get_from_list(self.config, 'patch_ext', num, str, ext) + w_ext = get_from_list(self.config, 'w_ext', num, str, ext) + wpos_ext = get_from_list(self.config, 'wpos_ext', num, str, ext) + flag_ext = get_from_list(self.config, 'flag_ext', num, str, ext) + g1_ext = get_from_list(self.config, 'g1_ext', num, str, ext) + g2_ext = get_from_list(self.config, 'g2_ext', num, str, ext) + k_ext = get_from_list(self.config, 'k_ext', num, str, ext) + all_exts = [x_ext, y_ext, z_ext, + ra_ext, dec_ext, r_ext, + patch_ext, + w_ext, wpos_ext, flag_ext, + g1_ext, g2_ext, k_ext] + col_by_ext = dict(zip(all_cols,all_exts)) + all_exts = set(all_exts + [ext]) + all_cols = [c for c in all_cols if c != '0'] + + data = {} + # Also, if we are only reading in one patch, we should adjust s before doing this. + if self._single_patch is not None: + if patch_col != '0': + data[patch_col] = reader.read(patch_col, s, patch_ext) + all_cols.remove(patch_col) + set_patch(data, patch_col) + elif self._centers is not None: + pos_cols = [x_col, y_col, z_col, ra_col, dec_col, r_col] + pos_cols = [c for c in pos_cols if c != '0'] + for c in pos_cols: + all_cols.remove(c) + for ext in all_exts: + use_cols1 = [c for c in pos_cols if col_by_ext[c] == ext] + data1 = reader.read(use_cols1, s, ext) + for c in use_cols1: + data[c] = data1[c] + set_pos(data, x_col, y_col, z_col, ra_col, dec_col, r_col) + x_col = y_col = z_col = ra_col = dec_col = r_col = '0' + use = self._get_patch_index(self._single_patch) + self.select(use) + if isinstance(s,np.ndarray): + s = s[use] + elif s == slice(None): + s = use + else: + end1 = np.max(use)+s.start+1 + s = np.arange(s.start, end1, s.step)[use] + self._patch = None + data = {} # Start fresh, since the ones we used so far are done. + + # We might actually be done now, in which case, just return. + # (Else the fits read below won't actually work.) 
+ if len(all_cols) == 0 or (isinstance(s,np.ndarray) and len(s) == 0): + return + + # Now read the rest using the updated s + for ext in all_exts: + use_cols1 = [c for c in all_cols + if col_by_ext[c] == ext and c in reader.names(ext)] + if len(use_cols1) == 0: + continue + data1 = reader.read(use_cols1, s, ext) + for c in use_cols1: + data[c] = data1[c] + + # Set position values + set_pos(data, x_col, y_col, z_col, ra_col, dec_col, r_col) + + # Set patch + set_patch(data, patch_col) + + # Set w + if w_col != '0': + self._w = data[w_col].astype(float) + self.logger.debug('read w') + + # Set wpos + if wpos_col != '0': + self._wpos = data[wpos_col].astype(float) + self.logger.debug('read wpos') + + # Set flag + if flag_col != '0': + self._flag = data[flag_col].astype(int) + self.logger.debug('read flag') + + # Skip g1,g2,k if this file is a random catalog + if not is_rand: + # Set g1,g2 + if g1_col in reader.names(g1_ext): + self._g1 = data[g1_col].astype(float) + self.logger.debug('read g1') + self._g2 = data[g2_col].astype(float) + self.logger.debug('read g2') + + # Set k + if k_col in reader.names(k_ext): + self._k = data[k_col].astype(float) + self.logger.debug('read k') + + @property + def nfields(self): + if not hasattr(self, '_nfields'): + # Make simple functions that call NField, etc. with self as the first argument. + # Note: LRU_Cache keys on the args, not kwargs, so everything but logger should + # be in args for this function. We convert them to kwargs for the NFields init call. + def get_nfield(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng, logger=None): + return NField(self, min_size=min_size, max_size=max_size, + split_method=split_method, brute=brute, + min_top=min_top, max_top=max_top, coords=coords, + rng=rng, logger=logger) + # Now wrap these in LRU_Caches with (initially) just 1 element being cached. + self._nfields = LRU_Cache(get_nfield, 1) + return self._nfields + + @property + def kfields(self): + if not hasattr(self, '_kfields'): + def get_kfield(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng, logger=None): + return KField(self, min_size=min_size, max_size=max_size, + split_method=split_method, brute=brute, + min_top=min_top, max_top=max_top, coords=coords, + rng=rng, logger=logger) + self._kfields = LRU_Cache(get_kfield, 1) + return self._kfields + + @property + def gfields(self): + if not hasattr(self, '_gfields'): + def get_gfield(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng, logger=None): + return GField(self, min_size=min_size, max_size=max_size, + split_method=split_method, brute=brute, + min_top=min_top, max_top=max_top, coords=coords, + rng=rng, logger=logger) + self._gfields = LRU_Cache(get_gfield, 1) + return self._gfields + + @property + def nsimplefields(self): + if not hasattr(self, '_nsimplefields'): + def get_nsimplefield(logger=None): + return NSimpleField(self, logger=logger) + self._nsimplefields = LRU_Cache(get_nsimplefield, 1) + return self._nsimplefields + + @property + def ksimplefields(self): + if not hasattr(self, '_ksimplefields'): + def get_ksimplefield(logger=None): + return KSimpleField(self, logger=logger) + self._ksimplefields = LRU_Cache(get_ksimplefield, 1) + return self._ksimplefields + + @property + def gsimplefields(self): + if not hasattr(self, '_gsimplefields'): + def get_gsimplefield(logger=None): + return GSimpleField(self, logger=logger) + self._gsimplefields = LRU_Cache(get_gsimplefield, 1) + return self._gsimplefields + +
[docs]    def resize_cache(self, maxsize):
+        """Resize all field caches.
+
+        The various kinds of fields built from this catalog are cached.  This may or may not
+        be an optimization for your use case.  Normally only a single field is built for a
+        given catalog, and it is usually efficient to cache it, so it can be reused multiple
+        times.  E.g. for the usual Landy-Szalay NN calculation:
+
+            >>> dd.process(data_cat)
+            >>> rr.process(rand_cat)
+            >>> dr.process(data_cat, rand_cat)
+
+        the third line will be able to reuse the same fields built for the data and randoms
+        in the first two lines.
+
+        However, if you are making many different fields from the same catalog -- for instance
+        because you keep changing the min_sep and max_sep for different calls -- then saving
+        them all will tend to blow up the memory.
+
+        Therefore, the default number of fields (of each type) to cache is 1.  This lets the
+        first use case be efficient, but not use too much memory for the latter case.
+
+        If you prefer a different behavior, this method lets you change the number of fields to
+        cache.  The cache is an LRU (Least Recently Used) cache, which means only the n most
+        recently used fields are saved.  I.e. when it is full, the least recently used field
+        is removed from the cache.
+
+        If you call this with maxsize=0, then caching will be turned off.  A new field will be
+        built each time you call a process function with this catalog.
+
+        If you call this with maxsize>1, then multiple fields will be saved according to whatever
+        number you set.  This will use more memory, but may be an optimization for you depending
+        on what you are doing.
+
+        Finally, if you want to set different sizes for the different kinds of fields, then
+        you can call resize separately for the different caches:
+
+            >>> cat.nfields.resize(maxsize)
+            >>> cat.kfields.resize(maxsize)
+            >>> cat.gfields.resize(maxsize)
+            >>> cat.nsimplefields.resize(maxsize)
+            >>> cat.ksimplefields.resize(maxsize)
+            >>> cat.gsimplefields.resize(maxsize)
+
+        Parameters:
+            maxsize (float):    The new maximum number of fields of each type to cache.
+        """
+        if hasattr(self, '_nfields'): self.nfields.resize(maxsize)
+        if hasattr(self, '_kfields'): self.kfields.resize(maxsize)
+        if hasattr(self, '_gfields'): self.gfields.resize(maxsize)
+        if hasattr(self, '_nsimplefields'): self.nsimplefields.resize(maxsize)
+        if hasattr(self, '_ksimplefields'): self.ksimplefields.resize(maxsize)
+        if hasattr(self, '_gsimplefields'): self.gsimplefields.resize(maxsize)
+ +
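+    # Usage sketch (not in the source): the two common settings.
+    #
+    #     >>> cat.resize_cache(0)   # disable caching; rebuild fields every time
+    #     >>> cat.resize_cache(4)   # keep the 4 most recently used fields alive
+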
[docs] def clear_cache(self): + """Clear all field caches. + + The various kinds of fields built from this catalog are cached. This may or may not + be an optimization for your use case. Normally only a single field is built for a + given catalog, and it is usually efficient to cache it, so it can be reused multiple + times. E.g. for the usual Landy-Szalay NN calculation: + + >>> dd.process(data_cat) + >>> rr.process(rand_cat) + >>> dr.process(data_cat, rand_cat) + + the third line will be able to reuse the same fields built for the data and randoms + in the first two lines. + + However, this also means that the memory used for the field will persist as long as + the catalog object does. If you need to recover this memory and don't want to delete + the catalog yet, this method lets you clear the cache. + + There are separate caches for each kind of field. If you want to clear just one or + some of them, you can call clear separately for the different caches: + + >>> cat.nfields.clear() + >>> cat.kfields.clear() + >>> cat.gfields.clear() + >>> cat.nsimplefields.clear() + >>> cat.ksimplefields.clear() + >>> cat.gsimplefields.clear() + """ + if hasattr(self, '_nfields'): self.nfields.clear() + if hasattr(self, '_kfields'): self.kfields.clear() + if hasattr(self, '_gfields'): self.gfields.clear() + if hasattr(self, '_nsimplefields'): self.nsimplefields.clear() + if hasattr(self, '_ksimplefields'): self.ksimplefields.clear() + if hasattr(self, '_gsimplefields'): self.gsimplefields.clear() + self._field = lambda : None # Acts like a dead weakref
+ + @property + def field(self): + # The default is to return None here. + # This might also return None if weakref has expired. + # But if the weakref is alive, this returns the field we want. + return self._field() + +
[docs] @depr_pos_kwargs + def getNField(self, *, min_size=0, max_size=None, split_method=None, brute=False, + min_top=None, max_top=10, coords=None, logger=None): + """Return an `NField` based on the positions in this catalog. + + The `NField` object is cached, so this is efficient to call multiple times. + cf. `resize_cache` and `clear_cache` + + Parameters: + min_size (float): The minimum radius cell required (usually min_sep). (default: 0) + max_size (float): The maximum radius cell required (usually max_sep). (default: None) + split_method (str): Which split method to use ('mean', 'median', 'middle', or 'random') + (default: 'mean'; this value can also be given in the Catalog + constructor in the config dict.) + brute (bool): Whether to force traversal to the leaves. (default: False) + min_top (int): The minimum number of top layers to use when setting up the + field. (default: :math:`\\max(3, \\log_2(N_{\\rm cpu}))`) + max_top (int): The maximum number of top layers to use when setting up the + field. (default: 10) + coords (str): The kind of coordinate system to use. (default: self.coords) + logger: A Logger object if desired (default: self.logger) + + Returns: + An `NField` object + """ + if split_method is None: + split_method = get(self.config,'split_method',str,'mean') + if logger is None: + logger = self.logger + field = self.nfields(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng=self._rng, logger=logger) + self._field = weakref.ref(field) + return field
+ + +
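+    # Sketch of direct use (not in the source; process() normally calls this for
+    # you, and the sizes here are arbitrary example values):
+    #
+    #     >>> field = cat.getNField(min_size=1., max_size=100.)
+    #     >>> field is cat.getNField(min_size=1., max_size=100.)   # LRU-cached
+    #     True
+    #
+    # getKField and getGField below follow the same pattern for k and g1,g2 data.
+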
[docs] @depr_pos_kwargs + def getKField(self, *, min_size=0, max_size=None, split_method=None, brute=False, + min_top=None, max_top=10, coords=None, logger=None): + """Return a `KField` based on the k values in this catalog. + + The `KField` object is cached, so this is efficient to call multiple times. + cf. `resize_cache` and `clear_cache` + + Parameters: + min_size (float): The minimum radius cell required (usually min_sep). (default: 0) + max_size (float): The maximum radius cell required (usually max_sep). (default: None) + split_method (str): Which split method to use ('mean', 'median', 'middle', or 'random') + (default: 'mean'; this value can also be given in the Catalog + constructor in the config dict.) + brute (bool): Whether to force traversal to the leaves. (default: False) + min_top (int): The minimum number of top layers to use when setting up the + field. (default: :math:`\\max(3, \\log_2(N_{\\rm cpu}))`) + max_top (int): The maximum number of top layers to use when setting up the + field. (default: 10) + coords (str): The kind of coordinate system to use. (default self.coords) + logger: A Logger object if desired (default: self.logger) + + Returns: + A `KField` object + """ + if split_method is None: + split_method = get(self.config,'split_method',str,'mean') + if self.k is None: + raise TypeError("k is not defined.") + if logger is None: + logger = self.logger + field = self.kfields(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng=self._rng, logger=logger) + self._field = weakref.ref(field) + return field
+ + +
[docs] @depr_pos_kwargs + def getGField(self, *, min_size=0, max_size=None, split_method=None, brute=False, + min_top=None, max_top=10, coords=None, logger=None): + """Return a `GField` based on the g1,g2 values in this catalog. + + The `GField` object is cached, so this is efficient to call multiple times. + cf. `resize_cache` and `clear_cache`. + + Parameters: + min_size (float): The minimum radius cell required (usually min_sep). (default: 0) + max_size (float): The maximum radius cell required (usually max_sep). (default: None) + split_method (str): Which split method to use ('mean', 'median', 'middle', or 'random') + (default: 'mean'; this value can also be given in the Catalog + constructor in the config dict.) + brute (bool): Whether to force traversal to the leaves. (default: False) + min_top (int): The minimum number of top layers to use when setting up the + field. (default: :math:`\\max(3, \\log_2(N_{\\rm cpu}))`) + max_top (int): The maximum number of top layers to use when setting up the + field. (default: 10) + coords (str): The kind of coordinate system to use. (default self.coords) + logger: A Logger object if desired (default: self.logger) + + Returns: + A `GField` object + """ + if split_method is None: + split_method = get(self.config,'split_method',str,'mean') + if self.g1 is None or self.g2 is None: + raise TypeError("g1,g2 are not defined.") + if logger is None: + logger = self.logger + field = self.gfields(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng=self._rng, logger=logger) + self._field = weakref.ref(field) + return field
+ + +
[docs] @depr_pos_kwargs + def getNSimpleField(self, *, logger=None): + """Return an `NSimpleField` based on the positions in this catalog. + + The `NSimpleField` object is cached, so this is efficient to call multiple times. + cf. `resize_cache` and `clear_cache` + + Parameters: + logger: A Logger object if desired (default: self.logger) + + Returns: + An `NSimpleField` object + """ + if logger is None: + logger = self.logger + return self.nsimplefields(logger=logger)
+ + +
[docs] @depr_pos_kwargs + def getKSimpleField(self, *, logger=None): + """Return a `KSimpleField` based on the k values in this catalog. + + The `KSimpleField` object is cached, so this is efficient to call multiple times. + cf. `resize_cache` and `clear_cache` + + Parameters: + logger: A Logger object if desired (default: self.logger) + + Returns: + A `KSimpleField` object + """ + if self.k is None: + raise TypeError("k is not defined.") + if logger is None: + logger = self.logger + return self.ksimplefields(logger=logger)
+ + +
[docs] @depr_pos_kwargs + def getGSimpleField(self, *, logger=None): + """Return a `GSimpleField` based on the g1,g2 values in this catalog. + + The `GSimpleField` object is cached, so this is efficient to call multiple times. + cf. `resize_cache` and `clear_cache` + + Parameters: + logger: A Logger object if desired (default: self.logger) + + Returns: + A `GSimpleField` object + """ + if self.g1 is None or self.g2 is None: + raise TypeError("g1,g2 are not defined.") + if logger is None: + logger = self.logger + return self.gsimplefields(logger=logger)
+ + def _weighted_mean(self, x, idx=None): + # Find the weighted mean of some column. + # If weights are set, then return sum(w * x) / sum(w) + # Else, just sum(x) / N + if self.nontrivial_w: + if idx is None: + return np.sum(x * self.w) / self.sumw + else: + return np.sum(x[idx] * self.w[idx]) / np.sum(self.w[idx]) + else: + return np.mean(x[idx]) + +
[docs] def get_patch_centers(self): + """Return an array of patch centers corresponding to the patches in this catalog. + + If the patches were set either using K-Means or by giving the centers, then this + will just return that same center array. Otherwise, it will be calculated from the + positions of the objects with each patch number. + + This function is automatically called when accessing the property + ``patch_centers``. So you should not normally need to call it directly. + + Returns: + An array of center coordinates used to make the patches. + Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or + spherical geometries. In the latter case, the centers represent + (x,y,z) coordinates on the unit sphere. + """ + # Early exit + if self._centers is not None: + return self._centers + + self.load() + if self._patch is None: + if self.coords == 'flat': + self._centers = np.array([[self._weighted_mean(self.x), + self._weighted_mean(self.y)]]) + else: + self._centers = np.array([[self._weighted_mean(self.x), + self._weighted_mean(self.y), + self._weighted_mean(self.z)]]) + else: + self._centers = np.empty((self.npatch,2 if self.z is None else 3)) + for p in range(self.npatch): + indx = np.where(self.patch == p)[0] + if len(indx) == 0: + raise RuntimeError("Cannot find center for patch %s."%p + + " No items with this patch number") + if self.coords == 'flat': + self._centers[p] = [self._weighted_mean(self.x,indx), + self._weighted_mean(self.y,indx)] + else: + self._centers[p] = [self._weighted_mean(self.x,indx), + self._weighted_mean(self.y,indx), + self._weighted_mean(self.z,indx)] + if self.coords == 'spherical': + self._centers /= np.sqrt(np.sum(self._centers**2,axis=1))[:,np.newaxis] + return self._centers
+ +
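+    # Example sketch (not in the source; the file name and npatch are hypothetical):
+    # for ra/dec data the centers are (x,y,z) points on the unit sphere.
+    #
+    #     >>> cat = treecorr.Catalog('cat.fits', ra_col='RA', dec_col='DEC',
+    #     ...                        ra_units='deg', dec_units='deg', npatch=20)
+    #     >>> cat.patch_centers.shape
+    #     (20, 3)
+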
[docs] def write_patch_centers(self, file_name): + """Write the patch centers to a file. + + The output file will include the following columns: + + ======== ======================================================= + Column Description + ======== ======================================================= + patch patch number (0..npatch-1) + x mean x values + y mean y values + z mean z values (only for spherical or 3d coordinates) + ======== ======================================================= + + It will write a FITS file if the file name ends with '.fits', otherwise an ASCII file. + + Parameters: + file_name (str): The name of the file to write to. + """ + self.logger.info('Writing centers to %s',file_name) + + centers = self.patch_centers + col_names = ['patch', 'x', 'y'] + if self.coords != 'flat': + col_names.append('z') + columns = [np.arange(centers.shape[0])] + for i in range(centers.shape[1]): + columns.append(centers[:,i]) + + with make_writer(file_name, precision=16, logger=self.logger) as writer: + writer.write(col_names, columns)
+ +
[docs]    def read_patch_centers(self, file_name):
+        """Read patch centers from a file.
+
+        This function typically gets called automatically when setting patch_centers as a
+        string, being the file name.  The patch centers are read from the file and returned.
+
+        Parameters:
+            file_name (str):    The name of the file to read from.
+
+        Returns:
+            The centers, as an array, which can be used to determine the patches.
+        """
+        self.logger.info('Reading centers from %s',file_name)
+
+        with make_reader(file_name, logger=self.logger) as reader:
+            data = reader.read_data()
+        if 'z' in data.dtype.names:
+            return np.column_stack((data['x'],data['y'],data['z']))
+        else:
+            return np.column_stack((data['x'],data['y']))
+ +
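+    # A common round trip (sketch, hypothetical file names): write the centers
+    # from the data catalog, then hand them to another catalog so both use the
+    # same patch boundaries.
+    #
+    #     >>> data_cat.write_patch_centers('centers.fits')
+    #     >>> rand_cat = treecorr.Catalog('rand.fits', ra_col='RA', dec_col='DEC',
+    #     ...                             ra_units='deg', dec_units='deg',
+    #     ...                             patch_centers='centers.fits')
+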
[docs] def load(self): + """Load the data from a file, if it isn't yet loaded. + + When a Catalog is read in from a file, it tries to delay the loading of the data from + disk until it is actually needed. This is especially important when running over a + set of patches, since you may not be able to fit all the patches in memory at once. + + One does not normally need to call this method explicitly. It will run automatically + whenever the data is needed. However, if you want to directly control when the disk + access happens, you can use this function. + """ + if not self.loaded: + self.logger.info("Reading input file %s",self.name) + self._read_file(self.file_name, self.reader, self._num, self._is_rand) + self._finish_input()
+ +
[docs] def unload(self): + """Bring the Catalog back to an "unloaded" state, if possible. + + When a Catalog is read in from a file, it tries to delay the loading of the data from + disk until it is actually needed. After loading, this method will return the Catalog + back to the unloaded state to recover the memory in the data arrays. If the Catalog is + needed again during further processing, it will re-load the data from disk at that time. + + This will also call `clear_cache` to recover any memory from fields that have been + constructed as well. + + If the Catalog was not read in from a file, then this function will only do the + `clear_cache` step. + """ + if self.file_type is not None: + self._x = None + self._y = None + self._z = None + self._ra = None + self._dec = None + self._r = None + self._w = None + self._wpos = None + self._g1 = None + self._g2 = None + self._k = None + self._patch = None + if self._patches is not None: + for p in self._patches: + p.unload() + self.clear_cache()
+ +
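+    # Deferred-I/O sketch (not in the source; file name hypothetical): the
+    # constructor only checks the file, the first data access reads it, and
+    # unload() releases the arrays again.
+    #
+    #     >>> cat = treecorr.Catalog('cat.fits', ra_col='RA', dec_col='DEC',
+    #     ...                        ra_units='deg', dec_units='deg')
+    #     >>> cat.loaded          # nothing read from disk yet
+    #     False
+    #     >>> ntot = cat.ntot     # triggers the actual read
+    #     >>> cat.unload()        # memory recovered; reloads later if needed
+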
[docs] def get_patch_file_names(self, save_patch_dir): + """Get the names of the files to use for reading/writing patches in save_patch_dir + """ + if self.file_name is not None: + base, ext = os.path.splitext(os.path.basename(self.file_name)) + # Default to FITS file if we do not otherwise recognize the file type. + # It's not critical to match this, just a convenience for if the user + # wants to pre-create their own patch files. + if ext.lower() not in [".fits", ".fit", ".hdf5", ".hdf"]: + ext = ".fits" + names = [base + '_%03d%s'%(i, ext) for i in range(self.npatch)] + else: + names = ['patch%03d.fits'%i for i in range(self.npatch)] + return [os.path.join(save_patch_dir, n) for n in names]
+ +
[docs]    def write_patches(self, save_patch_dir=None):
+        """Write the patches to disk as separate files.
+
+        This can be used in conjunction with the ``low_mem=True`` option of `get_patches` (and
+        implicitly by the various `process <GGCorrelation.process>` methods) to only keep
+        at most two patches in memory at a time.
+
+        Parameters:
+            save_patch_dir (str):   The directory to write the patches to. [default: None, in
+                                    which case self.save_patch_dir will be used.  If that is
+                                    None, a ValueError will be raised.]
+        """
+        if save_patch_dir is None:
+            save_patch_dir = self.save_patch_dir
+        if save_patch_dir is None:
+            raise ValueError("save_patch_dir is required here, since not given in constructor.")
+
+        file_names = self.get_patch_file_names(save_patch_dir)
+        for i, p, file_name in zip(range(self.npatch), self.patches, file_names):
+            self.logger.info('Writing patch %d to %s',i,file_name)
+            if p.ra is not None:
+                # Don't multiply and divide by the units on round trip.
+                p.ra_units = p.dec_units = 1
+            p.write(file_name)
+ +
[docs] def read_patches(self, save_patch_dir=None): + """Read the patches from files on disk. + + This function assumes that the patches were written using `write_patches`. + In particular, the file names are not arbitrary, but must match what TreeCorr uses + in that method. + + .. note:: + + The patches that are read in will be in an "unloaded" state. They will load + as needed when some functionality requires it. So this is compatible with using + the ``low_mem`` option in various places. + + Parameters: + save_patch_dir (str): The directory to read from. [default: None, in which + case self.save_patch_dir will be used. If that is None, a + ValueError will be raised.] + """ + if save_patch_dir is None: + save_patch_dir = self.save_patch_dir + if save_patch_dir is None: + raise ValueError("save_patch_dir is required here, since not given in constructor.") + + # Need to be careful here to not trigger a load by accident. + # This would be easier if we just checked e.g. if self.ra is not None, etc. + # But that would trigger an unnecessary load if we aren't loaded yet. + # So do all this with the underscore attributes. + kwargs = {} + if self._ra is not None or self.config.get('ra_col','0') != '0': + kwargs['ra_col'] = 'ra' + kwargs['dec_col'] = 'dec' + kwargs['ra_units'] = 'rad' + kwargs['dec_units'] = 'rad' + if self._r is not None or self.config.get('r_col','0') != '0': + kwargs['r_col'] = 'r' + else: + kwargs['x_col'] = 'x' + kwargs['y_col'] = 'y' + if self._z is not None or self.config.get('z_col','0') != '0': + kwargs['z_col'] = 'z' + if (self._w is not None and self._nontrivial_w) or self.config.get('w_col','0') != '0': + kwargs['w_col'] = 'w' + if self._wpos is not None or self.config.get('wpos_col','0') != '0': + kwargs['wpos_col'] = 'wpos' + if self._g1 is not None or self.config.get('g1_col','0') != '0': + kwargs['g1_col'] = 'g1' + if self._g2 is not None or self.config.get('g2_col','0') != '0': + kwargs['g2_col'] = 'g2' + if self._k is not None or self.config.get('k_col','0') != '0': + kwargs['k_col'] = 'k' + + file_names = self.get_patch_file_names(save_patch_dir) + self._patches = [] + # Check that the files exist, although we won't actually load them yet. + for i, file_name in zip(range(self.npatch), file_names): + if not os.path.isfile(file_name): + raise OSError("Patch file %s not found"%file_name) + self._patches = [Catalog(file_name=name, patch=i, **kwargs) + for i, name in enumerate(file_names)] + self.logger.info('Patches created from files %s .. %s',file_names[0],file_names[-1])
+ +
[docs]    @depr_pos_kwargs
+    def get_patches(self, *, low_mem=False):
+        """Return a list of Catalog instances each representing a single patch from this Catalog.
+
+        After calling this function once, the patches may be repeatedly accessed by the
+        ``patches`` attribute, without triggering a rebuild of the patches.  Furthermore,
+        if ``patches`` is accessed before calling this function, it will be called automatically
+        (with the default low_mem parameter).
+
+        Parameters:
+            low_mem (bool):     Whether to try to leave the returned patch catalogs in an
+                                "unloaded" state, wherein they will not load the data from a
+                                file until they are used.  This only works if the current catalog
+                                was loaded from a file or the patches were saved (using
+                                ``save_patch_dir``).  (default: False)
+        """
+        # Early exit
+        if self._patches is not None:
+            return self._patches
+        if self.npatch == 1 or self._single_patch is not None:
+            self._patches = [self]
+            return self._patches
+
+        # See if we have patches already written to disk.  If so, use them.
+        if self.save_patch_dir is not None:
+            try:
+                self.read_patches()
+            except OSError:
+                # No problem.  We'll make them and write them out below.
+                pass
+            else:
+                return self._patches
+
+        if low_mem and self.file_name is not None:
+            # This is a little tricky, since we don't want to trigger a load if the catalog
+            # isn't loaded yet.  So try to get the patches from centers or single_patch first.
+            if self._centers is not None:
+                patch_set = range(len(self._centers))
+            else:
+                # This triggers a load of the current catalog, but no choice here.
+                patch_set = sorted(set(self.patch))
+            centers = self._centers if self._patch is None else None
+            self._patches = [Catalog(config=self.config, file_name=self.file_name,
+                                     patch=i, npatch=self.npatch, patch_centers=centers)
+                             for i in patch_set]
+        else:
+            patch_set = sorted(set(self.patch))
+            if len(patch_set) != self.npatch:
+                self.logger.error("WARNING: Some patch numbers do not contain any objects!")
+                missing = set(range(self.npatch)) - set(patch_set)
+                self.logger.warning("The following patch numbers have no objects: %s",missing)
+                self.logger.warning("This may be a problem depending on your use case.")
+            self._patches = []
+            for i in patch_set:
+                indx = np.where(self.patch == i)[0]
+                x = self.x[indx] if self.x is not None else None
+                y = self.y[indx] if self.y is not None else None
+                z = self.z[indx] if self.z is not None else None
+                ra = self.ra[indx] if self.ra is not None else None
+                dec = self.dec[indx] if self.dec is not None else None
+                r = self.r[indx] if self.r is not None else None
+                w = self.w[indx] if self.nontrivial_w else None
+                wpos = self.wpos[indx] if self.wpos is not None else None
+                g1 = self.g1[indx] if self.g1 is not None else None
+                g2 = self.g2[indx] if self.g2 is not None else None
+                k = self.k[indx] if self.k is not None else None
+                check_wpos = self._wpos if self._wpos is not None else self._w
+                kwargs = dict(keep_zero_weight=np.any(check_wpos==0))
+                if self.ra is not None:
+                    kwargs['ra_units'] = 'rad'
+                    kwargs['dec_units'] = 'rad'
+                    kwargs['allow_xyz'] = True
+                p = Catalog(x=x, y=y, z=z, ra=ra, dec=dec, r=r, w=w, wpos=wpos,
+                            g1=g1, g2=g2, k=k, patch=i, npatch=self.npatch, **kwargs)
+                self._patches.append(p)
+
+        # Write the patches to files if requested.
+        if self.save_patch_dir is not None:
+            self.write_patches()
+            if low_mem:
+                # If low_mem, replace _patches with a version that reads from these files.
+                # This will typically be a lot faster when the load does happen.
+                self.read_patches()
+
+        return self._patches
+ +
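+    # Low-memory sketch (not in the source; names hypothetical): with a
+    # save_patch_dir, each patch lives in its own file and only loads while used.
+    #
+    #     >>> cat = treecorr.Catalog('cat.fits', ra_col='RA', dec_col='DEC',
+    #     ...                        ra_units='deg', dec_units='deg', npatch=20,
+    #     ...                        save_patch_dir='patches')
+    #     >>> for p in cat.get_patches(low_mem=True):
+    #     ...     process_one_patch(p)   # hypothetical per-patch work
+    #     ...     p.unload()
+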
[docs]    @depr_pos_kwargs
+    def write(self, file_name, *, file_type=None, cat_precision=None):
+        """Write the catalog to a file.
+
+        The position columns are output using the same units as were used when building the
+        Catalog.  If you want to use a different unit, you can set the catalog's units directly
+        before writing.  e.g.:
+
+            >>> cat = treecorr.Catalog('cat.dat', ra=ra, dec=dec,
+            ...                        ra_units='hours', dec_units='degrees')
+            >>> cat.ra_units = coord.degrees
+            >>> cat.write('new_cat.dat')
+
+        The output file will include some of the following columns (those for which the
+        corresponding attribute is not None):
+
+        ========    =======================================================
+        Column      Description
+        ========    =======================================================
+        ra          self.ra if not None
+        dec         self.dec if not None
+        r           self.r if not None
+        x           self.x if not None
+        y           self.y if not None
+        z           self.z if not None
+        w           self.w if not None and self.nontrivial_w
+        wpos        self.wpos if not None
+        g1          self.g1 if not None
+        g2          self.g2 if not None
+        k           self.k if not None
+        patch       self.patch if not None
+        ========    =======================================================
+
+        Parameters:
+            file_name (str):    The name of the file to write to.
+            file_type (str):    The type of file to write ('ASCII' or 'FITS').  (default:
+                                determine the type automatically from the extension of file_name.)
+            cat_precision (int): For ASCII output catalogs, the desired precision. (default: 16;
+                                this value can also be given in the Catalog constructor in the
+                                config dict.)
+
+        Returns:
+            The column names that were written to the file as a list.
+        """
+        self.logger.info('Writing catalog to %s',file_name)
+
+        col_names = []
+        columns = []
+        if self.ra is not None:
+            col_names.append('ra')
+            columns.append(self.ra / self.ra_units)
+            col_names.append('dec')
+            columns.append(self.dec / self.dec_units)
+            if self.r is not None:
+                col_names.append('r')
+                columns.append(self.r)
+        else:
+            col_names.append('x')
+            columns.append(self.x / self.x_units)
+            col_names.append('y')
+            columns.append(self.y / self.y_units)
+            if self.z is not None:
+                col_names.append('z')
+                columns.append(self.z)
+        if self.nontrivial_w:
+            col_names.append('w')
+            columns.append(self.w)
+        if self.wpos is not None:
+            col_names.append('wpos')
+            columns.append(self.wpos)
+        if self.g1 is not None:
+            col_names.append('g1')
+            columns.append(self.g1)
+        if self.g2 is not None:
+            col_names.append('g2')
+            columns.append(self.g2)
+        if self.k is not None:
+            col_names.append('k')
+            columns.append(self.k)
+        if self._patch is not None:
+            col_names.append('patch')
+            columns.append(self.patch)
+
+        if cat_precision is None:
+            cat_precision = get(self.config,'cat_precision',int,16)
+
+        writer = make_writer(file_name, precision=cat_precision, file_type=file_type,
+                             logger=self.logger)
+        with writer:
+            writer.write(col_names, columns)
+        return col_names
+ +
[docs] def copy(self): + """Make a copy""" + return copy.deepcopy(self)
+ + def __getstate__(self): + d = self.__dict__.copy() + d.pop('logger',None) # Oh well. This is just lost in the copy. Can't be pickled. + d.pop('_field',None) + d.pop('_nfields',None) + d.pop('_kfields',None) + d.pop('_gfields',None) + d.pop('_nsimplefields',None) + d.pop('_ksimplefields',None) + d.pop('_gsimplefields',None) + return d + + def __setstate__(self, d): + self.__dict__ = d + self.logger = setup_logger(get(self.config,'verbose',int,1), + self.config.get('log_file',None)) + self._field = lambda : None + + def __repr__(self): + s = 'treecorr.Catalog(' + if self.loaded: + if self.x is not None and self.ra is None: s += 'x='+repr(self.x)+',' + if self.y is not None and self.ra is None: s += 'y='+repr(self.y)+',' + if self.z is not None and self.ra is None: s += 'z='+repr(self.z)+',' + if self.ra is not None: s += 'ra='+repr(self.ra)+",ra_units='rad'," + if self.dec is not None: s += 'dec='+repr(self.dec)+",dec_units='rad'," + if self.r is not None: s += 'r='+repr(self.r)+',' + if self.nontrivial_w: s += 'w='+repr(self.w)+',' + if self.wpos is not None: s += 'wpos='+repr(self.wpos)+',' + if self.g1 is not None: s += 'g1='+repr(self.g1)+',' + if self.g2 is not None: s += 'g2='+repr(self.g2)+',' + if self.k is not None: s += 'k='+repr(self.k)+',' + if self.patch is not None: s += 'patch='+repr(self.patch)+',' + wpos = self._wpos if self._wpos is not None else self._w + if np.any(wpos == 0): s += 'keep_zero_weight=True,' + # remove the last ',' + s = s[:-1] + ')' + else: + # Catalog isn't loaded yet. Use file_name info here instead. + s += 'file_name='+repr(self.file_name)+',' + s += 'config ='+repr(self.config) + s += ')' + return s + + def __eq__(self, other): + return (isinstance(other, Catalog) and + np.array_equal(self.x, other.x) and + np.array_equal(self.y, other.y) and + np.array_equal(self.z, other.z) and + np.array_equal(self.ra, other.ra) and + np.array_equal(self.dec, other.dec) and + np.array_equal(self.r, other.r) and + np.array_equal(self.w, other.w) and + np.array_equal(self.wpos, other.wpos) and + np.array_equal(self.g1, other.g1) and + np.array_equal(self.g2, other.g2) and + np.array_equal(self.k, other.k) and + np.array_equal(self.patch, other.patch))
+ + +
[docs]@depr_pos_kwargs
+def read_catalogs(config, key=None, list_key=None, *, num=0, logger=None, is_rand=None):
+    """Read in a list of catalogs for the given key.
+
+    key should be the file_name parameter or similar key word.
+    list_key should be the corresponding file_list parameter, if appropriate.
+    At least one of key or list_key must be provided.  If both are provided, then only
+    one of these should be in the config dict.
+
+    num indicates which key to use if any of the fields like x_col, flip_g1, etc. are lists.
+    The default is 0, which means to use the first item in the list if they are lists.
+
+    If the config dict specifies that patches be used, the returned list of Catalogs will be
+    a concatenation of the patches for each of the specified names.
+
+    Parameters:
+        config (dict):  The configuration dict to use for the appropriate parameters.
+        key (str):      Which key name to use for the file names. e.g. 'file_name'
+                        (default: None)
+        list_key (str): Which key name to use for the name of a list file. e.g. 'file_list'.
+                        Either key or list_key is required.  (default: None)
+        num (int):      Which number catalog does this correspond to. e.g. file_name should use
+                        num=0, file_name2 should use num=1.  (default: 0)
+        logger:         If desired, a Logger object for logging. (default: None, in which case
+                        one will be built according to the config dict's verbose level.)
+        is_rand (bool): If this is a random file, then setting is_rand to True will let them
+                        skip k_col, g1_col, and g2_col if they were set for the main catalog.
+                        (default: False)
+
+    Returns:
+        A list of Catalogs, or an empty list if no catalogs are specified.
+    """
+    if logger is None:
+        logger = setup_logger(get(config,'verbose',int,1), config.get('log_file',None))
+
+    if key is None and list_key is None:
+        raise TypeError("Must provide either key or list_key")
+    if key is not None and key in config:
+        if list_key is not None and list_key in config:
+            raise TypeError("Cannot provide both key and list_key")
+        file_names = config[key]
+    elif list_key is not None and list_key in config:
+        list_file = config[list_key]
+        with open(list_file,'r') as fin:
+            file_names = [ f.strip() for f in fin ]
+    else:
+        # If this key was required (i.e. file_name) then let the caller check this.
+        return []
+    if is_rand is None:
+        if key is not None:
+            is_rand = 'rand' in key
+        else:
+            is_rand = 'rand' in list_key
+    if not isinstance(file_names,list):
+        file_names = file_names.split()
+    ret = []
+    for file_name in file_names:
+        ret += Catalog(file_name, config, num=num, logger=logger, is_rand=is_rand).get_patches()
+    return ret
+ + +
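+# Config-driven sketch (not in the source; the keys mirror the corr2 parameter
+# names and the file names are hypothetical):
+#
+#     >>> config = {'file_name': ['cat1.fits', 'cat2.fits'],
+#     ...           'ra_col': 'RA', 'dec_col': 'DEC',
+#     ...           'ra_units': 'deg', 'dec_units': 'deg'}
+#     >>> cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
+#     >>> len(cats)
+#     2
+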
[docs]@depr_pos_kwargs +def calculateVarG(cat_list, *, low_mem=False): + """Calculate the overall shear variance from a list of catalogs. + + The catalogs are assumed to be equivalent, so this is just the average shear + variance (per component) weighted by the number of objects in each catalog. + + Parameters: + cat_list: A Catalog or a list of Catalogs for which to calculate the shear variance. + low_mem: Whether to try to conserve memory when cat is a list by unloading each + catalog after getting its individual varg. [default: False] + + Returns: + The shear variance per component, aka shape noise. + """ + if isinstance(cat_list, Catalog): + return cat_list.varg + elif len(cat_list) == 1: + return cat_list[0].varg + else: + varg = 0 + sumw = 0 + for cat in cat_list: + varg += cat.varg * cat.sumw + sumw += cat.sumw + if low_mem: + cat.unload() + return varg / sumw
+ +
[docs]@depr_pos_kwargs
def calculateVarK(cat_list, *, low_mem=False):
    """Calculate the overall kappa variance from a list of catalogs.

    The catalogs are assumed to be equivalent, so this is just the average kappa
    variance weighted by the number of objects in each catalog.

    Parameters:
        cat_list:   A Catalog or a list of Catalogs for which to calculate the kappa variance.
        low_mem:    Whether to try to conserve memory when cat_list is a list by unloading each
                    catalog after getting its individual vark.  [default: False]

    Returns:
        The kappa variance.
    """
    if isinstance(cat_list, Catalog):
        return cat_list.vark
    elif len(cat_list) == 1:
        return cat_list[0].vark
    else:
        # Unlike for g, we allow k to have a non-zero mean around which we take the
        # variance.  When building up from multiple catalogs, we need to calculate the
        # overall mean and get the variance around that.  So this is a little more complicated.
        # In practice, it probably doesn't matter at all for real data sets, but some of the
        # unit tests have small enough N that this matters.
        vark = 0
        meank = 0
        meank2 = 0
        sumw = 0
        sumw2 = 0
        for cat in cat_list:
            vark += cat.vark * cat.sumw + cat._meank * cat.sumw2 * (2*cat._meank2 - cat._meank)
            meank += cat._meank * cat.sumw
            meank2 += cat._meank2 * cat.sumw2
            sumw += cat.sumw
            sumw2 += cat.sumw2
            if low_mem:
                cat.unload()
        meank /= sumw
        meank2 /= sumw2
        vark = (vark - meank * sumw2 * (2*meank2 - meank)) / sumw
        return vark
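
# --- Editor's sketch ---
# The bookkeeping above generalizes the standard identity for pooling weighted
# variances around the combined mean.  A minimal numpy check of that identity
# (using a plain sum(w*(k-mu)**2)/sum(w) estimator; TreeCorr's own vark also
# tracks sum(w**2) terms, which this sketch omits):
import numpy as np
rng = np.random.RandomState(0)
ks = [rng.normal(0.1, 0.2, 50), rng.normal(0.3, 0.5, 80)]
ws = [rng.uniform(0.5, 1.5, 50), rng.uniform(0.5, 1.5, 80)]
W = [w.sum() for w in ws]
mu = [np.sum(w*k)/wi for k, w, wi in zip(ks, ws, W)]
var = [np.sum(w*(k-m)**2)/wi for k, w, m, wi in zip(ks, ws, mu, W)]
mu_all = sum(m*wi for m, wi in zip(mu, W)) / sum(W)
pooled = sum(wi*(v + (m-mu_all)**2) for v, m, wi in zip(var, mu, W)) / sum(W)
k_all, w_all = np.concatenate(ks), np.concatenate(ws)
assert np.isclose(pooled, np.sum(w_all*(k_all-mu_all)**2) / w_all.sum())
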
+ +
[docs]def isGColRequired(config, num): + """A quick helper function that checks whether we need to bother reading the g1,g2 columns. + + It checks the config dict for the output file names gg_file_name, ng_file_name (only if + num == 1), etc. If the output files indicate that we don't need the g1/g2 columns, then + we don't need to raise an error if the g1_col or g2_col is invalid. + + This makes it easier to specify columns. e.g. for an NG correlation function, the + first catalog does not need to have the g1,g2 columns, and typically wouldn't. So + if you specify g1_col=5, g2_col=6, say, and the first catalog does not have these columns, + you would normally get an error. + + But instead, we check that the calculation is going to be NG from the presence of an + ng_file_name parameter, and we let the would-be error pass. + + Parameters: + config (dict): The configuration file to check. + num (int): Which number catalog are we working on. + + Returns: + True if some output file requires this catalog to have valid g1/g2 columns, + False if not. + + """ + return config and ( 'gg_file_name' in config + or 'm2_file_name' in config + or (num==1 and 'norm_file_name' in config) + or (num==1 and 'ng_file_name' in config) + or (num==1 and 'nm_file_name' in config) + or (num==1 and 'kg_file_name' in config) )
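
# --- Editor's sketch (hypothetical output name) ---
# With only an NG output requested, catalog 0 (the counts side) does not need
# g1,g2, but catalog 1 (the shear side) does:
from treecorr.catalog import isGColRequired
config = {'ng_file_name': 'ng.out'}
assert not isGColRequired(config, 0)
assert isGColRequired(config, 1)
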
+ + +
[docs]def isKColRequired(config, num):
    """A quick helper function that checks whether we need to bother reading the k column.

    The logic here is the same as for `isGColRequired`, but we check for output files that require
    the k column rather than g1,g2.

    Parameters:
        config (dict):  The configuration file to check.
        num (int):      Which number catalog are we working on.

    Returns:
        True if some output file requires this catalog to have a valid k column,
        False if not.

    """
    return config and ( 'kk_file_name' in config
                        or (num==0 and 'kg_file_name' in config)
                        or (num==1 and 'nk_file_name' in config) )
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/config.html b/docs/_build/html/_modules/treecorr/config.html new file mode 100644 index 00000000..9a1176c0 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/config.html @@ -0,0 +1,528 @@ + + + + + + treecorr.config — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for treecorr.config

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: config
+"""
+
+import sys
+import coord
+import numpy as np
+import warnings
+import logging
+import os
+
+
+
[docs]def parse_variable(config, v):
    """Parse a configuration variable from a string that should look like 'key = value'
    and write that value to config[key].

    :param config:  The configuration dict to which to write the key,value pair
    :param v:       A string of the form 'key = value'
    """
    if '=' not in v:
        raise ValueError('Improper variable specification: %s.  Use syntax: key = value.'%v)
    key, value = v.split('=',1)
    key = key.strip()
    # Cut off any trailing comment
    if '#' in value:
        value = value.split('#')[0]
    value = value.strip()
    if value[0] in ['{','[','(']:
        if value[-1] not in ['}',']',')']:
            raise ValueError('List symbol %s not properly matched'%value[0])
        values = value[1:-1].split(',')
        values = [ vv.strip() for vv in values ]
    else:
        values = value.split()  # on whitespace
    if len(values) == 1:
        config[key] = values[0]
    else:
        config[key] = values
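
# --- Editor's sketch ---
# parse_variable handles both scalar and list-style values, and strips inline
# comments.  Note that the values are left as strings at this stage:
from treecorr.config import parse_variable
config = {}
parse_variable(config, 'min_sep = 1.0  # trailing comments are removed')
parse_variable(config, 'file_name = [d1.fits, d2.fits]')
# config == {'min_sep': '1.0', 'file_name': ['d1.fits', 'd2.fits']}
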
+ + +
[docs]def parse_bool(value):
    """Parse a value as a boolean.

    Valid string values for True are: 'true', 'yes', 't', 'y'
    Valid string values for False are: 'false', 'no', 'f', 'n', 'none'
    Capitalization is ignored.

    Bools and ints are returned unchanged.  (Ints are deliberately not converted to bool,
    since some parameters, e.g. brute, distinguish 1 vs 2 vs True.)

    :param value:   The value to parse.

    :returns: The value converted to a bool (or left as an int).
    """
    if isinstance(value,str):
        if value.strip().upper() in [ 'TRUE', 'YES', 'T', 'Y' ]:
            return True
        elif value.strip().upper() in [ 'FALSE', 'NO', 'F', 'N', 'NONE' ]:
            return False
        else:
            try:
                bool(int(value))
            except Exception:
                raise ValueError("Unable to parse %s as a bool."%value)
            else:
                return int(value)
    elif isinstance(value,(bool, np.bool_)):
        return value
    elif isinstance(value,int):
        # Note: integers aren't converted to bool, since brute distinguishes 1 vs 2 vs True.
        return value
    else:
        raise ValueError("Unable to parse %s as a bool."%value)
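
# --- Editor's sketch ---
from treecorr.config import parse_bool
assert parse_bool('Yes') is True
assert parse_bool('none') is False
assert parse_bool(2) == 2   # ints pass through, so brute=2 stays distinguishable
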
+ +
[docs]def parse_unit(value): + """Parse the input value as a string that should be one of the valid angle units in + coord.AngleUnit.valid_names. + + The value is allowed to merely start with one of the unit names. So 'deg', 'degree', + 'degrees' all convert to 'deg' which is the name in coord.AngleUnit.valid_names. + The return value in this case would be coord.AngleUnit.from_name('deg').value, + which has the value pi/180. + + :param value: The unit as a string value to parse. + + :returns: The given unit in radians. + """ + for unit in coord.AngleUnit.valid_names: + if value.startswith(unit): + return coord.AngleUnit.from_name(value).value + raise ValueError("Unable to parse %s as an angle unit"%value)
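
# --- Editor's sketch ---
import math
from treecorr.config import parse_unit
assert math.isclose(parse_unit('degrees'), math.pi/180)   # 'deg', 'degree' also work
assert parse_unit('rad') == 1.0
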
+ + +
[docs]def read_config(file_name, file_type='auto'): + """Read a configuration dict from a file. + + :param file_name: The file name from which the configuration dict should be read. + :param file_type: The type of config file. Options are 'auto', 'yaml', 'json', 'params'. + (default: 'auto', which tries to determine the type from the extension) + + :returns: A config dict built from the configuration file. + """ + if file_type == 'auto': + if file_name.endswith('.yaml'): + file_type = 'yaml' + elif file_name.endswith('.json'): + file_type = 'json' + elif file_name.endswith('.params'): + file_type = 'params' + else: + raise ValueError("Unable to determine the type of config file from the extension") + if file_type == 'yaml': + return _read_yaml_file(file_name) + elif file_type == 'json': + return _read_json_file(file_name) + elif file_type == 'params': + return _read_params_file(file_name) + else: + raise ValueError("Invalid file_type %s"%file_type)
+ +def _read_yaml_file(file_name): + import yaml + with open(file_name) as fin: + config = yaml.safe_load(fin.read()) + return config + +def _read_json_file(file_name): + import json + with open(file_name) as fin: + config = json.load(fin) + return config + +def _read_params_file(file_name): + config = dict() + with open(file_name) as fin: + for v in fin: + v = v.strip() + if len(v) == 0 or v[0] == '#': + pass + elif v[0] == '+': + include_file_name = v[1:] + config1 = read_config(include_file_name) + config.update(config1) + else: + parse_variable(config,v) + return config + + +
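
# --- Editor's sketch (hypothetical file name) ---
# The file type is normally inferred from the extension; legacy .params files
# go through parse_variable line by line and support '+file' includes.
from treecorr.config import read_config
config = read_config('corr2.yaml')
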
[docs]def setup_logger(verbose, log_file=None):
    """Set up a logging.Logger based on an integer verbosity level.

    :param verbose:     An integer indicating what verbosity level to use.
    :param log_file:    If given, a file name to which to write the logging output.
                        If omitted or None, then output to stdout.

    :returns: The logging.Logger object to use.
    """
    logging_levels = {  0: logging.CRITICAL,
                        1: logging.WARNING,
                        2: logging.INFO,
                        3: logging.DEBUG }
    logging_level = logging_levels[int(verbose)]

    # Setup logging to go to sys.stdout or (if requested) to an output file
    if log_file is None:
        name = 'treecorr'
    else:
        name = 'treecorr_' + log_file
    logger = logging.getLogger(name)

    if len(logger.handlers) == 0:  # only add handler once!
        if log_file is None:
            handle = logging.StreamHandler(stream=sys.stdout)
        else:
            handle = logging.FileHandler(log_file)
        formatter = logging.Formatter('%(message)s')  # Simple text output
        handle.setFormatter(formatter)
        logger.addHandler(handle)
    logger.setLevel(logging_level)
    return logger
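
# --- Editor's sketch ---
# verbose=2 maps to logging.INFO.  Giving a log_file writes there instead of
# stdout, and the handler is only installed once per logger name.
from treecorr.config import setup_logger
logger = setup_logger(2)
logger.info('visible at verbose >= 2')
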
+ + +
[docs]def parse(value, value_type, name): + """Parse the input value as the given type. + + :param value: The value to parse. + :param value_type: The type expected for this. + :param name: The name of this value. Only used for error reporting. + + :returns: value + """ + try: + if value_type is bool: + return parse_bool(value) + elif value is None: + return None + else: + return value_type(value) + except ValueError: + raise ValueError("Could not parse {}={} as type {}".format(name, value, value_type))
+ + +
[docs]def check_config(config, params, aliases=None, logger=None):
    """Check (and update) a config dict to conform to the given parameter rules.
    The params dict has an entry for each valid config parameter whose value is a tuple
    with the following items:

    - type
    - can be a list?
    - default value
    - valid values
    - description (Multiple entries here are allowed for longer strings)

    The file corr2.py has a list of parameters for the corr2 program.

    :param config:  The config dict to check.
    :param params:  A dict of valid parameters with information about each one.
    :param aliases: A dict of deprecated parameters that are still aliases for new names.
                    (default: None)
    :param logger:  If desired, a logger object for logging any warnings here. (default: None)

    :returns: The updated config dict.
    """
    config = config.copy()
    for key in list(config.keys()):
        # Check if this is a deprecated alias
        if aliases and key in aliases:
            if logger:
                logger.warning("The parameter %s is deprecated. You should use %s instead."%(
                    key, aliases[key]))
            else:
                warnings.warn("The parameter %s is deprecated. You should use %s instead."%(
                    key, aliases[key]), FutureWarning)
            new_key = aliases[key]
            config[new_key] = config[key]
            del config[key]
            key = new_key

        # Check that this is a valid key
        if key not in params:
            raise TypeError("Invalid parameter %s."%key)

        value_type, may_be_list, default_value, valid_values = params[key][:4]

        # Get the value
        if may_be_list and isinstance(config[key], list):
            value = [ parse(v, value_type, key) for v in config[key] ]
        else:
            value = parse(config[key], value_type, key)
        if value is None:
            continue

        # If only limited values are allowed, check that this is one of them.
        if valid_values is not None and value is not None:
            if value_type is str:
                matches = [ v for v in valid_values if value == v ]
                if len(matches) == 0:
                    # Allow the string to be longer.
                    # e.g. degrees is valid if 'deg' is in valid_values.
                    matches = [ v for v in valid_values if isinstance(v,str) and value.startswith(v) ]
                if len(matches) != 1:
                    raise ValueError("Parameter %s has invalid value %s.  Valid values are %s."%(
                        key, config[key], str(valid_values)))
                value = matches[0]
            else:
                if value not in valid_values:
                    raise ValueError("Parameter %s has invalid value %s.  Valid values are %s."%(
                        key, config[key], str(valid_values)))

        # Write it back to the dict with the right type
        config[key] = value

    # Write the defaults for other parameters to simplify the syntax of getting the values
    for key in params:
        if key in config:
            continue
        value_type, may_be_list, default_value, valid_values = params[key][:4]
        if default_value is not None:
            config[key] = default_value

    return config
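
# --- Editor's sketch (tiny hypothetical params spec) ---
# Values are type-converted, defaults are filled in, and unknown keys raise.
from treecorr.config import check_config
params = {'min_sep': (float, False, None, None, 'Minimum separation'),
          'bin_type': (str, False, 'Log', ['Log', 'Linear'], 'Binning scheme')}
config = check_config({'min_sep': '1.0'}, params)
# config == {'min_sep': 1.0, 'bin_type': 'Log'}
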
+ + + + + +
[docs]def convert(value, value_type, key): + """Convert the given value to the given type. + + The ``key`` helps determine what kind of conversion should be performed. + Specifically if 'unit' is in the ``key`` value, then a unit conversion is done. + Otherwise, it just parses the ``value`` according to the ``value_type``. + + :param value: The input value to be converted. Usually a string. + :param value_type: The type to convert to. + :param key: The key for this value. Only used to see if it includes 'unit'. + + :returns: The converted value. + """ + if value is None: + return None + elif 'unit' in key: + return parse_unit(value) + elif value_type == bool: + return parse_bool(value) + else: + return value_type(value)
+ +
[docs]def get_from_list(config, key, num, value_type=str, default=None):
    """A helper function to get a key from config that is allowed to be a list

    Some of the config values are allowed to be lists of values, in which case we take the
    ``num`` item from the list.  If they are not a list, then the given value is used for
    all values of ``num``.

    :param config:      The configuration dict from which to get the key value.
    :param key:         What key to get from config.
    :param num:         Which number element to use if the item is a list.
    :param value_type:  What type should the value be converted to. (default: str)
    :param default:     What value should be used if the key is not in the config dict,
                        or the value corresponding to the key is None.
                        (default: None)

    :returns: The specified value, converted as needed.
    """
    values = config.get(key, None)
    if isinstance(values, list):
        try:
            value = values[num]
        except IndexError:
            raise IndexError("num=%d is out of range of list for %s"%(num,key))
        if value is not None:
            return convert(value, value_type, key)
        elif default is not None:
            return convert(default, value_type, key)
    elif values is not None:
        return convert(values, value_type, key)
    elif default is not None:
        return convert(default, value_type, key)
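
# --- Editor's sketch ---
# List-valued entries are indexed by num; scalar entries apply to every num.
from treecorr.config import get_from_list
config = {'g1_col': ['0', '3'], 'flip_g1': False}
assert get_from_list(config, 'g1_col', 1, int) == 3
assert get_from_list(config, 'flip_g1', 1, bool) is False
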
+ + +
[docs]def get(config, key, value_type=str, default=None): + """A helper function to get a key from config converting to a particular type + + :param config: The configuration dict from which to get the key value. + :param key: Which key to get from config. + :param value_type: Which type should the value be converted to. (default: str) + :param default: What value should be used if the key is not in the config dict, + or the value corresponding to the key is None. + (default: None) + + :returns: The specified value, converted as needed. + """ + value = config.get(key, default) + if value is not None: + return convert(value, value_type, key) + elif default is not None: + return convert(default, value_type, key)
+ +
[docs]def merge_config(config, kwargs, valid_params, aliases=None): + """Merge in the values from kwargs into config. + + If either of these is None, then the other one is returned. + If they are both dicts, then the values in kwargs take precedence over ones in config + if there are any keys that are in both. Also, the kwargs dict will be modified in this case. + + :param config: The root config (will not be modified) + :param kwargs: A second dict with more or updated values + :param valid_params: A dict of valid parameters that are allowed for this usage. + The config dict is allowed to have extra items, but kwargs is not. + :param aliases: An optional dict of aliases. (default: None) + + :returns: The merged dict, including only items that are in valid_params. + """ + if kwargs is None: + kwargs = {} + if config: + for key, value in config.items(): + if key in valid_params and key not in kwargs: + kwargs[key] = value + return check_config(kwargs, valid_params, aliases)
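
# --- Editor's sketch ---
# kwargs win over config, and only keys in valid_params survive the merge.
from treecorr.config import merge_config
valid = {'min_sep': (float, False, None, None, ''),
         'max_sep': (float, False, None, None, '')}
merged = merge_config({'min_sep': 1., 'verbose': 2}, {'max_sep': 100.}, valid)
# merged == {'min_sep': 1.0, 'max_sep': 100.0}; 'verbose' is dropped
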
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/corr2.html b/docs/_build/html/_modules/treecorr/corr2.html new file mode 100644 index 00000000..b6461ea8 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/corr2.html @@ -0,0 +1,402 @@ + + + + + + treecorr.corr2 — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for treecorr.corr2

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: corr2
+"""
+
+from .catalog import Catalog, read_catalogs
+from .binnedcorr2 import BinnedCorr2
+from .config import setup_logger, check_config, print_params, get
+from .util import set_omp_threads
+from .nncorrelation import NNCorrelation
+from .ngcorrelation import NGCorrelation
+from .nkcorrelation import NKCorrelation
+from .kkcorrelation import KKCorrelation
+from .kgcorrelation import KGCorrelation
+from .ggcorrelation import GGCorrelation
+
+# Dict describing the valid parameters, what types they are, and a description:
+# Each value is a tuple with the following elements:
+#    type
+#    may_be_list
+#    default value
+#    list of valid values
+#    description
+corr2_valid_params = {
+
+    # Parameters about the input catalogs
+
+    'file_name' : (str, True, None, None,
+            'The file(s) with the galaxy data.'),
+    'file_name2' : (str, True, None, None,
+            'The file(s) to use for the second field for a cross-correlation.'),
+    'rand_file_name' : (str, True, None, None,
+            'For NN correlations, a list of random files.'),
+    'rand_file_name2' : (str, True, None, None,
+            'The randoms for the second field for a cross-correlation.'),
+    'file_list' : (str, False, None, None,
+            'A text file with file names in lieu of file_name.'),
+    'file_list2' : (str, False, None, None,
+            'A text file with file names in lieu of file_name2.'),
+    'rand_file_list' : (str, False, None, None,
+            'A text file with file names in lieu of rand_file_name.'),
+    'rand_file_list2' : (str, False, None, None,
+            'A text file with file names in lieu of rand_file_name2.'),
+
+    # Parameters about the output file(s)
+
+    'nn_file_name' : (str, False, None, None,
+            'The output filename for point-point correlation function.'),
+    'nn_statistic' : (str, False, 'compensated', ['compensated','simple'],
+            'Which statistic to use for omega as the estimator of the NN correlation function. '),
+    'ng_file_name' : (str, False, None, None,
+            'The output filename for point-shear correlation function.'),
+    'ng_statistic' : (str, False, None, ['compensated', 'simple'],
+            'Which statistic to use for the mean shear estimator of the NG correlation function. ',
+            'The default is compensated if rand_files is given, otherwise simple'),
+    'gg_file_name' : (str, False, None, None,
+            'The output filename for shear-shear correlation function.'),
+    'nk_file_name' : (str, False, None, None,
+            'The output filename for point-kappa correlation function.'),
+    'nk_statistic' : (str, False, None, ['compensated', 'simple'],
+            'Which statistic to use for the mean kappa estimator of the NK correlation function. ',
+            'The default is compensated if rand_files is given, otherwise simple'),
+    'kk_file_name' : (str, False, None, None,
+            'The output filename for kappa-kappa correlation function.'),
+    'kg_file_name' : (str, False, None, None,
+            'The output filename for kappa-shear correlation function.'),
+
+    # Derived output quantities
+
+    'm2_file_name' : (str, False, None, None,
+            'The output filename for the aperture mass statistics.'),
+    'nm_file_name' : (str, False, None, None,
+            'The output filename for <N Map> and related values.'),
+    'norm_file_name' : (str, False, None, None,
+            'The output filename for <N Map>^2/<N^2><Map^2> and related values.'),
+}
+
+# Add in the valid parameters for the relevant classes
+for c in [ Catalog, BinnedCorr2 ]:
+    corr2_valid_params.update(c._valid_params)
+
+corr2_aliases = {
+}
+
+
[docs]def corr2(config, logger=None):
    """Run the full two-point correlation function code based on the parameters in the
    given config dict.

    The function `print_corr2_params` will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = setup_logger(config.get('verbose',1), config.get('log_file',None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = check_config(config, corr2_valid_params, corr2_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s',pprint.pformat(config))

    if ('output_dots' not in config
            and config.get('log_file',None) is None
            and config['verbose'] >= 2):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads',None)
    logger.debug('From config dict, num_threads = %s',num_threads)
    set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = read_catalogs(config, 'file_name', 'file_list', num=0, logger=logger)
    cat2 = read_catalogs(config, 'file_name2', 'file_list2', num=1, logger=logger)
    rand1 = read_catalogs(config, 'rand_file_name', 'rand_file_list', num=0, logger=logger)
    rand2 = read_catalogs(config, 'rand_file_name2', 'rand_file_list2', num=1, logger=logger)
    if len(cat1) == 0:
        raise TypeError("Either file_name or file_list is required")
    if len(cat2) == 0: cat2 = None
    if len(rand1) == 0: rand1 = None
    if len(rand2) == 0: rand2 = None
    if cat2 is None and rand2 is not None:
        raise TypeError("rand_file_name2 is invalid without file_name2")
    logger.info("Done creating input catalogs")

    # Do GG correlation function if necessary
    if 'gg_file_name' in config or 'm2_file_name' in config:
        logger.warning("Performing GG calculations...")
        gg = GGCorrelation(config, logger=logger)
        gg.process(cat1,cat2)
        logger.info("Done GG calculations.")
        if 'gg_file_name' in config:
            gg.write(config['gg_file_name'])
            logger.warning("Wrote GG correlation to %s",config['gg_file_name'])
        if 'm2_file_name' in config:
            gg.writeMapSq(config['m2_file_name'], m2_uform=config['m2_uform'])
            logger.warning("Wrote Mapsq values to %s",config['m2_file_name'])

    # Do NG correlation function if necessary
    if 'ng_file_name' in config or 'nm_file_name' in config or 'norm_file_name' in config:
        if cat2 is None:
            raise TypeError("file_name2 is required for ng correlation")
        logger.warning("Performing NG calculations...")
        ng = NGCorrelation(config, logger=logger)
        ng.process(cat1,cat2)
        logger.info("Done NG calculation.")

        # The default ng_statistic is compensated _iff_ rand files are given.
        rg = None
        if rand1 is None:
            if config.get('ng_statistic',None) == 'compensated':
                raise TypeError("rand_files is required for ng_statistic = compensated")
        elif config.get('ng_statistic','compensated') == 'compensated':
            rg = NGCorrelation(config, logger=logger)
            rg.process(rand1,cat2)
            logger.info("Done RG calculation.")

        if 'ng_file_name' in config:
            ng.write(config['ng_file_name'], rg=rg)
            logger.warning("Wrote NG correlation to %s",config['ng_file_name'])
        if 'nm_file_name' in config:
            ng.writeNMap(config['nm_file_name'], rg=rg, m2_uform=config['m2_uform'],
                         precision=config.get('precision',None))
            logger.warning("Wrote NMap values to %s",config['nm_file_name'])

        if 'norm_file_name' in config:
            gg = GGCorrelation(config, logger=logger)
            gg.process(cat2)
            logger.info("Done GG calculation for norm")
            dd = NNCorrelation(config, logger=logger)
            dd.process(cat1)
            logger.info("Done DD calculation for norm")
            rr = NNCorrelation(config, logger=logger)
            rr.process(rand1)
            logger.info("Done RR calculation for norm")
            if config['nn_statistic'] == 'compensated':
                dr = NNCorrelation(config, logger=logger)
                dr.process(cat1,rand1)
                logger.info("Done DR calculation for norm")
            else:
                dr = None
            ng.writeNorm(config['norm_file_name'],gg=gg,dd=dd,rr=rr,dr=dr,rg=rg,
                         m2_uform=config['m2_uform'], precision=config.get('precision',None))
            logger.warning("Wrote Norm values to %s",config['norm_file_name'])

    # Do NN correlation function if necessary
    if 'nn_file_name' in config:
        logger.warning("Performing DD calculations...")
        dd = NNCorrelation(config, logger=logger)
        dd.process(cat1,cat2)
        logger.info("Done DD calculations.")

        dr = None
        rd = None
        if rand1 is None:
            logger.warning("No random catalogs given.  Only doing npairs calculation.")
            rr = None
        elif cat2 is None:
            logger.warning("Performing RR calculations...")
            rr = NNCorrelation(config, logger=logger)
            rr.process(rand1)
            logger.info("Done RR calculations.")

            if config['nn_statistic'] == 'compensated':
                logger.warning("Performing DR calculations...")
                dr = NNCorrelation(config, logger=logger)
                dr.process(cat1,rand1)
                logger.info("Done DR calculations.")
        else:
            if rand2 is None:
                raise TypeError("rand_file_name2 is required when file_name2 is given")
            logger.warning("Performing RR calculations...")
            rr = NNCorrelation(config, logger=logger)
            rr.process(rand1,rand2)
            logger.info("Done RR calculations.")

            if config['nn_statistic'] == 'compensated':
                logger.warning("Performing DR calculations...")
                dr = NNCorrelation(config, logger=logger)
                dr.process(cat1,rand2)
                logger.info("Done DR calculations.")
                rd = NNCorrelation(config, logger=logger)
                rd.process(rand1,cat2)
                logger.info("Done RD calculations.")
        dd.write(config['nn_file_name'], rr=rr, dr=dr, rd=rd)
        logger.warning("Wrote NN correlation to %s",config['nn_file_name'])

    # Do KK correlation function if necessary
    if 'kk_file_name' in config:
        logger.warning("Performing KK calculations...")
        kk = KKCorrelation(config, logger=logger)
        kk.process(cat1,cat2)
        logger.info("Done KK calculations.")
        kk.write(config['kk_file_name'])
        logger.warning("Wrote KK correlation to %s",config['kk_file_name'])

    # Do NK correlation function if necessary
    if 'nk_file_name' in config:
        if cat2 is None:
            raise TypeError("file_name2 is required for nk correlation")
        logger.warning("Performing NK calculations...")
        nk = NKCorrelation(config, logger=logger)
        nk.process(cat1,cat2)
        logger.info("Done NK calculation.")

        rk = None
        if rand1 is None:
            if config.get('nk_statistic',None) == 'compensated':
                raise TypeError("rand_files is required for nk_statistic = compensated")
        elif config.get('nk_statistic','compensated') == 'compensated':
            rk = NKCorrelation(config, logger=logger)
            rk.process(rand1,cat2)
            logger.info("Done RK calculation.")

        nk.write(config['nk_file_name'], rk=rk)
        logger.warning("Wrote NK correlation to %s",config['nk_file_name'])

    # Do KG correlation function if necessary
    if 'kg_file_name' in config:
        if cat2 is None:
            raise TypeError("file_name2 is required for kg correlation")
        logger.warning("Performing KG calculations...")
        kg = KGCorrelation(config, logger=logger)
        kg.process(cat1,cat2)
        logger.info("Done KG calculation.")
        kg.write(config['kg_file_name'])
        logger.warning("Wrote KG correlation to %s",config['kg_file_name'])
+ + + +
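
# --- Editor's sketch (hypothetical file names) ---
# The same dict that the corr2 executable builds from a YAML file can be
# passed to corr2() directly:
import treecorr
config = {'file_name': 'gals.fits',
          'ra_col': 'RA', 'dec_col': 'DEC', 'ra_units': 'deg', 'dec_units': 'deg',
          'g1_col': 'G1', 'g2_col': 'G2',
          'min_sep': 1., 'max_sep': 100., 'nbins': 20, 'sep_units': 'arcmin',
          'gg_file_name': 'gg.out'}
treecorr.corr2(config)
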
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/corr3.html b/docs/_build/html/_modules/treecorr/corr3.html new file mode 100644 index 00000000..0e8975d8 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/corr3.html @@ -0,0 +1,281 @@ + + + + + + treecorr.corr3 — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for treecorr.corr3

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: corr3
+"""
+
+from .catalog import Catalog, read_catalogs
+from .binnedcorr3 import BinnedCorr3
+from .config import setup_logger, check_config, print_params
+from .util import set_omp_threads
+from .nnncorrelation import NNNCorrelation
+from .kkkcorrelation import KKKCorrelation
+from .gggcorrelation import GGGCorrelation
+
+
+# Dict describing the valid parameters, what types they are, and a description:
+# Each value is a tuple with the following elements:
+#    type
+#    may_be_list
+#    default value
+#    list of valid values
+#    description
+corr3_valid_params = {
+
+    # Parameters about the input catalogs
+
+    'file_name' : (str, True, None, None,
+            'The file(s) with the galaxy data.'),
+    'rand_file_name' : (str, True, None, None,
+            'For NNN correlations, a list of random files.'),
+    'file_list' : (str, False, None, None,
+            'A text file with file names in lieu of file_name.'),
+    'rand_file_list' : (str, False, None, None,
+            'A text file with file names in lieu of rand_file_name.'),
+
+    # Parameters about the output file(s)
+
+    'nnn_file_name' : (str, False, None, None,
+            'The output filename for point-point correlation function.'),
+    'nnn_statistic' : (str, False, 'compensated', ['compensated','simple'],
+            'Which statistic to use for omega as the estimator of the NNN correlation function. '),
+    'kkk_file_name' : (str, False, None, None,
+            'The output filename for kappa-kappa-kappa correlation function.'),
+    'ggg_file_name' : (str, False, None, None,
+            'The output filename for gamma-gamma-gamma correlation function.'),
+
+    # Derived output quantities
+
+    'm3_file_name' : (str, False, None, None,
+            'The output filename for the aperture mass skewness.'),
+}
+
+# Add in the valid parameters for the relevant classes
+for c in [ Catalog, BinnedCorr3 ]:
+    corr3_valid_params.update(c._valid_params)
+
+
+corr3_aliases = {
+}
+
+
[docs]def corr3(config, logger=None):
    """Run the full three-point correlation function code based on the parameters in the
    given config dict.

    The function `print_corr3_params` will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = setup_logger(config.get('verbose',1), config.get('log_file',None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = check_config(config, corr3_valid_params, corr3_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s',pprint.pformat(config))

    if ('output_dots' not in config
            and config.get('log_file',None) is None
            and config['verbose'] >= 2):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads',None)
    logger.debug('From config dict, num_threads = %s',num_threads)
    set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = read_catalogs(config, 'file_name', 'file_list', num=0, logger=logger)
    # TODO: when giving file_name2, file_name3, should now do the real CrossCorrelation process.
    rand1 = read_catalogs(config, 'rand_file_name', 'rand_file_list', num=0, logger=logger)
    if len(cat1) == 0:
        raise TypeError("Either file_name or file_list is required")
    if len(rand1) == 0: rand1 = None
    logger.info("Done creating input catalogs")

    # Do GGG correlation function if necessary
    if 'ggg_file_name' in config or 'm3_file_name' in config:
        logger.warning("Performing GGG calculations...")
        ggg = GGGCorrelation(config, logger=logger)
        ggg.process(cat1)
        logger.info("Done GGG calculations.")
        if 'ggg_file_name' in config:
            ggg.write(config['ggg_file_name'])
            logger.warning("Wrote GGG correlation to %s",config['ggg_file_name'])
        if 'm3_file_name' in config:
            ggg.writeMap3(config['m3_file_name'])
            logger.warning("Wrote Map3 values to %s",config['m3_file_name'])

    # Do NNN correlation function if necessary
    if 'nnn_file_name' in config:
        logger.warning("Performing DDD calculations...")
        ddd = NNNCorrelation(config, logger=logger)
        ddd.process(cat1)
        logger.info("Done DDD calculations.")

        drr = None
        rdd = None
        if rand1 is None:
            logger.warning("No random catalogs given.  Only doing ntri calculation.")
            rrr = None
        else:
            logger.warning("Performing RRR calculations...")
            rrr = NNNCorrelation(config, logger=logger)
            rrr.process(rand1)
            logger.info("Done RRR calculations.")

        if rrr is not None and config['nnn_statistic'] == 'compensated':
            logger.warning("Performing DRR calculations...")
            drr = NNNCorrelation(config, logger=logger)
            drr.process(cat1,rand1)
            logger.info("Done DRR calculations.")
            logger.warning("Performing DDR calculations...")
            rdd = NNNCorrelation(config, logger=logger)
            rdd.process(rand1,cat1)
            logger.info("Done DDR calculations.")
        ddd.write(config['nnn_file_name'], rrr=rrr, drr=drr, rdd=rdd)
        logger.warning("Wrote NNN correlation to %s",config['nnn_file_name'])

    # Do KKK correlation function if necessary
    if 'kkk_file_name' in config:
        logger.warning("Performing KKK calculations...")
        kkk = KKKCorrelation(config, logger=logger)
        kkk.process(cat1)
        logger.info("Done KKK calculations.")
        kkk.write(config['kkk_file_name'])
        logger.warning("Wrote KKK correlation to %s",config['kkk_file_name'])
+ + + +
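
# --- Editor's sketch (hypothetical file names) ---
# corr3 mirrors corr2, but currently only handles auto-correlations of the
# catalog(s) given in file_name:
import treecorr
config = {'file_name': 'field.fits', 'x_col': 'X', 'y_col': 'Y', 'k_col': 'KAPPA',
          'min_sep': 1., 'max_sep': 50., 'nbins': 10,
          'kkk_file_name': 'kkk.out'}
treecorr.corr3(config)
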
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/field.html b/docs/_build/html/_modules/treecorr/field.html new file mode 100644 index 00000000..1ee6bb83 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/field.html @@ -0,0 +1,974 @@ + + + + + + treecorr.field — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for treecorr.field

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: field
+"""
+
+import numpy as np
+import weakref
+
+from . import _lib, _ffi
+from .util import get_omp_threads, parse_xyzsep, coord_enum
+from .util import long_ptr as lp
+from .util import double_ptr as dp
+from .util import depr_pos_kwargs
+
+def _parse_split_method(split_method):
+    if split_method == 'middle': return 0
+    elif split_method == 'median': return 1
+    elif split_method == 'mean': return 2
+    else: return 3  # random
+
+
+
[docs]class Field(object):
    r"""A Field in TreeCorr is the object that stores the tree structure we use for efficient
    calculation of the correlation functions.

    The root "cell" in the tree has information about the whole field, including the total
    number of points, the total weight, the mean position, the size (by which we mean the
    maximum distance of any point from the mean position), and possibly more information depending
    on which kind of field we have.

    It also points to two sub-cells which each describe about half the points.  These are commonly
    referred to as "daughter cells".  They in turn point to two more cells each, and so on until
    we get to cells that are considered "small enough" according to the ``min_size`` parameter given
    in the constructor.  These lowest level cells are referred to as "leaves".

    Technically, a Field doesn't have just one of these trees.  To make parallel computation
    more efficient, we actually skip the first few layers of the tree as described above and
    store a list of root cells.  The three parameters that determine how many of these there
    will be are ``max_size``, ``min_top``, and ``max_top``:

    - ``max_size`` sets the maximum size cell that we want to make sure we have in the trees,
      so the root cells will be at least this large.  The default is None, which means
      we care about all sizes, so there may be only one root cell (but typically more
      because of ``min_top``).
    - ``min_top`` sets the minimum number of initial levels to skip.  The default is either 3
      or :math:`\log_2(N_{cpu})`, whichever is larger.  This means there will be at least 8
      (or :math:`N_{cpu}`) root cells (assuming ntot is at least this large of course).
    - ``max_top`` sets the maximum number of initial levels to skip.  The default is 10,
      which means there could be up to 1024 root cells.

    Finally, the ``split_method`` parameter sets how the points in a cell should be divided
    when forming the two daughter cells.  The split is always done according to whichever
    dimension has the largest extent.  E.g. if max(\|x - meanx\|) is larger than max(\|y - meany\|)
    and (for 3d) max(\|z - meanz\|), then it will split according to the x values.  But then
    it may split in different ways according to ``split_method``.  The allowed values are:

    - 'mean' means to divide the points at the average (mean) value of x, y or z.
    - 'median' means to divide the points at the median value of x, y, or z.
    - 'middle' means to divide the points at the midpoint between the minimum and maximum values.
    - 'random' means to divide the points randomly somewhere between the 40th and 60th
      percentile locations in the sorted list.

    Field itself is an abstract base class for the specific types of field classes.
    As such, it cannot be constructed directly.  You should make one of the concrete subclasses:

    - `NField` describes a field of objects to be counted only.
    - `KField` describes a field of points sampling a scalar field (e.g. kappa in the
      weak lensing context).  In addition to the above values, cells keep track of
      the mean kappa value in the given region.
    - `GField` describes a field of points sampling a spinor field (e.g. gamma in the
      weak lensing context).  In addition to the above values, cells keep track of
      the mean (complex) gamma value in the given region.
    """
    def __init__(self):
        raise NotImplementedError("Field is an abstract base class.  It cannot be instantiated.")

    def _determine_top(self, min_top, max_top):
        if min_top is None:
            n_cpu = get_omp_threads()
            # int.bit_length is a trick to avoid going through float.
            # bit_length(n-1) == ceil(log2(n)), which is what we want.
            min_top = max(3, int.bit_length(n_cpu-1))
        else:
            min_top = int(min_top)
        max_top = int(max_top)
        min_top = min(min_top, max_top)  # If min_top > max_top, favor max_top.
        return min_top, max_top

    @property
    def nTopLevelNodes(self):
        """The number of top-level nodes.
        """
        return _lib.FieldGetNTopLevel(self.data, self._d, self._coords)

    @property
    def cat(self):
        """The catalog from which this field was constructed.

        It is stored as a weakref, so if the Catalog has already been garbage collected, this
        might be None.
        """
        # _cat is a weakref.  This gets back to a Catalog object.
        return self._cat()
[docs]    def count_near(self, *args, **kwargs):
        """Count how many points are near a given coordinate.

        Use the existing tree structure to count how many points are within some given separation
        of a target coordinate.

        There are several options for how to specify the reference coordinate, which depends
        on the type of coordinate system this field implements.

        1. For flat 2-dimensional coordinates:

           Parameters:
               x (float):       The x coordinate of the target location
               y (float):       The y coordinate of the target location
               sep (float):     The separation distance

        2. For 3-dimensional Cartesian coordinates:

           Parameters:
               x (float):       The x coordinate of the target location
               y (float):       The y coordinate of the target location
               z (float):       The z coordinate of the target location
               sep (float):     The separation distance

        3. For spherical coordinates:

           Parameters:
               ra (float or Angle):    The right ascension of the target location
               dec (float or Angle):   The declination of the target location
               c (CelestialCoord):     A ``coord.CelestialCoord`` object in lieu of (ra, dec)
               sep (float or Angle):   The separation distance
               ra_units (str):         The units of ra if given as a float
               dec_units (str):        The units of dec if given as a float
               sep_units (str):        The units of sep if given as a float

        4. For spherical coordinates with distances:

           Parameters:
               ra (float or Angle):    The right ascension of the target location
               dec (float or Angle):   The declination of the target location
               c (CelestialCoord):     A ``coord.CelestialCoord`` object in lieu of (ra, dec)
               r (float):              The distance to the target location
               sep (float):            The separation distance
               ra_units (str):         The units of ra if given as a float
               dec_units (str):        The units of dec if given as a float

        In all cases, for parameters that are angles (ra, dec, sep for 'spherical'), you may either
        provide this quantity as a ``coord.Angle`` instance, or you may provide ra_units, dec_units
        or sep_units respectively to specify which angular units you are providing.

        Finally, in cases where ra, dec are allowed, you may instead provide a
        ``coord.CelestialCoord`` instance as the first argument to specify both RA and Dec.
        """
        if self.min_size == 0:
            # If min_size = 0, then regular method is already exact.
            x,y,z,sep = parse_xyzsep(args, kwargs, self._coords)
            return self._count_near(x, y, z, sep)
        else:
            # Otherwise, we need to expand the radius a bit and then check the actual radii
            # using the catalog values.  This is already done in get_near, so just do that
            # and take the len of the result.
            return len(self.get_near(*args, **kwargs))
+ + def _count_near(self, x, y, z, sep): + # If self.min_size > 0, these results may be approximate, since the tree will have + # grouped points within this separation together. + return _lib.FieldCountNear(self.data, x, y, z, sep, self._d, self._coords) + +
[docs]    def get_near(self, *args, **kwargs):
        """Get the indices of points near a given coordinate.

        Use the existing tree structure to find the points that are within some given separation
        of a target coordinate.

        There are several options for how to specify the reference coordinate, which depends
        on the type of coordinate system this field implements.

        1. For flat 2-dimensional coordinates:

           Parameters:
               x (float):       The x coordinate of the target location
               y (float):       The y coordinate of the target location
               sep (float):     The separation distance

        2. For 3-dimensional Cartesian coordinates:

           Parameters:
               x (float):       The x coordinate of the target location
               y (float):       The y coordinate of the target location
               z (float):       The z coordinate of the target location
               sep (float):     The separation distance

        3. For spherical coordinates:

           Parameters:
               ra (float or Angle):    The right ascension of the target location
               dec (float or Angle):   The declination of the target location
               c (CelestialCoord):     A ``coord.CelestialCoord`` object in lieu of (ra, dec)
               sep (float or Angle):   The separation distance
               ra_units (str):         The units of ra if given as a float
               dec_units (str):        The units of dec if given as a float
               sep_units (str):        The units of sep if given as a float

        4. For spherical coordinates with distances:

           Parameters:
               ra (float or Angle):    The right ascension of the target location
               dec (float or Angle):   The declination of the target location
               c (CelestialCoord):     A ``coord.CelestialCoord`` object in lieu of (ra, dec)
               r (float):              The distance to the target location
               sep (float):            The separation distance
               ra_units (str):         The units of ra if given as a float
               dec_units (str):        The units of dec if given as a float

        In all cases, for parameters that are angles (ra, dec, sep for 'spherical'), you may either
        provide this quantity as a ``coord.Angle`` instance, or you may provide ra_units, dec_units
        or sep_units respectively to specify which angular units you are providing.

        Finally, in cases where ra, dec are allowed, you may instead provide a
        ``coord.CelestialCoord`` instance as the first argument to specify both RA and Dec.
        """
        x,y,z,sep = parse_xyzsep(args, kwargs, self._coords)
        if self.min_size == 0:
            # If min_size == 0, then regular method is already exact.
            ind = self._get_near(x, y, z, sep)
        else:
            # Expand the radius by the minimum size of the cells.
            sep1 = sep + self.min_size
            # Get those indices
            ind = self._get_near(x, y, z, sep1)
            # Now check the actual radii of these points using the catalog x,y,z values.
            rsq = (self.cat.x[ind]-x)**2 + (self.cat.y[ind]-y)**2
            if self._coords != _lib.Flat:
                rsq += (self.cat.z[ind]-z)**2
            # Select the ones with r < sep
            near = rsq < sep**2
            ind = ind[near]
        # It comes back unsorted, so sort it.  (Not really required, but nicer output.)
        return np.sort(ind)
+ + def _get_near(self, x, y, z, sep): + # If self.min_size > 0, these results may be approximate, since the tree will have + # grouped points within this separation together. + # First count how many there are, so we can allocate the array for the indices. + n = self._count_near(x, y, z, sep) + ind = np.empty(n, dtype=int) + # Now fill the array with the indices of the nearby points. + _lib.FieldGetNear(self.data, x, y, z, sep, self._d, self._coords, lp(ind), n) + return ind + +
[docs]    @depr_pos_kwargs
    def run_kmeans(self, npatch, *, max_iter=200, tol=1.e-5, init='tree', alt=False, rng=None):
        r"""Use k-means algorithm to set patch labels for a field.

        The k-means algorithm (cf. https://en.wikipedia.org/wiki/K-means_clustering) identifies
        a center position for each patch.  Each point is then assigned to the patch whose center
        is closest.  The centers are then updated to be the mean position of all the points
        assigned to the patch.  This process is repeated until the center locations have converged.

        The process tends to converge relatively quickly.  The convergence criterion we use
        is a tolerance on the rms shift in the centroid positions as a fraction of the overall
        size of the whole field.  This is settable as ``tol`` (default 1.e-5).  You can also
        set the maximum number of iterations to allow as ``max_iter`` (default 200).

        The upshot of the k-means process is to minimize the total within-cluster sum of squares
        (WCSS), also known as the "inertia" of each patch.  This tends to produce patches with
        more or less similar inertia, which makes them useful for jackknife or other sampling
        estimates of the errors in the correlation functions.

        More specifically, if the points :math:`j` have vector positions :math:`\vec x_j`,
        and we define patches :math:`S_i` to comprise disjoint subsets of the :math:`j`
        values, then the inertia :math:`I_i` of each patch is defined as:

        .. math::

            I_i = \sum_{j \in S_i} \left| \vec x_j - \vec \mu_i \right|^2,

        where :math:`\vec \mu_i` is the center of each patch:

        .. math::

            \vec \mu_i \equiv \frac{\sum_{j \in S_i} \vec x_j}{N_i},

        and :math:`N_i` is the number of points assigned to patch :math:`S_i`.
        The k-means algorithm finds a solution that is a local minimum in the total inertia,
        :math:`\sum_i I_i`.

        In addition to the normal k-means algorithm, we also offer an alternate algorithm, which
        can produce slightly better patches for the purpose of patch-based covariance estimation.
        The ideal patch definition for such use would be to minimize the standard deviation (std)
        of the inertia of each patch, not the total (or mean) inertia.  It turns out that it is
        difficult to devise an algorithm that literally does this, since it has a tendency to
        become unstable and not converge.

        However, adding a penalty term to the patch assignment step of the normal k-means
        algorithm turns out to work reasonably well.  The penalty term we use is :math:`f I_i`,
        where :math:`f` is a scaling constant (see below).  When doing the assignment step we assign
        each point :math:`j` to the patch :math:`i` that gives the minimum penalized distance

        .. math::

            d_{ij}^{\prime\;\! 2} = \left| \vec x_j - \mu_i \right|^2 + f I_i.

        The penalty term means that patches with less inertia get more points on the next
        iteration, and vice versa, which tends to equalize the inertia values somewhat.
        The resulting patches have significantly lower std inertia, but typically only slightly
        higher total inertia.

        For the scaling constant, :math:`f`, we chose

        .. math::

            f = \frac{3}{\langle N_i\rangle},

        three times the inverse of the mean number of points in each patch.

        The :math:`1/\langle N_i\rangle` factor makes the two terms of comparable magnitude
        near the edges of the patches, so patches still get most of the points near their previous
        centers, even if they already have larger than average inertia, but some of the points in
        the outskirts of the patch might switch to a nearby patch with smaller inertia.  The
        factor of 3 is purely empirical, and was found to give good results in terms of std
        inertia on some test data (the DES SV field).

        The alternate algorithm is available by specifying ``alt=True``.  Despite it typically
        giving better patch centers than the standard algorithm, we don't make it the default,
        because it may be possible for the iteration to become unstable, leading to some patches
        with no points in them.  (This happened in our tests when the arbitrary factor in the
        scaling constant was 5 instead of 3, but I could not prove that 3 would always avoid this
        failure mode.)  If this happens for you, your best bet is probably to switch to the
        standard algorithm, which can never suffer from this problem.

        Parameters:
            npatch (int):   How many patches to generate
            max_iter (int): How many iterations at most to run. (default: 200)
            tol (float):    Tolerance in the rms centroid shift to consider as converged
                            as a fraction of the total field size. (default: 1.e-5)
            init (str):     Initialization method. Options are:

                            - 'tree' (default) = Use the normal tree structure of the
                              field, traversing down to a level where there are npatch
                              cells, and use the centroids of these cells as the initial
                              centers.  This is almost always the best choice.
                            - 'random' = Use npatch random points as the initial centers.
                            - 'kmeans++' = Use the k-means++ algorithm.
                              cf. https://en.wikipedia.org/wiki/K-means%2B%2B

            alt (bool):     Use the alternate assignment algorithm to minimize the standard
                            deviation of the inertia rather than the total inertia (aka WCSS).
                            (default: False)
            rng (RandomState): If desired, a numpy.random.RandomState instance to use for random
                            number generation. (default: None)

        Returns:
            Tuple containing

            - patches (array):  An array of patch labels, all integers from 0..npatch-1.
                                Size is self.ntot.
            - centers (array):  An array of center coordinates used to make the patches.
                                Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or
                                spherical geometries.  In the latter case, the centers represent
                                (x,y,z) coordinates on the unit sphere.
        """
        centers = self.kmeans_initialize_centers(npatch, init=init, rng=rng)
        self.kmeans_refine_centers(centers, max_iter=max_iter, tol=tol, alt=alt)
        patches = self.kmeans_assign_patches(centers)
        return patches, centers
+ +
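
# --- Editor's sketch ---
# Fields are normally built from a Catalog; run_kmeans returns both the
# per-point labels and the patch centers.
import numpy as np
import treecorr
rng = np.random.RandomState(1234)
cat = treecorr.Catalog(x=rng.uniform(0, 100, 10000), y=rng.uniform(0, 100, 10000))
field = cat.getNField()
patches, centers = field.run_kmeans(16)           # standard algorithm
alt_patches, _ = field.run_kmeans(16, alt=True)   # penalized variant, lower std(inertia)
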
[docs]    @depr_pos_kwargs
    def kmeans_initialize_centers(self, npatch, init='tree', *, rng=None):
        """Use the field's tree structure to assign good initial centers for a K-Means run.

        The classic K-Means algorithm involves starting with random points as the initial
        centers of the patches.  This tends to produce rather poor results in terms of having
        similarly sized patches at the end.  Specifically, the standard deviation
        of the inertia at the local minimum that the K-Means algorithm settles into tends to be
        fairly high for typical geometries.

        A better approach is to use the existing tree structure to start out with centers that
        are fairly evenly spread out through the field.  This algorithm traverses the tree
        until we get to a level that has enough cells for the requested number of patches.
        Then it uses the centroids of these cells as the initial patch centers.

        Parameters:
            npatch (int):   How many patches to generate initial centers for
            init (str):     Initialization method. Options are:

                            - 'tree' (default) = Use the normal tree structure of the
                              field, traversing down to a level where there are npatch
                              cells, and use the centroids of these cells as the initial
                              centers.  This is almost always the best choice.
                            - 'random' = Use npatch random points as the initial centers.
                            - 'kmeans++' = Use the k-means++ algorithm.
                              cf. https://en.wikipedia.org/wiki/K-means%2B%2B

            rng (RandomState): If desired, a numpy.random.RandomState instance to use for random
                            number generation. (default: None)

        Returns:
            An array of center coordinates.
            Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or
            spherical geometries.  In the latter case, the centers represent
            (x,y,z) coordinates on the unit sphere.
        """
        if npatch > self.ntot:
            raise ValueError("Invalid npatch.  Cannot be greater than self.ntot.")
        if npatch < 1:
            raise ValueError("Invalid npatch.  Cannot be less than 1.")
        if self._coords == _lib.Flat:
            centers = np.empty((npatch, 2))
        else:
            centers = np.empty((npatch, 3))
        seed = 0 if rng is None else int(rng.random_sample() * 2**63)
        if init == 'tree':
            _lib.KMeansInitTree(self.data, dp(centers), int(npatch), self._d, self._coords, seed)
        elif init == 'random':
            _lib.KMeansInitRand(self.data, dp(centers), int(npatch), self._d, self._coords, seed)
        elif init == 'kmeans++':
            _lib.KMeansInitKMPP(self.data, dp(centers), int(npatch), self._d, self._coords, seed)
        else:
            raise ValueError("Invalid init: %s. "%init +
                             "Must be one of 'tree', 'random', or 'kmeans++.'")

        return centers
+ +
[docs] @depr_pos_kwargs
+    def kmeans_refine_centers(self, centers, *, max_iter=200, tol=1.e-5, alt=False):
+        """Fast implementation of the K-Means algorithm.
+
+        The standard K-Means algorithm is as follows
+        (cf. https://en.wikipedia.org/wiki/K-means_clustering):
+
+        1. Choose centers somehow.  Traditionally, this is done by just selecting npatch
+           random points from the full set, but we do this more smartly in
+           `kmeans_initialize_centers`.
+        2. For each point, measure the distance to each current patch center, and assign it
+           to the patch that has the closest center.
+        3. Update all the centers to be the centroid of the points assigned to each patch.
+        4. Repeat 2, 3 until the rms shift in the centers is less than some tolerance or the
+           maximum number of iterations is reached.
+        5. Assign the corresponding patch label to each point (`kmeans_assign_patches`).
+
+        In TreeCorr, we use the tree structure to massively increase the speed of steps 2
+        and 3.  For a given cell, we know both its center and its size, so we can quickly
+        check whether all the points in the cell are closer to one center than another.
+        This lets us quickly cull centers from consideration as we traverse the tree.  Once
+        we get to a cell where only one center can be closest for any of the points in it,
+        we stop traversing and assign the whole cell to that patch.
+
+        Further, it is also fast to update the new centroid, since the sum of all the
+        positions for a cell is just N times the cell's centroid.
+
+        As a result, this algorithm typically takes a fraction of a second for ~a million
+        points.  Indeed most of the time spent in the full kmeans calculation is in building
+        the tree in the first place, rather than actually running the kmeans code.  With the
+        alternate algorithm (``alt=True``), the calculation is only slightly slower, since
+        the patch sizes must also be calculated at each step.
+
+        Parameters:
+            centers (array):    An array of center coordinates. (modified by this function)
+                                Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or
+                                spherical geometries.  In the latter case, the centers represent
+                                (x,y,z) coordinates on the unit sphere.
+            max_iter (int):     How many iterations at most to run. (default: 200)
+            tol (float):        Tolerance in the rms centroid shift to consider as converged
+                                as a fraction of the total field size. (default: 1.e-5)
+            alt (bool):         Use the alternate assignment algorithm to minimize the standard
+                                deviation of the inertia rather than the total inertia (aka WCSS).
+                                (default: False)
+        """
+        npatch = centers.shape[0]
+        _lib.KMeansRun(self.data, dp(centers), npatch, int(max_iter), float(tol),
+                       bool(alt), self._d, self._coords)
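For reference, here is a minimal pure-numpy sketch of the classic algorithm in steps 2-4 above. It is illustrative only (no tree acceleration, and it does not guard against patches losing all their points); it is not the implementation TreeCorr uses:

import numpy as np

def naive_kmeans(points, centers, max_iter=200, tol=1.e-5):
    # points: (N, ndim) array of positions; centers: (npatch, ndim) initial centers.
    size = points.max() - points.min()   # crude stand-in for the total field size
    for _ in range(max_iter):
        # Step 2: assign each point to its nearest center.
        dist2 = ((points[:, None, :] - centers[None, :, :])**2).sum(axis=-1)
        patch = dist2.argmin(axis=1)
        # Step 3: move each center to the centroid of its assigned points.
        new_centers = np.array([points[patch == k].mean(axis=0)
                                for k in range(len(centers))])
        # Step 4: stop when the rms centroid shift drops below tolerance.
        rms_shift = np.sqrt(((new_centers - centers)**2).sum(axis=1).mean())
        centers = new_centers
        if rms_shift < tol * size:
            break
    # Step 5: the final assignment doubles as the patch labels.
    return patch, centers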
+ +
[docs] def kmeans_assign_patches(self, centers):
+        """Assign patch numbers to each point according to the given centers.
+
+        This is the final step in the full K-Means algorithm.  It assigns patch numbers to
+        each point in the field according to which center is closest.
+
+        Parameters:
+            centers (array):    An array of center coordinates.
+                                Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or
+                                spherical geometries.  In the latter case, the centers represent
+                                (x,y,z) coordinates on the unit sphere.
+
+        Returns:
+            An array of patch labels, all integers from 0..npatch-1.  Size is self.ntot.
+        """
+        patches = np.empty(self.ntot, dtype=int)
+        npatch = centers.shape[0]
+        centers = np.ascontiguousarray(centers)
+        _lib.KMeansAssign(self.data, dp(centers), npatch,
+                          lp(patches), self.ntot, self._d, self._coords)
+        return patches
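The three methods compose into the same result as `run_kmeans`. Continuing the hypothetical cat from the sketch above:

field = cat.getNField()
centers = field.kmeans_initialize_centers(32, init='tree')
field.kmeans_refine_centers(centers, max_iter=200, tol=1.e-5)
patches = field.kmeans_assign_patches(centers)
# Equivalent to: patches, centers = field.run_kmeans(32)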
+ + +
[docs]class NField(Field): + r"""This class stores the positions and number of objects in a tree structure from which it is + efficient to compute correlation functions. + + An NField is typically created from a Catalog object using + + >>> nfield = cat.getNField(min_size=min_size, max_size=max_size) + + Parameters: + cat (Catalog): The catalog from which to make the field. + min_size (float): The minimum radius cell required (usually min_sep). (default: 0) + max_size (float): The maximum radius cell required (usually max_sep). (default: None) + split_method (str): Which split method to use ('mean', 'median', 'middle', or 'random'). + (default: 'mean') + brute (bool): Whether to force traversal to the leaves for this field. + (default: False) + min_top (int): The minimum number of top layers to use when setting up the field. + (default: :math:`\max(3, \log_2(N_{\rm cpu}))`) + max_top (int): The maximum number of top layers to use when setting up the field. + (default: 10) + coords (str): The kind of coordinate system to use. (default: cat.coords) + rng (RandomState): If desired, a numpy.random.RandomState instance to use for random + number generation. (default: None) + logger (Logger): A logger file if desired. (default: None) + """ + @depr_pos_kwargs + def __init__(self, cat, *, min_size=0, max_size=None, split_method='mean', brute=False, + min_top=None, max_top=10, coords=None, rng=None, logger=None): + if logger: + if cat.name != '': + logger.info('Building NField from cat %s',cat.name) + else: + logger.info('Building NField') + + self._cat = weakref.ref(cat) + self.ntot = cat.ntot + self.min_size = float(min_size) if not brute else 0. + self.max_size = float(max_size) if max_size is not None else np.inf + self.split_method = split_method + self._sm = _parse_split_method(split_method) + self._d = 1 # NData + self.brute = bool(brute) + self.min_top, self.max_top = self._determine_top(min_top, max_top) + self.coords = coords if coords is not None else cat.coords + self._coords = coord_enum(self.coords) # These are the C++-layer enums + seed = 0 if rng is None else int(rng.random_sample() * 2**63) + + self.data = _lib.BuildNField(dp(cat.x), dp(cat.y), dp(cat.z), + dp(cat.w), dp(cat.wpos), cat.ntot, + self.min_size, self.max_size, self._sm, seed, + self.brute, self.min_top, self.max_top, self._coords) + if logger: + logger.debug('Finished building NField (%s)',self.coords) + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + + # In case __init__ failed to get that far + if hasattr(self,'data'): # pragma: no branch + # I don't get this, but sometimes it gets here when the ffi.lock is already locked. + # When that happens, this will freeze in a `with ffi._lock` line in the ffi api.py. + # So, don't do that, and just accept the memory leak instead. + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyNField(self.data, self._coords)
+ + +
[docs]class KField(Field):
+    r"""This class stores the values of a scalar field (kappa in the weak lensing context) in a
+    tree structure from which it is efficient to compute correlation functions.
+
+    A KField is typically created from a Catalog object using
+
+        >>> kfield = cat.getKField(min_size=min_size, max_size=max_size)
+
+    Parameters:
+        cat (Catalog):      The catalog from which to make the field.
+        min_size (float):   The minimum radius cell required (usually min_sep). (default: 0)
+        max_size (float):   The maximum radius cell required (usually max_sep). (default: None)
+        split_method (str): Which split method to use ('mean', 'median', 'middle', or 'random').
+                            (default: 'mean')
+        brute (bool):       Whether to force traversal to the leaves for this field.
+                            (default: False)
+        min_top (int):      The minimum number of top layers to use when setting up the field.
+                            (default: :math:`\max(3, \log_2(N_{\rm cpu}))`)
+        max_top (int):      The maximum number of top layers to use when setting up the field.
+                            (default: 10)
+        coords (str):       The kind of coordinate system to use. (default: cat.coords)
+        rng (RandomState):  If desired, a numpy.random.RandomState instance to use for random
+                            number generation. (default: None)
+        logger (Logger):    A logger file if desired. (default: None)
+    """
+    @depr_pos_kwargs
+    def __init__(self, cat, *, min_size=0, max_size=None, split_method='mean', brute=False,
+                 min_top=None, max_top=10, coords=None, rng=None, logger=None):
+        if logger:
+            if cat.name != '':
+                logger.info('Building KField from cat %s',cat.name)
+            else:
+                logger.info('Building KField')
+
+        self._cat = weakref.ref(cat)
+        self.ntot = cat.ntot
+        self.min_size = float(min_size) if not brute else 0.
+        self.max_size = float(max_size) if max_size is not None else np.inf
+        self.split_method = split_method
+        self._sm = _parse_split_method(split_method)
+        self._d = 2  # KData
+        self.brute = bool(brute)
+        self.min_top, self.max_top = self._determine_top(min_top, max_top)
+        self.coords = coords if coords is not None else cat.coords
+        self._coords = coord_enum(self.coords)  # These are the C++-layer enums
+        seed = 0 if rng is None else int(rng.random_sample() * 2**63)
+
+        self.data = _lib.BuildKField(dp(cat.x), dp(cat.y), dp(cat.z),
+                                     dp(cat.k),
+                                     dp(cat.w), dp(cat.wpos), cat.ntot,
+                                     self.min_size, self.max_size, self._sm, seed,
+                                     self.brute, self.min_top, self.max_top, self._coords)
+        if logger:
+            logger.debug('Finished building KField (%s)',self.coords)
+
+    def __del__(self):
+        # Using memory allocated from the C layer means we have to explicitly deallocate it
+        # rather than being able to rely on the Python memory manager.
+
+        # In case __init__ failed to get that far
+        if hasattr(self,'data'):  # pragma: no branch
+            if not _ffi._lock.locked():  # pragma: no branch
+                _lib.DestroyKField(self.data, self._coords)
+ + +
[docs]class GField(Field):
+    r"""This class stores the values of a spinor field (gamma in the weak lensing context) in a
+    tree structure from which it is efficient to compute correlation functions.
+
+    A GField is typically created from a Catalog object using
+
+        >>> gfield = cat.getGField(min_size=min_size, max_size=max_size)
+
+    Parameters:
+        cat (Catalog):      The catalog from which to make the field.
+        min_size (float):   The minimum radius cell required (usually min_sep). (default: 0)
+        max_size (float):   The maximum radius cell required (usually max_sep). (default: None)
+        split_method (str): Which split method to use ('mean', 'median', 'middle', or 'random').
+                            (default: 'mean')
+        brute (bool):       Whether to force traversal to the leaves for this field.
+                            (default: False)
+        min_top (int):      The minimum number of top layers to use when setting up the field.
+                            (default: :math:`\max(3, \log_2(N_{\rm cpu}))`)
+        max_top (int):      The maximum number of top layers to use when setting up the field.
+                            (default: 10)
+        coords (str):       The kind of coordinate system to use. (default: cat.coords)
+        rng (RandomState):  If desired, a numpy.random.RandomState instance to use for random
+                            number generation. (default: None)
+        logger (Logger):    A logger file if desired. (default: None)
+    """
+    @depr_pos_kwargs
+    def __init__(self, cat, *, min_size=0, max_size=None, split_method='mean', brute=False,
+                 min_top=None, max_top=10, coords=None, rng=None, logger=None):
+        if logger:
+            if cat.name != '':
+                logger.info('Building GField from cat %s',cat.name)
+            else:
+                logger.info('Building GField')
+
+        self._cat = weakref.ref(cat)
+        self.ntot = cat.ntot
+        self.min_size = float(min_size) if not brute else 0.
+        self.max_size = float(max_size) if max_size is not None else np.inf
+        self.split_method = split_method
+        self._sm = _parse_split_method(split_method)
+        self._d = 3  # GData
+        self.brute = bool(brute)
+        self.min_top, self.max_top = self._determine_top(min_top, max_top)
+        self.coords = coords if coords is not None else cat.coords
+        self._coords = coord_enum(self.coords)  # These are the C++-layer enums
+        seed = 0 if rng is None else int(rng.random_sample() * 2**63)
+
+        self.data = _lib.BuildGField(dp(cat.x), dp(cat.y), dp(cat.z),
+                                     dp(cat.g1), dp(cat.g2),
+                                     dp(cat.w), dp(cat.wpos), cat.ntot,
+                                     self.min_size, self.max_size, self._sm, seed,
+                                     self.brute, self.min_top, self.max_top, self._coords)
+        if logger:
+            logger.debug('Finished building GField (%s)',self.coords)
+
+    def __del__(self):
+        # Using memory allocated from the C layer means we have to explicitly deallocate it
+        # rather than being able to rely on the Python memory manager.
+
+        # In case __init__ failed to get that far
+        if hasattr(self,'data'):  # pragma: no branch
+            if not _ffi._lock.locked():  # pragma: no branch
+                _lib.DestroyGField(self.data, self._coords)
+ + +
[docs]class SimpleField(object): + """A SimpleField is like a Field, but only stores the leaves as a list, skipping all the + tree stuff. + + Again, this is an abstract base class, which cannot be instantiated. You should + make one of the concrete subclasses: + + - NSimpleField describes a field of objects to be counted only. + - KSimpleField describes a field of points sampling a scalar field. + - GSimpleField describes a field of points sampling a spinor field. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + """ + def __init__(self): + raise NotImplementedError( + "SimpleField is an abstract base class. It cannot be instantiated.")
+ + +
[docs]class NSimpleField(SimpleField): + """This class stores the positions as a list, skipping all the tree stuff. + + An NSimpleField is typically created from a Catalog object using + + >>> nfield = cat.getNSimpleField() + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat (Catalog): The catalog from which to make the field. + logger (Logger): A logger file if desired. (default: None) + """ + @depr_pos_kwargs + def __init__(self, cat, *, logger=None): + if logger: + if cat.name != '': + logger.info('Building NSimpleField from cat %s',cat.name) + else: + logger.info('Building NSimpleField') + self._d = 1 # NData + self.coords = cat.coords + self._coords = coord_enum(self.coords) # These are the C++-layer enums + + self.data = _lib.BuildNSimpleField(dp(cat.x), dp(cat.y), dp(cat.z), + dp(cat.w), dp(cat.wpos), cat.ntot, + self._coords) + if logger: + logger.debug('Finished building NSimpleField (%s)',self.coords) + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + + # In case __init__ failed to get that far + if hasattr(self,'data'): # pragma: no branch + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyNSimpleField(self.data, self._coords)
+ + +
[docs]class KSimpleField(SimpleField): + """This class stores the kappa field as a list, skipping all the tree stuff. + + A KSimpleField is typically created from a Catalog object using + + >>> kfield = cat.getKSimpleField() + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat (Catalog): The catalog from which to make the field. + logger (Logger): A logger file if desired. (default: None) + """ + @depr_pos_kwargs + def __init__(self, cat, *, logger=None): + if logger: + if cat.name != '': + logger.info('Building KSimpleField from cat %s',cat.name) + else: + logger.info('Building KSimpleField') + self._d = 2 # KData + self.coords = cat.coords + self._coords = coord_enum(self.coords) # These are the C++-layer enums + + self.data = _lib.BuildKSimpleField(dp(cat.x), dp(cat.y), dp(cat.z), + dp(cat.k), + dp(cat.w), dp(cat.wpos), cat.ntot, + self._coords) + if logger: + logger.debug('Finished building KSimpleField (%s)',self.coords) + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + + # In case __init__ failed to get that far + if hasattr(self,'data'): # pragma: no branch + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyKSimpleField(self.data, self._coords)
+ + +
[docs]class GSimpleField(SimpleField):
+    """This class stores the shear field as a list, skipping all the tree stuff.
+
+    A GSimpleField is typically created from a Catalog object using
+
+        >>> gfield = cat.getGSimpleField()
+
+    .. warning::
+
+        .. deprecated:: 4.1
+
+            This function is deprecated and slated to be removed.
+            If you have a need for it, please open an issue to describe your use case.
+
+    Parameters:
+        cat (Catalog):      The catalog from which to make the field.
+        logger (Logger):    A logger file if desired. (default: None)
+    """
+    @depr_pos_kwargs
+    def __init__(self, cat, *, logger=None):
+        if logger:
+            if cat.name != '':
+                logger.info('Building GSimpleField from cat %s',cat.name)
+            else:
+                logger.info('Building GSimpleField')
+        self._d = 3  # GData
+        self.coords = cat.coords
+        self._coords = coord_enum(self.coords)  # These are the C++-layer enums
+
+        self.data = _lib.BuildGSimpleField(dp(cat.x), dp(cat.y), dp(cat.z),
+                                           dp(cat.g1), dp(cat.g2),
+                                           dp(cat.w), dp(cat.wpos), cat.ntot,
+                                           self._coords)
+        if logger:
+            logger.debug('Finished building GSimpleField (%s)',self.coords)
+
+    def __del__(self):
+        # Using memory allocated from the C layer means we have to explicitly deallocate it
+        # rather than being able to rely on the Python memory manager.
+
+        # In case __init__ failed to get that far
+        if hasattr(self,'data'):  # pragma: no branch
+            if not _ffi._lock.locked():  # pragma: no branch
+                _lib.DestroyGSimpleField(self.data, self._coords)
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/ggcorrelation.html b/docs/_build/html/_modules/treecorr/ggcorrelation.html new file mode 100644 index 00000000..67e4be4d --- /dev/null +++ b/docs/_build/html/_modules/treecorr/ggcorrelation.html @@ -0,0 +1,945 @@
+treecorr.ggcorrelation — TreeCorr 4.3.0 documentation

Source code for treecorr.ggcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: ggcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarG
+from .binnedcorr2 import BinnedCorr2
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class GGCorrelation(BinnedCorr2):
+    r"""This class handles the calculation and storage of a 2-point shear-shear correlation
+    function.
+
+    Objects of this class hold the following attributes:
+
+    Attributes:
+        nbins:      The number of bins in logr
+        bin_size:   The size of the bins in logr
+        min_sep:    The minimum separation being considered
+        max_sep:    The maximum separation being considered
+
+    In addition, the following attributes are numpy arrays of length (nbins):
+
+    Attributes:
+        logr:       The nominal center of the bin in log(r) (the natural logarithm of r).
+        rnom:       The nominal center of the bin converted to regular distance.
+                    i.e. r = exp(logr).
+        meanr:      The (weighted) mean value of r for the pairs in each bin.
+                    If there are no pairs in a bin, then exp(logr) will be used instead.
+        meanlogr:   The (weighted) mean value of log(r) for the pairs in each bin.
+                    If there are no pairs in a bin, then logr will be used instead.
+        xip:        The correlation function, :math:`\xi_+(r)`.
+        xim:        The correlation function, :math:`\xi_-(r)`.
+        xip_im:     The imaginary part of :math:`\xi_+(r)`.
+        xim_im:     The imaginary part of :math:`\xi_-(r)`.
+        varxip:     An estimate of the variance of :math:`\xi_+(r)`
+        varxim:     An estimate of the variance of :math:`\xi_-(r)`
+        weight:     The total weight in each bin.
+        npairs:     The number of pairs going into each bin (including pairs where one or
+                    both objects have w=0).
+        cov:        An estimate of the full covariance matrix for the data vector with
+                    :math:`\xi_+` first and then :math:`\xi_-`.
+
+    .. note::
+
+        The default method for estimating the variance and covariance attributes (``varxip``,
+        ``varxim``, and ``cov``) is 'shot', which only includes the shape noise propagated into
+        the final correlation.  This does not include sample variance, so it is always an
+        underestimate of the actual variance.  To get better estimates, you need to set
+        ``var_method`` to something else and use patches in the input catalog(s).
+        cf. `Covariance Estimates`.
+
+    If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances
+    will all be in these units.
+
+    .. note::
+
+        If you separate out the steps of the `process` command and use `process_auto` and/or
+        `process_cross`, then the units will not be applied to ``meanr`` or ``meanlogr`` until
+        the `finalize` function is called.
+
+    The typical usage pattern is as follows:
+
+        >>> gg = treecorr.GGCorrelation(config)
+        >>> gg.process(cat)         # For auto-correlation.
+        >>> gg.process(cat1,cat2)   # For cross-correlation.
+        >>> gg.write(file_name)     # Write out to a file.
+        >>> xip = gg.xip            # Or access the correlation function directly.
+
+    Parameters:
+        config (dict):  A configuration dict that can be used to pass in kwargs if desired.
+                        This dict is allowed to have additional entries besides those listed
+                        in `BinnedCorr2`, which are ignored here. (default: None)
+        logger:         If desired, a logger object for logging. (default: None, in which case
+                        one will be built according to the config dict's verbose level.)
+
+    Keyword Arguments:
+        **kwargs:       See the documentation for `BinnedCorr2` for the list of allowed keyword
+                        arguments, which may be passed either directly or in the config dict.
+    """
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `GGCorrelation`. See class doc for details. + """ + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 3 # GData + self._ro._d2 = 3 # GData + self.xip = np.zeros_like(self.rnom, dtype=float) + self.xim = np.zeros_like(self.rnom, dtype=float) + self.xip_im = np.zeros_like(self.rnom, dtype=float) + self.xim_im = np.zeros_like(self.rnom, dtype=float) + self.varxip = np.zeros_like(self.rnom, dtype=float) + self.varxim = np.zeros_like(self.rnom, dtype=float) + self.meanr = np.zeros_like(self.rnom, dtype=float) + self.meanlogr = np.zeros_like(self.rnom, dtype=float) + self.weight = np.zeros_like(self.rnom, dtype=float) + self.npairs = np.zeros_like(self.rnom, dtype=float) + self.logger.debug('Finished building GGCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr2( + self._d1, self._d2, self._bintype, + self._min_sep,self._max_sep,self._nbins,self._bin_size,self.b, + self.min_rpar, self.max_rpar, self.xperiod, self.yperiod, self.zperiod, + dp(self.xip),dp(self.xip_im),dp(self.xim),dp(self.xim_im), + dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr2(self.corr, self._d1, self._d2, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `GGCorrelation` instances are equal""" + return (isinstance(other, GGCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.min_rpar == other.min_rpar and + self.max_rpar == other.max_rpar and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meanr, other.meanr) and + np.array_equal(self.meanlogr, other.meanlogr) and + np.array_equal(self.xip, other.xip) and + np.array_equal(self.xim, other.xim) and + np.array_equal(self.xip_im, other.xip_im) and + np.array_equal(self.xim_im, other.xim_im) and + np.array_equal(self.varxip, other.varxip) and + np.array_equal(self.varxim, other.varxim) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.npairs, other.npairs))
+ +
[docs] def copy(self):
+        """Make a copy"""
+        ret = GGCorrelation.__new__(GGCorrelation)
+        for key, item in self.__dict__.items():
+            if isinstance(item, np.ndarray):
+                # Only items that might change need to be deep copied.
+                ret.__dict__[key] = item.copy()
+            else:
+                # For everything else, shallow copy is fine.
+                # In particular don't deep copy config or logger.
+                # Most of the rest are scalars, which copy fine this way.
+                # And the read-only things are all in _ro.
+                # The results dict is trickier.  We rely on it being copied in places, but we
+                # never add more to it after the copy, so shallow copy is fine.
+                ret.__dict__[key] = item
+        ret._corr = None  # We'll want to make a new one of these if we need it.
+        return ret
+ +
[docs] def __repr__(self): + return 'GGCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): + """Process a single catalog, accumulating the auto-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat (Catalog): The catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat.name == '': + self.logger.info('Starting process GG auto-correlations') + else: + self.logger.info('Starting process GG auto-correlations for cat %s.',cat.name) + + self._set_metric(metric, cat.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + field = cat.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',field.nTopLevelNodes) + _lib.ProcessAuto2(self.corr, field.data, self.output_dots, + field._d, self._coords, self._bintype, self._metric)
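A sketch of the accumulate-then-finalize pattern, where cat is a hypothetical shear catalog (one with g1/g2 columns):

import treecorr

gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
gg.process_auto(cat)                # may be called repeatedly to accumulate more data
varg = treecorr.calculateVarG(cat)  # shear variance per component
gg.finalize(varg, varg)             # divides by the total weight and sets the variances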
+ + +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process GG cross-correlations') + else: + self.logger.info('Starting process GG cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ + +
[docs] @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation, only using + the corresponding pairs of objects in each catalog. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `process` for + details. (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + import warnings + warnings.warn("The process_pairwise function is slated to be removed in a future version. "+ + "If you are actually using this function usefully, please "+ + "open an issue to describe your use case.", FutureWarning) + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process GG pairwise-correlations') + else: + self.logger.info('Starting process GG pairwise-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + + f1 = cat1.getGSimpleField() + f2 = cat2.getGSimpleField() + + _lib.ProcessPair(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ +
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + In this case, this is the concatenation of self.xip and self.xim (raveled if necessary). + """ + return np.concatenate([self.xip.ravel(), self.xim.ravel()])
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + This is the weight array corresponding to `getStat`. In this case, the weight is + duplicated to account for both xip and xim returned as part of getStat(). + """ + return np.concatenate([self.weight.ravel(), self.weight.ravel()])
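For example, after a finalized run with the default Log binning (where xip and xim each have length nbins), the two vectors line up as:

stat = gg.getStat()       # concatenation of xip and xim
weight = gg.getWeight()   # the weight array, duplicated to match
assert len(stat) == len(weight) == 2 * gg.nbins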
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.xip[mask1] /= self.weight[mask1] + self.xim[mask1] /= self.weight[mask1] + self.xip_im[mask1] /= self.weight[mask1] + self.xim_im[mask1] /= self.weight[mask1] + self.meanr[mask1] /= self.weight[mask1] + self.meanlogr[mask1] /= self.weight[mask1] + + # Update the units of meanr, meanlogr + self._apply_units(mask1) + + # Use meanr, meanlogr when available, but set to nominal when no pairs in bin. + self.meanr[mask2] = self.rnom[mask2] + self.meanlogr[mask2] = self.logr[mask2] + +
[docs] def finalize(self, varg1, varg2): + """Finalize the calculation of the correlation function. + + The `process_auto` and `process_cross` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation by dividing each column by the total weight. + + Parameters: + varg1 (float): The shear variance per component for the first field. + varg2 (float): The shear variance per component for the second field. + """ + self._finalize() + self._var_num = 2. * varg1 * varg2 + self.cov = self.estimate_cov(self.var_method) + self.varxip.ravel()[:] = self.cov.diagonal()[:self._nbins] + self.varxim.ravel()[:] = self.cov.diagonal()[self._nbins:]
+ + def _clear(self): + """Clear the data vectors + """ + self.xip.ravel()[:] = 0 + self.xim.ravel()[:] = 0 + self.xip_im.ravel()[:] = 0 + self.xim_im.ravel()[:] = 0 + self.meanr.ravel()[:] = 0 + self.meanlogr.ravel()[:] = 0 + self.weight.ravel()[:] = 0 + self.npairs.ravel()[:] = 0 + +
[docs] def __iadd__(self, other): + """Add a second `GGCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `GGCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, GGCorrelation): + raise TypeError("Can only add another GGCorrelation object") + if not (self._nbins == other._nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep): + raise ValueError("GGCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords) + self.xip.ravel()[:] += other.xip.ravel()[:] + self.xim.ravel()[:] += other.xim.ravel()[:] + self.xip_im.ravel()[:] += other.xip_im.ravel()[:] + self.xim_im.ravel()[:] += other.xim_im.ravel()[:] + self.meanr.ravel()[:] += other.meanr.ravel()[:] + self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:] + self.weight.ravel()[:] += other.weight.ravel()[:] + self.npairs.ravel()[:] += other.npairs.ravel()[:] + return self
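For instance, accumulations made separately (say, one data chunk per process) can be combined before finalizing. A sketch with hypothetical catalogs cat1 and cat2:

gg1 = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
gg2 = gg1.copy()
gg1.process_auto(cat1)
gg2.process_auto(cat2)
gg1 += gg2                                   # sum the raw accumulations
varg = treecorr.calculateVarG([cat1, cat2])
gg1.finalize(varg, varg)                     # finalize only after adding
# Note this counts only pairs within each chunk; pairs that span the two
# chunks would need a process_cross(cat1, cat2) call as well.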
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.xip for c in others], axis=0, out=self.xip) + np.sum([c.xim for c in others], axis=0, out=self.xim) + np.sum([c.xip_im for c in others], axis=0, out=self.xip_im) + np.sum([c.xim_im for c in others], axis=0, out=self.xim_im) + np.sum([c.meanr for c in others], axis=0, out=self.meanr) + np.sum([c.meanlogr for c in others], axis=0, out=self.meanlogr) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.npairs for c in others], axis=0, out=self.npairs) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, + initialize=True, finalize=True): + """Compute the correlation function. + + - If only 1 argument is given, then compute an auto-correlation function. + - If 2 arguments are given, then compute a cross-correlation function. + + Both arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first G field. + cat2 (Catalog): A catalog or list of catalogs for the second G field, if any. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr2.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if cat2 is not None and not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + + if cat2 is None: + self._process_all_auto(cat1, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, metric, num_threads, comm, low_mem) + + if finalize: + if cat2 is None: + varg1 = calculateVarG(cat1, low_mem=low_mem) + varg2 = varg1 + self.logger.info("varg = %f: sig_sn (per component) = %f",varg1,math.sqrt(varg1)) + else: + varg1 = calculateVarG(cat1, low_mem=low_mem) + varg2 = calculateVarG(cat2, low_mem=low_mem) + self.logger.info("varg1 = %f: sig_sn (per component) = %f",varg1,math.sqrt(varg1)) + self.logger.info("varg2 = %f: sig_sn (per component) = %f",varg2,math.sqrt(varg2)) + self.finalize(varg1,varg2)
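A patch-based sketch, which enables the resampling covariances mentioned in the class docstring (the ra/dec/g1/g2 arrays are hypothetical, and the npatch and var_method options are standard TreeCorr features not shown in this excerpt):

cat = treecorr.Catalog(ra=ra, dec=dec, g1=g1, g2=g2,
                       ra_units='deg', dec_units='deg', npatch=32)
gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
                            sep_units='arcmin', var_method='jackknife')
gg.process(cat)
cov = gg.cov    # (2*nbins, 2*nbins) covariance of the [xip, xim] data vector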
+ + +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + The output file will include the following columns: + + ========= ======================================================== + Column Description + ========= ======================================================== + r_nom The nominal center of the bin in r + meanr The mean value :math:`\langle r \rangle` of pairs that + fell into each bin + meanlogr The mean value :math:`\langle \log(r) \rangle` of pairs + that fell into each bin + xip The real part of the :math:`\xi_+` correlation function + xim The real part of the :math:`\xi_-` correlation function + xip_im The imag part of the :math:`\xi_+` correlation function + xim_im The imag part of the :math:`\xi_-` correlation function + sigma_xip The sqrt of the variance estimate of :math:`\xi_+` + sigma_xim The sqrt of the variance estimate of :math:`\xi_-` + weight The total weight contributing to each bin + npairs The total number of pairs in each bin + ========= ======================================================== + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing GG correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
+ + @property + def _write_col_names(self): + return ['r_nom', 'meanr', 'meanlogr', 'xip', 'xim', 'xip_im', 'xim_im', + 'sigma_xip', 'sigma_xim', 'weight', 'npairs'] + + @property + def _write_data(self): + data = [ self.rnom, self.meanr, self.meanlogr, + self.xip, self.xim, self.xip_im, self.xim_im, + np.sqrt(self.varxip), np.sqrt(self.varxim), + self.weight, self.npairs ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `GGCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading GG correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
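A write/read round trip might look like this sketch; per the warning above, the reading object is constructed with the same binning:

import numpy as np

gg.write('gg.fits')    # FITS keeps the full numerical precision
gg2 = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
gg2.read('gg.fits')
assert np.allclose(gg2.xip, gg.xip) and np.allclose(gg2.xim, gg.xim)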
+ + # Helper function used by _read + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + self.meanr = data['meanR'].reshape(s) + self.meanlogr = data['meanlogR'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meanr = data['meanr'].reshape(s) + self.meanlogr = data['meanlogr'].reshape(s) + self.xip = data['xip'].reshape(s) + self.xim = data['xim'].reshape(s) + self.xip_im = data['xip_im'].reshape(s) + self.xim_im = data['xim_im'].reshape(s) + # Read old output files without error. + if 'sigma_xi' in data.dtype.names: # pragma: no cover + self.varxip = data['sigma_xi'].reshape(s)**2 + self.varxim = data['sigma_xi'].reshape(s)**2 + else: + self.varxip = data['sigma_xip'].reshape(s)**2 + self.varxim = data['sigma_xim'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.npairs = data['npairs'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1) + +
[docs] @depr_pos_kwargs + def calculateMapSq(self, *, R=None, m2_uform=None): + r"""Calculate the aperture mass statistics from the correlation function. + + .. math:: + + \langle M_{ap}^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2} + \left [ T_+\left(\frac{r}{R}\right) \xi_+(r) + + T_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\ + \langle M_\times^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2} + \left[ T_+\left(\frac{r}{R}\right) \xi_+(r) - + T_-\left(\frac{r}{R}\right) \xi_-(r) \right] + + The ``m2_uform`` parameter sets which definition of the aperture mass to use. + The default is to use 'Crittenden'. + + If ``m2_uform`` is 'Crittenden': + + .. math:: + + U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\ + Q(r) &= \frac{1}{4\pi} r^2 \exp(-r^2/2) \\ + T_+(s) &= \frac{s^4 - 16s^2 + 32}{128} \exp(-s^2/4) \\ + T_-(s) &= \frac{s^4}{128} \exp(-s^2/4) \\ + rmax &= \infty + + cf. Crittenden, et al (2002): ApJ, 568, 20 + + If ``m2_uform`` is 'Schneider': + + .. math:: + + U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\ + Q(r) &= \frac{6}{\pi} r^2 (1-r^2) \\ + T_+(s) &= \frac{12}{5\pi} (2-15s^2) \arccos(s/2) \\ + &\qquad + \frac{1}{100\pi} s \sqrt{4-s^2} (120 + 2320s^2 - 754s^4 + 132s^6 - 9s^8) \\ + T_-(s) &= \frac{3}{70\pi} s^3 (4-s^2)^{7/2} \\ + rmax &= 2R + + cf. Schneider, et al (2002): A&A, 389, 729 + + .. note:: + + This function is only implemented for Log binning. + + + Parameters: + R (array): The R values at which to calculate the aperture mass statistics. + (default: None, which means use self.rnom) + m2_uform (str): Which form to use for the aperture mass, as described above. + (default: 'Crittenden'; this value can also be given in the + constructor in the config dict.) + + Returns: + Tuple containing + + - mapsq = array of :math:`\langle M_{ap}^2 \rangle(R)` + - mapsq_im = the imaginary part of mapsq, which is an estimate of + :math:`\langle M_{ap} M_\times \rangle(R)` + - mxsq = array of :math:`\langle M_\times^2 \rangle(R)` + - mxsq_im = the imaginary part of mxsq, which is an estimate of + :math:`\langle M_{ap} M_\times \rangle(R)` + - varmapsq = array of the variance estimate of either mapsq or mxsq + """ + if m2_uform is None: + m2_uform = self.config.get('m2_uform', 'Crittenden') + if m2_uform not in ['Crittenden', 'Schneider']: + raise ValueError("Invalid m2_uform") + if self.bin_type != 'Log': + raise ValueError("calculateMapSq requires Log binning.") + if R is None: + R = self.rnom + + # Make s a matrix, so we can eventually do the integral by doing a matrix product. + s = np.outer(1./R, self.meanr) + ssq = s*s + if m2_uform == 'Crittenden': + exp_factor = np.exp(-ssq/4.) + Tp = (32. + ssq*(-16. + ssq)) / 128. * exp_factor + Tm = ssq * ssq / 128. * exp_factor + else: + Tp = np.zeros_like(s) + Tm = np.zeros_like(s) + sa = s[s<2.] + ssqa = ssq[s<2.] + Tp[s<2.] = 12./(5.*np.pi) * (2.-15.*ssqa) * np.arccos(sa/2.) + Tp[s<2.] += 1./(100.*np.pi) * sa * np.sqrt(4.-ssqa) * ( + 120. + ssqa*(2320. + ssqa*(-754. + ssqa*(132. - 9.*ssqa)))) + Tm[s<2.] = 3./(70.*np.pi) * sa * ssqa * (4.-ssqa)**3.5 + Tp *= ssq + Tm *= ssq + + # Now do the integral by taking the matrix products. 
+ # Note that dlogr = bin_size + Tpxip = Tp.dot(self.xip) + Tmxim = Tm.dot(self.xim) + mapsq = (Tpxip + Tmxim) * 0.5 * self.bin_size + mxsq = (Tpxip - Tmxim) * 0.5 * self.bin_size + Tpxip_im = Tp.dot(self.xip_im) + Tmxim_im = Tm.dot(self.xim_im) + mapsq_im = (Tpxip_im + Tmxim_im) * 0.5 * self.bin_size + mxsq_im = (Tpxip_im - Tmxim_im) * 0.5 * self.bin_size + + # The variance of each of these is + # Var(<Map^2>(R)) = int_r=0..2R [1/4 s^4 dlogr^2 (T+(s)^2 + T-(s)^2) Var(xi)] + varmapsq = (Tp**2).dot(self.varxip) + (Tm**2).dot(self.varxim) + varmapsq *= 0.25 * self.bin_size**2 + + return mapsq, mapsq_im, mxsq, mxsq_im, varmapsq
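Usage sketch, continuing a processed gg object from the earlier examples:

import numpy as np

# Aperture mass statistics at the nominal bin radii:
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(m2_uform='Crittenden')

# Or at custom (hypothetical) aperture radii:
R = np.exp(np.linspace(np.log(5.), np.log(50.), 10))
mapsq_R = gg.calculateMapSq(R=R)[0]
# mapsq carries the E-mode signal; mxsq is the B-mode check, expected ~0 for lensing.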
+ + +
[docs] @depr_pos_kwargs
+    def calculateGamSq(self, *, R=None, eb=False):
+        r"""Calculate the tophat shear variance from the correlation function.
+
+        .. math::
+
+            \langle \gamma^2 \rangle(R) &= \int_0^{2R} \frac{r dr}{R^2} S_+(s) \xi_+(r) \\
+            \langle \gamma^2 \rangle_E(R) &= \int_0^{2R} \frac{r dr}{2 R^2}
+            \left[ S_+\left(\frac{r}{R}\right) \xi_+(r) +
+            S_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\
+            \langle \gamma^2 \rangle_B(R) &= \int_0^{2R} \frac{r dr}{2 R^2}
+            \left[ S_+\left(\frac{r}{R}\right) \xi_+(r) -
+            S_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\
+
+            S_+(s) &= \frac{1}{\pi} \left(4 \arccos(s/2) - s \sqrt{4-s^2} \right) \\
+            S_-(s) &= \begin{cases}
+            \frac{1}{\pi s^4} \left(s \sqrt{4-s^2} (6-s^2) - 8(3-s^2) \arcsin(s/2)\right)
+            & s \leq 2 \\
+            \frac{4(s^2-3)}{s^4} & s \geq 2
+            \end{cases}
+
+        cf. Schneider, et al (2002): A&A, 389, 729
+
+        The default behavior is not to compute the E/B versions.  They are calculated if
+        eb is set to True.
+
+        .. note::
+
+            This function is only implemented for Log binning.
+
+
+        Parameters:
+            R (array):  The R values at which to calculate the shear variance.
+                        (default: None, which means use self.rnom)
+            eb (bool):  Whether to include the E/B decomposition as well as the total
+                        :math:`\langle \gamma^2\rangle`. (default: False)
+
+        Returns:
+            Tuple containing
+
+            - gamsq = array of :math:`\langle \gamma^2 \rangle(R)`
+            - vargamsq = array of the variance estimate of gamsq
+            - gamsq_e (Only if eb is True) = array of :math:`\langle \gamma^2 \rangle_E(R)`
+            - gamsq_b (Only if eb is True) = array of :math:`\langle \gamma^2 \rangle_B(R)`
+            - vargamsq_e (Only if eb is True) = array of the variance estimate of
+              gamsq_e or gamsq_b
+        """
+        if self.bin_type != 'Log':
+            raise ValueError("calculateGamSq requires Log binning.")
+
+        if R is None:
+            R = self.rnom
+        s = np.outer(1./R, self.meanr)
+        ssq = s*s
+        Sp = np.zeros_like(s)
+        sa = s[s<2]
+        ssqa = ssq[s<2]
+        Sp[s<2.] = 1./np.pi * ssqa * (4.*np.arccos(sa/2.) - sa*np.sqrt(4.-ssqa))
+
+        # Now do the integral by taking the matrix products.
+        # Note that dlogr = bin_size
+        Spxip = Sp.dot(self.xip)
+        gamsq = Spxip * self.bin_size
+        vargamsq = (Sp**2).dot(self.varxip) * self.bin_size**2
+
+        # Stop here if eb is False
+        if not eb: return gamsq, vargamsq
+
+        Sm = np.empty_like(s)
+        Sm[s<2.] = 1./(ssqa*np.pi) * (sa*np.sqrt(4.-ssqa)*(6.-ssqa)
+                                      -8.*(3.-ssqa)*np.arcsin(sa/2.))
+        Sm[s>=2.] = 4.*(ssq[s>=2]-3.)/ssq[s>=2]
+        # This already includes the extra ssq factor.
+
+        Smxim = Sm.dot(self.xim)
+        gamsq_e = (Spxip + Smxim) * 0.5 * self.bin_size
+        gamsq_b = (Spxip - Smxim) * 0.5 * self.bin_size
+        vargamsq_e = (Sp**2).dot(self.varxip) + (Sm**2).dot(self.varxim)
+        vargamsq_e *= 0.25 * self.bin_size**2
+
+        return gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_e
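Usage sketch, again with a processed gg object:

gamsq, vargamsq = gg.calculateGamSq()    # total shear variance only
gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_e = gg.calculateGamSq(eb=True)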
+ + +
[docs] @depr_pos_kwargs
+    def writeMapSq(self, file_name, *, R=None, m2_uform=None, file_type=None, precision=None):
+        r"""Write the aperture mass statistics based on the correlation function to the
+        file, file_name.
+
+        See `calculateMapSq` for an explanation of the ``m2_uform`` parameter.
+
+        The output file will include the following columns:
+
+        =========   ==========================================================
+        Column      Description
+        =========   ==========================================================
+        R           The aperture radius
+        Mapsq       The real part of :math:`\langle M_{ap}^2\rangle`
+                    (cf. `calculateMapSq`)
+        Mxsq        The real part of :math:`\langle M_\times^2\rangle`
+        MMxa        The imag part of :math:`\langle M_{ap}^2\rangle`:
+                    an estimator of :math:`\langle M_{ap} M_\times\rangle`
+        MMxb        The imag part of :math:`\langle M_\times^2\rangle`:
+                    an estimator of :math:`\langle M_{ap} M_\times\rangle`
+        sig_map     The sqrt of the variance estimate of
+                    :math:`\langle M_{ap}^2\rangle`
+        Gamsq       The tophat shear variance :math:`\langle \gamma^2\rangle`
+                    (cf. `calculateGamSq`)
+        sig_gam     The sqrt of the variance estimate of
+                    :math:`\langle \gamma^2\rangle`
+        =========   ==========================================================
+
+        Parameters:
+            file_name (str):    The name of the file to write to.
+            R (array):          The R values at which to calculate the statistics.
+                                (default: None, which means use self.rnom)
+            m2_uform (str):     Which form to use for the aperture mass. (default: 'Crittenden';
+                                this value can also be given in the constructor in the config dict.)
+            file_type (str):    The type of file to write ('ASCII' or 'FITS'). (default: determine
+                                the type automatically from the extension of file_name.)
+            precision (int):    For ASCII output catalogs, the desired precision. (default: 4;
+                                this value can also be given in the constructor in the config dict.)
+        """
+        self.logger.info('Writing Map^2 from GG correlations to %s',file_name)
+
+        if R is None:
+            R = self.rnom
+        mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = self.calculateMapSq(R=R, m2_uform=m2_uform)
+        gamsq, vargamsq = self.calculateGamSq(R=R)
+        if precision is None:
+            precision = self.config.get('precision', 4)
+
+        col_names = ['R','Mapsq','Mxsq','MMxa','MMxb','sig_map','Gamsq','sig_gam']
+        columns = [ R,
+                    mapsq, mxsq, mapsq_im, -mxsq_im, np.sqrt(varmapsq),
+                    gamsq, np.sqrt(vargamsq) ]
+        with make_writer(file_name, precision, file_type, logger=self.logger) as writer:
+            writer.write(col_names, columns)
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/gggcorrelation.html b/docs/_build/html/_modules/treecorr/gggcorrelation.html new file mode 100644 index 00000000..aaee276c --- /dev/null +++ b/docs/_build/html/_modules/treecorr/gggcorrelation.html @@ -0,0 +1,1752 @@
+treecorr.gggcorrelation — TreeCorr 4.3.0 documentation

Source code for treecorr.gggcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: gggcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarG
+from .binnedcorr3 import BinnedCorr3
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class GGGCorrelation(BinnedCorr3):
+    r"""This class handles the calculation and storage of a 3-point shear-shear-shear correlation
+    function.
+
+    We use the "natural components" of the shear 3-point function described by Schneider &
+    Lombardi (2003) [Astron.Astrophys. 397 (2003) 809-818].  In this paradigm, the shears
+    are projected relative to some point defined by the geometry of the triangle.  They
+    give several reasonable choices for this point.  We choose the triangle's centroid as the
+    "most natural" point, as many simple shear fields have purely real :math:`\Gamma_0` using
+    this definition.  It is also a fairly simple point to calculate in the code compared to
+    some of the other options they offer, so projections relative to it are fairly efficient.
+
+    There are 4 complex-valued 3-point shear correlation functions defined for triples of shear
+    values projected relative to the line joining the location of the shear to the centroid of
+    the triangle:
+
+    .. math::
+
+        \Gamma_0 &= \langle \gamma(\mathbf{x1}) \gamma(\mathbf{x2}) \gamma(\mathbf{x3}) \rangle \\
+        \Gamma_1 &= \langle \gamma(\mathbf{x1})^* \gamma(\mathbf{x2}) \gamma(\mathbf{x3}) \rangle \\
+        \Gamma_2 &= \langle \gamma(\mathbf{x1}) \gamma(\mathbf{x2})^* \gamma(\mathbf{x3}) \rangle \\
+        \Gamma_3 &= \langle \gamma(\mathbf{x1}) \gamma(\mathbf{x2}) \gamma(\mathbf{x3})^* \rangle \\
+
+    where :math:`\mathbf{x1}, \mathbf{x2}, \mathbf{x3}` are the corners of the triangle opposite
+    sides d1, d2, d3 respectively, where d1 > d2 > d3, and :math:`{}^*` indicates complex
+    conjugation.
+
+    See the doc string of `BinnedCorr3` for a description of how the triangles
+    are binned.
+
+    This class only holds one set of these :math:`\Gamma` functions, which means that it is only
+    directly applicable for computing auto-correlations.  To describe a cross-correlation of one
+    shear field with another, you need three sets of these functions.  To describe a three-way
+    cross-correlation of three different shear fields, you need six.  These use cases are
+    enabled by the class `GGGCrossCorrelation` which holds six instances of this class to keep
+    track of all the various triangles.  See that class for more details.
+
+    Objects of this class hold the following attributes:
+
+    Attributes:
+        nbins:      The number of bins in logr where r = d2
+        bin_size:   The size of the bins in logr
+        min_sep:    The minimum separation being considered
+        max_sep:    The maximum separation being considered
+        nubins:     The number of bins in u where u = d3/d2
+        ubin_size:  The size of the bins in u
+        min_u:      The minimum u being considered
+        max_u:      The maximum u being considered
+        nvbins:     The number of bins in v where v = +-(d1-d2)/d3
+        vbin_size:  The size of the bins in v
+        min_v:      The minimum v being considered
+        max_v:      The maximum v being considered
+        logr1d:     The nominal centers of the nbins bins in log(r).
+        u1d:        The nominal centers of the nubins bins in u.
+        v1d:        The nominal centers of the nvbins bins in v.
+
+    In addition, the following attributes are numpy arrays whose shape is (nbins, nubins, nvbins):
+
+    Attributes:
+        logr:       The nominal center of each bin in log(r).
+        rnom:       The nominal center of the bin converted to regular distance.
+                    i.e. r = exp(logr).
+        u:          The nominal center of each bin in u.
+        v:          The nominal center of each bin in v.
+        meand1:     The (weighted) mean value of d1 for the triangles in each bin.
+        meanlogd1:  The mean value of log(d1) for the triangles in each bin.
+        meand2:     The (weighted) mean value of d2 (aka r) for the triangles in each bin.
+        meanlogd2:  The mean value of log(d2) for the triangles in each bin.
+        meand3:     The (weighted) mean value of d3 for the triangles in each bin.
+        meanlogd3:  The mean value of log(d3) for the triangles in each bin.
+        meanu:      The mean value of u for the triangles in each bin.
+        meanv:      The mean value of v for the triangles in each bin.
+        gam0:       The 0th "natural" correlation function, :math:`\Gamma_0(r,u,v)`.
+        gam1:       The 1st "natural" correlation function, :math:`\Gamma_1(r,u,v)`.
+        gam2:       The 2nd "natural" correlation function, :math:`\Gamma_2(r,u,v)`.
+        gam3:       The 3rd "natural" correlation function, :math:`\Gamma_3(r,u,v)`.
+        vargam0:    The variance of :math:`\Gamma_0`, only including the shot noise
+                    propagated into the final correlation.  This (and the related values for
+                    1,2,3) does not include sample variance, so it is always an underestimate
+                    of the actual variance.
+        vargam1:    The variance of :math:`\Gamma_1`.
+        vargam2:    The variance of :math:`\Gamma_2`.
+        vargam3:    The variance of :math:`\Gamma_3`.
+        weight:     The total weight in each bin.
+        ntri:       The number of triangles going into each bin (including those where one or
+                    more objects have w=0).
+
+    If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances
+    will all be in these units.
+
+    .. note::
+
+        If you separate out the steps of the `process` command and use `process_auto` and/or
+        `process_cross`, then the units will not be applied to ``meanr`` or ``meanlogr`` until
+        the `finalize` function is called.
+
+    The typical usage pattern is as follows::
+
+        >>> ggg = treecorr.GGGCorrelation(config)
+        >>> ggg.process(cat)              # For auto-correlation.
+        >>> ggg.process(cat1,cat2,cat3)   # For cross-correlation.
+        >>> ggg.write(file_name)          # Write out to a file.
+        >>> gam0 = ggg.gam0, etc.         # To access gamma values directly.
+        >>> gam0r = ggg.gam0r             # You can also access real and imag parts separately.
+        >>> gam0i = ggg.gam0i
+
+    Parameters:
+        config (dict):  A configuration dict that can be used to pass in kwargs if desired.
+                        This dict is allowed to have additional entries besides those listed
+                        in `BinnedCorr3`, which are ignored here. (default: None)
+        logger:         If desired, a logger object for logging. (default: None, in which case
+                        one will be built according to the config dict's verbose level.)
+
+    Keyword Arguments:
+        **kwargs:       See the documentation for `BinnedCorr3` for the list of allowed keyword
+                        arguments, which may be passed either directly or in the config dict.
+    """
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `GGGCorrelation`. See class doc for details. + """ + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 3 # GData + self._ro._d2 = 3 # GData + self._ro._d3 = 3 # GData + shape = self.logr.shape + self.gam0r = np.zeros(shape, dtype=float) + self.gam1r = np.zeros(shape, dtype=float) + self.gam2r = np.zeros(shape, dtype=float) + self.gam3r = np.zeros(shape, dtype=float) + self.gam0i = np.zeros(shape, dtype=float) + self.gam1i = np.zeros(shape, dtype=float) + self.gam2i = np.zeros(shape, dtype=float) + self.gam3i = np.zeros(shape, dtype=float) + self.vargam0 = np.zeros(shape, dtype=float) + self.vargam1 = np.zeros(shape, dtype=float) + self.vargam2 = np.zeros(shape, dtype=float) + self.vargam3 = np.zeros(shape, dtype=float) + self.meand1 = np.zeros(shape, dtype=float) + self.meanlogd1 = np.zeros(shape, dtype=float) + self.meand2 = np.zeros(shape, dtype=float) + self.meanlogd2 = np.zeros(shape, dtype=float) + self.meand3 = np.zeros(shape, dtype=float) + self.meanlogd3 = np.zeros(shape, dtype=float) + self.meanu = np.zeros(shape, dtype=float) + self.meanv = np.zeros(shape, dtype=float) + self.weight = np.zeros(shape, dtype=float) + self.ntri = np.zeros(shape, dtype=float) + self.logger.debug('Finished building GGGCorr')
+ + @property + def gam0(self): + return self.gam0r + 1j * self.gam0i + + @property + def gam1(self): + return self.gam1r + 1j * self.gam1i + + @property + def gam2(self): + return self.gam2r + 1j * self.gam2i + + @property + def gam3(self): + return self.gam3r + 1j * self.gam3i + + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr3( + self._d1, self._d2, self._d3, self._bintype, + self._min_sep,self._max_sep,self.nbins,self._bin_size,self.b, + self.min_u,self.max_u,self.nubins,self.ubin_size,self.bu, + self.min_v,self.max_v,self.nvbins,self.vbin_size,self.bv, + self.xperiod, self.yperiod, self.zperiod, + dp(self.gam0r), dp(self.gam0i), dp(self.gam1r), dp(self.gam1i), + dp(self.gam2r), dp(self.gam2i), dp(self.gam3r), dp(self.gam3i), + dp(self.meand1), dp(self.meanlogd1), dp(self.meand2), dp(self.meanlogd2), + dp(self.meand3), dp(self.meanlogd3), dp(self.meanu), dp(self.meanv), + dp(self.weight), dp(self.ntri)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr3(self.corr, self._d1, self._d2, self._d3, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `GGGCorrelation` instances are equal""" + return (isinstance(other, GGGCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nubins == other.nubins and + self.ubin_size == other.ubin_size and + self.min_v == other.min_v and + self.max_v == other.max_v and + self.nvbins == other.nvbins and + self.vbin_size == other.vbin_size and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meand1, other.meand1) and + np.array_equal(self.meanlogd1, other.meanlogd1) and + np.array_equal(self.meand2, other.meand2) and + np.array_equal(self.meanlogd2, other.meanlogd2) and + np.array_equal(self.meand3, other.meand3) and + np.array_equal(self.meanlogd3, other.meanlogd3) and + np.array_equal(self.meanu, other.meanu) and + np.array_equal(self.meanv, other.meanv) and + np.array_equal(self.gam0r, other.gam0r) and + np.array_equal(self.gam0i, other.gam0i) and + np.array_equal(self.gam1r, other.gam1r) and + np.array_equal(self.gam1i, other.gam1i) and + np.array_equal(self.gam2r, other.gam2r) and + np.array_equal(self.gam2i, other.gam2i) and + np.array_equal(self.gam3r, other.gam3r) and + np.array_equal(self.gam3i, other.gam3i) and + np.array_equal(self.vargam0, other.vargam0) and + np.array_equal(self.vargam1, other.vargam1) and + np.array_equal(self.vargam2, other.vargam2) and + np.array_equal(self.vargam3, other.vargam3) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.ntri, other.ntri))
+ +
[docs] def copy(self): + """Make a copy""" + ret = GGGCorrelation.__new__(GGGCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular don't deep copy config or logger. + # Most of the rest are scalars, which copy fine this way. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + return ret
+ +
[docs] def __repr__(self): + return 'GGGCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): + """Process a single catalog, accumulating the auto-correlation. + + This accumulates the auto-correlation for the given catalog. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + Parameters: + cat (Catalog): The catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat.name == '': + self.logger.info('Starting process GGG auto-correlations') + else: + self.logger.info('Starting process GGG auto-correlations for cat %s.', cat.name) + + self._set_metric(metric, cat.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + field = cat.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',field.nTopLevelNodes) + _lib.ProcessAuto3(self.corr, field.data, self.output_dots, + field._d, self._coords, self._bintype, self._metric)
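+    # Hedged sketch of the split-step API on a single catalog (`cat` is an assumed
+    # shear Catalog); the regular `process` method does all of this in one call:
+    #
+    #     >>> ggg.clear()
+    #     >>> ggg.process_auto(cat)
+    #     >>> varg = treecorr.calculateVarG(cat)
+    #     >>> ggg.finalize(varg, varg, varg)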
+ +
[docs] @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): + """Process two catalogs, accumulating the 3pt cross-correlation, where one of the + points in each triangle comes from the first catalog, and two come from the second. + + This accumulates the cross-correlation for the given catalogs as part of a larger + auto-correlation calculation. E.g. when splitting up a large catalog into patches, + this is appropriate to use for the cross correlation between different patches + as part of the complete auto-correlation of the full catalog. + + Parameters: + cat1 (Catalog): The first catalog to process. (1 point in each triangle will come + from this catalog.) + cat2 (Catalog): The second catalog to process. (2 points in each triangle will come + from this catalog.) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process GGG (1-2) cross-correlations') + else: + self.logger.info('Starting process GGG (1-2) cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: all 3 correlation objects are the same. Thus, all triangles will be placed + # into self.corr, whichever way the three catalogs are permuted for each triangle. + _lib.ProcessCross12(self.corr, self.corr, self.corr, + f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, + self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): + """Process a set of three catalogs, accumulating the 3pt cross-correlation. + + This accumulates the cross-correlation for the given catalogs as part of a larger + auto-correlation calculation. E.g. when splitting up a large catalog into patches, + this is appropriate to use for the cross correlation between different patches + as part of the complete auto-correlation of the full catalog. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + cat3 (Catalog): The third catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '' and cat3.name == '': + self.logger.info('Starting process GGG cross-correlations') + else: + self.logger.info('Starting process GGG cross-correlations for cats %s, %s, %s.', + cat1.name, cat2.name, cat3.name) + + self._set_metric(metric, cat1.coords, cat2.coords, cat3.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: all 6 correlation objects are the same. Thus, all triangles will be placed + # into self.corr, whichever way the three catalogs are permuted for each triangle. + _lib.ProcessCross3(self.corr, self.corr, self.corr, + self.corr, self.corr, self.corr, + f1.data, f2.data, f3.data, self.output_dots, + f1._d, f2._d, f3._d, self._coords, self._bintype, self._metric)
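+    # Hedged sketch of how process_auto, process_cross12, and process_cross combine
+    # for a complete auto-correlation over a list of patches (an illustrative
+    # decomposition; the regular `process` method handles this automatically):
+    #
+    #     >>> for i, p1 in enumerate(patches):
+    #     ...     ggg.process_auto(p1)
+    #     ...     for j, p2 in enumerate(patches[i+1:], i+1):
+    #     ...         ggg.process_cross12(p1, p2)
+    #     ...         ggg.process_cross12(p2, p1)
+    #     ...         for p3 in patches[j+1:]:
+    #     ...             ggg.process_cross(p1, p2, p3)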
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.gam0r[mask1] /= self.weight[mask1] + self.gam0i[mask1] /= self.weight[mask1] + self.gam1r[mask1] /= self.weight[mask1] + self.gam1i[mask1] /= self.weight[mask1] + self.gam2r[mask1] /= self.weight[mask1] + self.gam2i[mask1] /= self.weight[mask1] + self.gam3r[mask1] /= self.weight[mask1] + self.gam3i[mask1] /= self.weight[mask1] + self.meand1[mask1] /= self.weight[mask1] + self.meanlogd1[mask1] /= self.weight[mask1] + self.meand2[mask1] /= self.weight[mask1] + self.meanlogd2[mask1] /= self.weight[mask1] + self.meand3[mask1] /= self.weight[mask1] + self.meanlogd3[mask1] /= self.weight[mask1] + self.meanu[mask1] /= self.weight[mask1] + self.meanv[mask1] /= self.weight[mask1] + + # Update the units + self._apply_units(mask1) + + # Use meanlogr when available, but set to nominal when no triangles in bin. + self.meand2[mask2] = self.rnom[mask2] + self.meanlogd2[mask2] = self.logr[mask2] + self.meanu[mask2] = self.u[mask2] + self.meanv[mask2] = self.v[mask2] + self.meand3[mask2] = self.u[mask2] * self.meand2[mask2] + self.meanlogd3[mask2] = np.log(self.meand3[mask2]) + self.meand1[mask2] = np.abs(self.v[mask2]) * self.meand3[mask2] + self.meand2[mask2] + self.meanlogd1[mask2] = np.log(self.meand1[mask2]) + +
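+    # For example, an empty bin with nominal r = 10, u = 0.5, v = 0.3 gets the
+    # fallback values d2 = 10, d3 = u*d2 = 5, and d1 = |v|*d3 + d2 = 11.5
+    # (a worked instance of the geometry used in _finalize above).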
[docs] def finalize(self, varg1, varg2, varg3): + """Finalize the calculation of the correlation function. + + The `process_auto` and `process_cross` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation by dividing by the total weight. + + Parameters: + varg1 (float): The shear variance for the first field. + varg2 (float): The shear variance for the second field. + varg3 (float): The shear variance for the third field. + """ + self._finalize() + mask1 = self.weight != 0 + mask2 = self.weight == 0 + self._var_num = 4 * varg1 * varg2 * varg3 + self.cov = self.estimate_cov(self.var_method) + # Note: diagonal should be very close to pure real. So ok to just copy real part. + diag = self.cov.diagonal() + # This should never trigger. If this assert ever fails for you, please open an + # issue describing the use case that caused it. + assert np.sum(diag.imag**2) <= 1.e-8 * np.sum(diag.real**2) + self.vargam0.ravel()[:] = self.cov.diagonal()[0:self._nbins].real + self.vargam1.ravel()[:] = self.cov.diagonal()[self._nbins:2*self._nbins].real + self.vargam2.ravel()[:] = self.cov.diagonal()[2*self._nbins:3*self._nbins].real + self.vargam3.ravel()[:] = self.cov.diagonal()[3*self._nbins:4*self._nbins].real
+ + def _clear(self): + """Clear the data vectors + """ + self.gam0r[:,:,:] = 0. + self.gam0i[:,:,:] = 0. + self.gam1r[:,:,:] = 0. + self.gam1i[:,:,:] = 0. + self.gam2r[:,:,:] = 0. + self.gam2i[:,:,:] = 0. + self.gam3r[:,:,:] = 0. + self.gam3i[:,:,:] = 0. + self.vargam0[:,:,:] = 0. + self.vargam1[:,:,:] = 0. + self.vargam2[:,:,:] = 0. + self.vargam3[:,:,:] = 0. + self.meand1[:,:,:] = 0. + self.meanlogd1[:,:,:] = 0. + self.meand2[:,:,:] = 0. + self.meanlogd2[:,:,:] = 0. + self.meand3[:,:,:] = 0. + self.meanlogd3[:,:,:] = 0. + self.meanu[:,:,:] = 0. + self.meanv[:,:,:] = 0. + self.weight[:,:,:] = 0. + self.ntri[:,:,:] = 0. + +
[docs] def __iadd__(self, other): + """Add a second `GGGCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `GGGCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, GGGCorrelation): + raise TypeError("Can only add another GGGCorrelation object") + if not (self.nbins == other.nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.nubins == other.nubins and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nvbins == other.nvbins and + self.min_v == other.min_v and + self.max_v == other.max_v): + raise ValueError("GGGCorrelation to be added is not compatible with this one.") + + if not other.nonzero: return self + self._set_metric(other.metric, other.coords, other.coords, other.coords) + self.gam0r[:] += other.gam0r[:] + self.gam0i[:] += other.gam0i[:] + self.gam1r[:] += other.gam1r[:] + self.gam1i[:] += other.gam1i[:] + self.gam2r[:] += other.gam2r[:] + self.gam2i[:] += other.gam2i[:] + self.gam3r[:] += other.gam3r[:] + self.gam3i[:] += other.gam3i[:] + self.meand1[:] += other.meand1[:] + self.meanlogd1[:] += other.meanlogd1[:] + self.meand2[:] += other.meand2[:] + self.meanlogd2[:] += other.meanlogd2[:] + self.meand3[:] += other.meand3[:] + self.meanlogd3[:] += other.meanlogd3[:] + self.meanu[:] += other.meanu[:] + self.meanv[:] += other.meanv[:] + self.weight[:] += other.weight[:] + self.ntri[:] += other.ntri[:] + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.gam0r for c in others], axis=0, out=self.gam0r) + np.sum([c.gam0i for c in others], axis=0, out=self.gam0i) + np.sum([c.gam1r for c in others], axis=0, out=self.gam1r) + np.sum([c.gam1i for c in others], axis=0, out=self.gam1i) + np.sum([c.gam2r for c in others], axis=0, out=self.gam2r) + np.sum([c.gam2i for c in others], axis=0, out=self.gam2i) + np.sum([c.gam3r for c in others], axis=0, out=self.gam3r) + np.sum([c.gam3i for c in others], axis=0, out=self.gam3i) + np.sum([c.meand1 for c in others], axis=0, out=self.meand1) + np.sum([c.meanlogd1 for c in others], axis=0, out=self.meanlogd1) + np.sum([c.meand2 for c in others], axis=0, out=self.meand2) + np.sum([c.meanlogd2 for c in others], axis=0, out=self.meanlogd2) + np.sum([c.meand3 for c in others], axis=0, out=self.meand3) + np.sum([c.meanlogd3 for c in others], axis=0, out=self.meanlogd3) + np.sum([c.meanu for c in others], axis=0, out=self.meanu) + np.sum([c.meanv for c in others], axis=0, out=self.meanv) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.ntri for c in others], axis=0, out=self.ntri) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, + comm=None, low_mem=False, initialize=True, finalize=True): + """Compute the 3pt correlation function. + + - If only 1 argument is given, then compute an auto-correlation function. + - If 2 arguments are given, then compute a cross-correlation function with the + first catalog taking one corner of the triangles, and the second taking two corners. + - If 3 arguments are given, then compute a three-way cross-correlation function. + + All arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + .. note:: + + For a correlation of multiple catalogs, it typically matters which corner of the + triangle comes from which catalog, which is not kept track of by this function. + The final accumulation will have d1 > d2 > d3 regardless of which input catalog + appears at each corner. The class which keeps track of which catalog appears + in each position in the triangle is `GGGCrossCorrelation`. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first G field. + cat2 (Catalog): A catalog or list of catalogs for the second G field. + (default: None) + cat3 (Catalog): A catalog or list of catalogs for the third G field. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr3.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. 
+ (default: True) + """ + import math + if initialize: + self.clear() + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if cat2 is not None and not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + if cat3 is not None and not isinstance(cat3,list): + cat3 = cat3.get_patches(low_mem=low_mem) + + if cat2 is None: + if cat3 is not None: + raise ValueError("For two catalog case, use cat1,cat2, not cat1,cat3") + self._process_all_auto(cat1, metric, num_threads, comm, low_mem) + elif cat3 is None: + self._process_all_cross12(cat1, cat2, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, cat3, metric, num_threads, comm, low_mem) + + if finalize: + if cat2 is None: + varg1 = calculateVarG(cat1, low_mem=low_mem) + varg2 = varg1 + varg3 = varg1 + self.logger.info("varg = %f: sig_g = %f",varg1,math.sqrt(varg1)) + elif cat3 is None: + varg1 = calculateVarG(cat1, low_mem=low_mem) + varg2 = calculateVarG(cat2, low_mem=low_mem) + varg3 = varg2 + self.logger.info("varg1 = %f: sig_g = %f",varg1,math.sqrt(varg1)) + self.logger.info("varg2 = %f: sig_g = %f",varg2,math.sqrt(varg2)) + else: + varg1 = calculateVarG(cat1, low_mem=low_mem) + varg2 = calculateVarG(cat2, low_mem=low_mem) + varg3 = calculateVarG(cat3, low_mem=low_mem) + self.logger.info("varg1 = %f: sig_g = %f",varg1,math.sqrt(varg1)) + self.logger.info("varg2 = %f: sig_g = %f",varg2,math.sqrt(varg2)) + self.logger.info("varg3 = %f: sig_g = %f",varg3,math.sqrt(varg3)) + self.finalize(varg1,varg2,varg3)
+ +
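+    # Hedged sketch: the initialize/finalize flags let several calls accumulate
+    # into one measurement, e.g. the auto-correlations of several catalogs with
+    # no cross-catalog triangles (`cats` is an assumed list of shear Catalogs):
+    #
+    #     >>> for i, c in enumerate(cats):
+    #     ...     ggg.process(c, initialize=(i==0), finalize=(i==len(cats)-1))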
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + In this case, the concatenation of gam0.ravel(), gam1.ravel(), gam2.ravel(), gam3.ravel(). + + .. note:: + + This is a complex array, unlike most other statistics. + The computed covariance matrix will be complex, but since it is Hermitian, the + diagonal is real, so the resulting vargam0, etc. will all be real arrays. + """ + return np.concatenate([self.gam0.ravel(), self.gam1.ravel(), + self.gam2.ravel(), self.gam3.ravel()])
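+    # Quick sanity check of the data-vector layout (a hedged sketch; `ggg` is an
+    # assumed processed GGGCorrelation):
+    #
+    #     >>> stat = ggg.getStat()
+    #     >>> assert len(stat) == 4 * ggg.nbins * ggg.nubins * ggg.nvbins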
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + In this case, 4 copies of self.weight.ravel(). + """ + return np.concatenate([self.weight.ravel()] * 4)
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + As described in the doc string for `GGGCorrelation`, we use the "natural components" of + the shear 3-point function described by Schneider & Lombardi (2003) using the triangle + centroid as the projection point. There are 4 complex-valued natural components, so there + are 8 columns in the output file. + + The output file will include the following columns: + + ========== ============================================================= + Column Description + ========== ============================================================= + r_nom The nominal center of the bin in r = d2 where d1 > d2 > d3 + u_nom The nominal center of the bin in u = d3/d2 + v_nom The nominal center of the bin in v = +-(d1-d2)/d3 + meand1 The mean value :math:`\langle d1\rangle` of triangles that + fell into each bin + meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles + that fell into each bin + meand2 The mean value :math:`\langle d2\rangle` of triangles that + fell into each bin + meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles + that fell into each bin + meand3 The mean value :math:`\langle d3\rangle` of triangles that + fell into each bin + meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles + that fell into each bin + meanu The mean value :math:`\langle u\rangle` of triangles that + fell into each bin + meanv The mean value :math:`\langle v\rangle` of triangles that + fell into each bin + gam0r The real part of the estimator of :math:`\Gamma_0(r,u,v)` + gam0i The imag part of the estimator of :math:`\Gamma_0(r,u,v)` + gam1r The real part of the estimator of :math:`\Gamma_1(r,u,v)` + gam1i The imag part of the estimator of :math:`\Gamma_1(r,u,v)` + gam2r The real part of the estimator of :math:`\Gamma_2(r,u,v)` + gam2i The imag part of the estimator of :math:`\Gamma_2(r,u,v)` + gam3r The real part of the estimator of :math:`\Gamma_3(r,u,v)` + gam3i The imag part of the estimator of :math:`\Gamma_3(r,u,v)` + sigma_gam0 The sqrt of the variance estimate of :math:`\Gamma_0` + sigma_gam1 The sqrt of the variance estimate of :math:`\Gamma_1` + sigma_gam2 The sqrt of the variance estimate of :math:`\Gamma_2` + sigma_gam3 The sqrt of the variance estimate of :math:`\Gamma_3` + weight The total weight of triangles contributing to each bin. + ntri The number of triangles contributing to each bin. + ========== ============================================================= + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well.
+ (default: False) + """ + self.logger.info('Writing GGG correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
+ + @property + def _write_col_names(self): + return [ 'r_nom', 'u_nom', 'v_nom', 'meand1', 'meanlogd1', 'meand2', 'meanlogd2', + 'meand3', 'meanlogd3', 'meanu', 'meanv', + 'gam0r', 'gam0i', 'gam1r', 'gam1i', 'gam2r', 'gam2i', 'gam3r', 'gam3i', + 'sigma_gam0', 'sigma_gam1', 'sigma_gam2', 'sigma_gam3', 'weight', 'ntri' ] + + @property + def _write_data(self): + data = [ self.rnom, self.u, self.v, + self.meand1, self.meanlogd1, self.meand2, self.meanlogd2, + self.meand3, self.meanlogd3, self.meanu, self.meanv, + self.gam0r, self.gam0i, self.gam1r, self.gam1i, + self.gam2r, self.gam2i, self.gam3r, self.gam3i, + np.sqrt(self.vargam0), np.sqrt(self.vargam1), np.sqrt(self.vargam2), + np.sqrt(self.vargam3), self.weight, self.ntri ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `GGGCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading GGG correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
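+    # Hedged sketch of a write/read round trip (the file name is an assumption).
+    # FITS preserves full precision; ASCII is limited by `precision`:
+    #
+    #     >>> ggg.write('ggg.fits')
+    #     >>> ggg2 = treecorr.GGGCorrelation(min_sep=1., max_sep=100., nbins=10)
+    #     >>> ggg2.read('ggg.fits')   # must use the same binning configuration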
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self._ro.u = data['u_nom'].reshape(s) + self._ro.v = data['v_nom'].reshape(s) + self.meand1 = data['meand1'].reshape(s) + self.meanlogd1 = data['meanlogd1'].reshape(s) + self.meand2 = data['meand2'].reshape(s) + self.meanlogd2 = data['meanlogd2'].reshape(s) + self.meand3 = data['meand3'].reshape(s) + self.meanlogd3 = data['meanlogd3'].reshape(s) + self.meanu = data['meanu'].reshape(s) + self.meanv = data['meanv'].reshape(s) + self.gam0r = data['gam0r'].reshape(s) + self.gam0i = data['gam0i'].reshape(s) + self.gam1r = data['gam1r'].reshape(s) + self.gam1i = data['gam1i'].reshape(s) + self.gam2r = data['gam2r'].reshape(s) + self.gam2i = data['gam2i'].reshape(s) + self.gam3r = data['gam3r'].reshape(s) + self.gam3i = data['gam3i'].reshape(s) + # Read old output files without error. + if 'sigma_gam' in data.dtype.names: # pragma: no cover + self.vargam0 = data['sigma_gam'].reshape(s)**2 + self.vargam1 = data['sigma_gam'].reshape(s)**2 + self.vargam2 = data['sigma_gam'].reshape(s)**2 + self.vargam3 = data['sigma_gam'].reshape(s)**2 + else: + self.vargam0 = data['sigma_gam0'].reshape(s)**2 + self.vargam1 = data['sigma_gam1'].reshape(s)**2 + self.vargam2 = data['sigma_gam2'].reshape(s)**2 + self.vargam3 = data['sigma_gam3'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.ntri = data['ntri'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1) + self.npatch3 = params.get('npatch3', 1) + + @classmethod + def _calculateT(cls, s, t, k1, k2, k3): + # First calculate q values: + q1 = (s+t)/3. + q2 = q1-t + q3 = q1-s + + # |qi|^2 shows up a lot, so save these. + # The a stands for "absolute", and the ^2 part is implicit. + a1 = np.abs(q1)**2 + a2 = np.abs(q2)**2 + a3 = np.abs(q3)**2 + a123 = a1*a2*a3 + + # These combinations also appear multiple times. + # The b doesn't stand for anything. It's just the next letter after a. + b1 = np.conjugate(q1)**2*q2*q3 + b2 = np.conjugate(q2)**2*q1*q3 + b3 = np.conjugate(q3)**2*q1*q2 + + if k1==1 and k2==1 and k3==1: + + # Some factors we use multiple times + expfactor = -np.exp(-(a1 + a2 + a3)/2) + + # JBJ Equation 51 + # Note that we actually accumulate the Gammas with a different choice for + # alpha_i. We accumulate the shears relative to the q vectors, not relative to s. + # cf. JBJ Equation 41 and footnote 3. The upshot is that we multiply JBJ's formulae + # by (q1q2q3)^2 / |q1q2q3|^2 for T0 and (q1*q2q3)^2/|q1q2q3|^2 for T1.
+ # Then T0 becomes + # T0 = -(|q1 q2 q3|^2)/24 exp(-(|q1|^2+|q2|^2+|q3|^2)/2) + T0 = expfactor * a123 / 24 + + # JBJ Equation 52 + # After the phase adjustment, T1 becomes: + # T1 = -[(|q1 q2 q3|^2)/24 + # - (q1*^2 q2 q3)/9 + # + (q1*^4 q2^2 q3^2 + 2 |q2 q3|^2 q1*^2 q2 q3)/(|q1 q2 q3|^2)/27 + # ] exp(-(|q1|^2+|q2|^2+|q3|^2)/2) + T1 = expfactor * (a123 / 24 - b1 / 9 + (b1**2 + 2*a2*a3*b1) / (a123 * 27)) + T2 = expfactor * (a123 / 24 - b2 / 9 + (b2**2 + 2*a1*a3*b2) / (a123 * 27)) + T3 = expfactor * (a123 / 24 - b3 / 9 + (b3**2 + 2*a1*a2*b3) / (a123 * 27)) + + else: + # SKL Equation 63: + k1sq = k1*k1 + k2sq = k2*k2 + k3sq = k3*k3 + Theta2 = ((k1sq*k2sq + k1sq*k3sq + k2sq*k3sq)/3.)**0.5 + k1sq /= Theta2 # These are now what SKL calls theta_i^2 / Theta^2 + k2sq /= Theta2 + k3sq /= Theta2 + Theta4 = Theta2*Theta2 + Theta6 = Theta4*Theta2 + S = k1sq * k2sq * k3sq + + # SKL Equation 64: + Z = ((2*k2sq + 2*k3sq - k1sq) * a1 + + (2*k3sq + 2*k1sq - k2sq) * a2 + + (2*k1sq + 2*k2sq - k3sq) * a3) / (6*Theta2) + expfactor = -S * np.exp(-Z) / Theta4 + + # SKL Equation 65: + f1 = (k2sq+k3sq)/2 + (k2sq-k3sq)*(q2-q3)/(6*q1) + f2 = (k3sq+k1sq)/2 + (k3sq-k1sq)*(q3-q1)/(6*q2) + f3 = (k1sq+k2sq)/2 + (k1sq-k2sq)*(q1-q2)/(6*q3) + f1c = np.conjugate(f1) + f2c = np.conjugate(f2) + f3c = np.conjugate(f3) + + # SKL Equation 69: + g1 = k2sq*k3sq + (k3sq-k2sq)*k1sq*(q2-q3)/(3*q1) + g2 = k3sq*k1sq + (k1sq-k3sq)*k2sq*(q3-q1)/(3*q2) + g3 = k1sq*k2sq + (k2sq-k1sq)*k3sq*(q1-q2)/(3*q3) + g1c = np.conjugate(g1) + g2c = np.conjugate(g2) + g3c = np.conjugate(g3) + + # SKL Equation 62: + T0 = expfactor * a123 * f1c**2 * f2c**2 * f3c**2 / (24.*Theta6) + + # SKL Equation 68: + T1 = expfactor * ( + a123 * f1**2 * f2c**2 * f3c**2 / (24*Theta6) - + b1 * f1*f2c*f3c*g1c / (9*Theta4) + + (b1**2 * g1c**2 + 2*k2sq*k3sq*a2*a3*b1 * f2c * f3c) / (a123 * 27*Theta2)) + T2 = expfactor * ( + a123 * f1c**2 * f2**2 * f3c**2 / (24*Theta6) - + b2 * f1c*f2*f3c*g2c / (9*Theta4) + + (b2**2 * g2c**2 + 2*k1sq*k3sq*a1*a3*b2 * f1c * f3c) / (a123 * 27*Theta2)) + T3 = expfactor * ( + a123 * f1c**2 * f2c**2 * f3**2 / (24*Theta6) - + b3 * f1c*f2c*f3*g3c / (9*Theta4) + + (b3**2 * g3c**2 + 2*k1sq*k2sq*a1*a2*b3 * f1c * f2c) / (a123 * 27*Theta2)) + + return T0, T1, T2, T3 + +
[docs] @depr_pos_kwargs + def calculateMap3(self, *, R=None, k2=1, k3=1): + r"""Calculate the skewness of the aperture mass from the correlation function. + + The equations for this come from Jarvis, Bernstein & Jain (2004, MNRAS, 352). + See their section 3, especially equations 51 and 52 for the :math:`T_i` functions, + equations 60 and 61 for the calculation of :math:`\langle \cal M^3 \rangle` and + :math:`\langle \cal M^2 M^* \rangle`, and equations 55-58 for how to convert + these to the return values. + + If k2 or k3 != 1, then this routine calculates the generalization of the skewness + proposed by Schneider, Kilbinger & Lombardi (2005, A&A, 431): + :math:`\langle M_{ap}^3(R, k_2 R, k_3 R)\rangle` and related values. + + If k2 = k3 = 1 (the default), then there are only 4 combinations of Map and Mx + that are relevant: + + - map3 = :math:`\langle M_{ap}^3(R)\rangle` + - map2mx = :math:`\langle M_{ap}^2(R) M_\times(R)\rangle` + - mapmx2 = :math:`\langle M_{ap}(R) M_\times^2(R)\rangle` + - mx3 = :math:`\langle M_\times^3(R)\rangle` + + However, if k2 or k3 != 1, then there are 8 combinations: + + - map3 = :math:`\langle M_{ap}(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle` + - mapmapmx = :math:`\langle M_{ap}(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle` + - mapmxmap = :math:`\langle M_{ap}(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle` + - mxmapmap = :math:`\langle M_\times(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle` + - mxmxmap = :math:`\langle M_\times(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle` + - mxmapmx = :math:`\langle M_\times(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle` + - mapmxmx = :math:`\langle M_{ap}(R) M_\times(k_2 R) M_\times(k_3 R)\rangle` + - mx3 = :math:`\langle M_\times(R) M_\times(k_2 R) M_\times(k_3 R)\rangle` + + To accommodate this full generality, we always return all 8 values, along with the + estimated variance (which is equal for each), even when k2 = k3 = 1. + + .. note:: + + The formulae for the ``m2_uform`` = 'Schneider' definition of the aperture mass, + described in the documentation of `calculateMapSq`, are not known, so that is not an + option here. The calculations here use the definition that corresponds to + ``m2_uform`` = 'Crittenden'. + + Parameters: + R (array): The R values at which to calculate the aperture mass statistics. + (default: None, which means use self.rnom1d) + k2 (float): If given, the ratio R2/R1 in the SKL formulae. (default: 1) + k3 (float): If given, the ratio R3/R1 in the SKL formulae. (default: 1) + + Returns: + Tuple containing: + + - map3 = array of :math:`\langle M_{ap}(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle` + - mapmapmx = array of :math:`\langle M_{ap}(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle` + - mapmxmap = array of :math:`\langle M_{ap}(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle` + - mxmapmap = array of :math:`\langle M_\times(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle` + - mxmxmap = array of :math:`\langle M_\times(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle` + - mxmapmx = array of :math:`\langle M_\times(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle` + - mapmxmx = array of :math:`\langle M_{ap}(R) M_\times(k_2 R) M_\times(k_3 R)\rangle` + - mx3 = array of :math:`\langle M_\times(R) M_\times(k_2 R) M_\times(k_3 R)\rangle` + - varmap3 = array of variance estimates of the above values + """ + # As in the calculateMapSq function, we make s and t matrices, so we can eventually do the + # integral by doing a matrix product.
+ if R is None: + R = self.rnom1d + + # Pick s = d2, so dlogs is bin_size + s = d2 = np.outer(1./R, self.meand2.ravel()) + + # We take t = d3, but we need the x and y components. (relative to s along x axis) + # cf. Figure 1 in JBJ. + # d1^2 = d2^2 + d3^2 - 2 d2 d3 cos(theta1) + # tx = d3 cos(theta1) = (d2^2 + d3^2 - d1^2)/2d2 + # Simplify this using u=d3/d2 and v=(d1-d2)/d3 + # = (d3^2 - (d1+d2)(d1-d2)) / 2d2 + # = d3 (d3 - (d1+d2)v) / 2d2 + # = d3 (u - (2+uv)v)/2 + # = d3 (u - 2v - uv^2)/2 + # = d3 (u(1-v^2)/2 - v) + # Note that v here is really |v|. We'll account for the sign of v in ty. + d3 = np.outer(1./R, self.meand3.ravel()) + d1 = np.outer(1./R, self.meand1.ravel()) + u = self.meanu.ravel() + v = self.meanv.ravel() + tx = d3*(0.5*u*(1-v**2) - np.abs(v)) + # This form tends to be more stable near potentially degenerate triangles + # than tx = (d2*d2 + d3*d3 - d1*d1) / (2*d2) + # However, add a check to make sure. + bad = (tx <= -d3) | (tx >= d3) + if np.any(bad): # pragma: no cover + self.logger.warning("Warning: Detected some invalid triangles when computing Map^3") + self.logger.warning("Excluding these triangles from the integral.") + self.logger.debug("N bad points = %s",np.sum(bad)) + self.logger.debug("d1[bad] = %s",d1[bad]) + self.logger.debug("d2[bad] = %s",d2[bad]) + self.logger.debug("d3[bad] = %s",d3[bad]) + self.logger.debug("tx[bad] = %s",tx[bad]) + bad = np.where(bad) + tx[bad] = 0 # for now to avoid nans + ty = np.sqrt(d3**2 - tx**2) + ty[:,self.meanv.ravel() > 0] *= -1. + t = tx + 1j * ty + + # Next we need to construct the T values. + T0, T1, T2, T3 = self._calculateT(s,t,1.,k2,k3) + + # Finally, account for the Jacobian in d^2t: jac = |J(tx, ty; u, v)|, + # since our Gammas are accumulated in s, u, v, not s, tx, ty. + # u = d3/d2, v = (d1-d2)/d3 + # tx = d3 (u - 2v - uv^2)/2 + # = s/2 (u^2 - 2uv - u^2v^2) + # dtx/du = s (u - v - uv^2) + # dtx/dv = -us (1 + uv) + # ty = sqrt(d3^2 - tx^2) = sqrt(u^2 s^2 - tx^2) + # dty/du = s^2 u/2ty (1-v^2) (2 + 3uv - u^2 + u^2v^2) + # dty/dv = s^2 u^2/2ty (1 + uv) (u - uv^2 - 2uv) + # + # After some algebra... + # + # J = s^3 u^2 (1+uv) / ty + # = d3^2 d1 / ty + # + jac = np.abs(d3*d3*d1/ty) + jac[bad] = 0. # Exclude any bad triangles from the integral. + d2t = jac * self.ubin_size * self.vbin_size / (2.*np.pi) + sds = s * s * self.bin_size # Remember bin_size is dln(s) + # Note: these are really d2t/2piR^2 and sds/R^2, which are what actually show up + # in JBJ equations 45 and 50. + + T0 *= sds * d2t + T1 *= sds * d2t + T2 *= sds * d2t + T3 *= sds * d2t + + # Now do the integral by taking the matrix products. + gam0 = self.gam0.ravel() + gam1 = self.gam1.ravel() + gam2 = self.gam2.ravel() + gam3 = self.gam3.ravel() + vargam0 = self.vargam0.ravel() + vargam1 = self.vargam1.ravel() + vargam2 = self.vargam2.ravel() + vargam3 = self.vargam3.ravel() + mmm = T0.dot(gam0) + mcmm = T1.dot(gam1) + mmcm = T2.dot(gam2) + mmmc = T3.dot(gam3) + + # These accumulate the coefficients that are being dotted to gam0,1,2,3 respectively. + # Below, we will take the abs^2 and dot it to gam0,1,2,3 in each case to compute the + # total variance. + # Note: This assumes that gam0, gam1, gam2, gam3 have no covariance. + # This is not technically true, but I think it's approximately ok. 
+ var0 = T0.copy() + var1 = T1.copy() + var2 = T2.copy() + var3 = T3.copy() + + if k2 == 1 and k3 == 1: + mmm *= 6 + mcmm += mmcm + mcmm += mmmc + mcmm *= 2 + mmcm = mmmc = mcmm + var0 *= 6 + var1 *= 6 + var2 *= 6 + var3 *= 6 + else: + # Repeat the above for the other permutations + for (_k1, _k2, _k3, _mcmm, _mmcm, _mmmc) in [ + (1,k3,k2,mcmm,mmmc,mmcm), + (k2,1,k3,mmcm,mcmm,mmmc), + (k2,k3,1,mmcm,mmmc,mcmm), + (k3,1,k2,mmmc,mcmm,mmcm), + (k3,k2,1,mmmc,mmcm,mcmm) ]: + T0, T1, T2, T3 = self._calculateT(s,t,_k1,_k2,_k3) + T0 *= sds * d2t + T1 *= sds * d2t + T2 *= sds * d2t + T3 *= sds * d2t + # Relies on numpy array overloading += to actually update in place. + mmm += T0.dot(gam0) + _mcmm += T1.dot(gam1) + _mmcm += T2.dot(gam2) + _mmmc += T3.dot(gam3) + var0 += T0 + var1 += T1 + var2 += T2 + var3 += T3 + + map3 = 0.25 * np.real(mcmm + mmcm + mmmc + mmm) + mapmapmx = 0.25 * np.imag(mcmm + mmcm - mmmc + mmm) + mapmxmap = 0.25 * np.imag(mcmm - mmcm + mmmc + mmm) + mxmapmap = 0.25 * np.imag(-mcmm + mmcm + mmmc + mmm) + mxmxmap = 0.25 * np.real(mcmm + mmcm - mmmc - mmm) + mxmapmx = 0.25 * np.real(mcmm - mmcm + mmmc - mmm) + mapmxmx = 0.25 * np.real(-mcmm + mmcm + mmmc - mmm) + mx3 = 0.25 * np.imag(mcmm + mmcm + mmmc - mmm) + + var0 /= 4 + var1 /= 4 + var2 /= 4 + var3 /= 4 + + # Now finally add up the coefficient squared times each vargam element. + var = np.abs(var0**2).dot(vargam0) + var += np.abs(var1**2).dot(vargam1) + var += np.abs(var2**2).dot(vargam2) + var += np.abs(var3**2).dot(vargam3) + + return map3, mapmapmx, mapmxmap, mxmapmap, mxmxmap, mxmapmx, mapmxmx, mx3, var
+ +
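+    # Hedged sketch: evaluating the aperture mass skewness at a few scales
+    # (the R values are illustrative; units follow sep_units):
+    #
+    #     >>> import numpy as np
+    #     >>> R = np.array([5., 10., 20.])
+    #     >>> (map3, mapmapmx, mapmxmap, mxmapmap,
+    #     ...  mxmxmap, mxmapmx, mapmxmx, mx3, var) = ggg.calculateMap3(R=R)
+    #     >>> sig = np.sqrt(var)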
[docs] @depr_pos_kwargs + def writeMap3(self, file_name, *, R=None, file_type=None, precision=None): + r"""Write the aperture mass skewness based on the correlation function to the + file, file_name. + + The output file will include the following columns: + + ========== ========================================================== + Column Description + ========== ========================================================== + R The aperture radius + Map3 An estimate of :math:`\langle M_{ap}^3\rangle(R)` + (cf. `calculateMap3`) + Map2Mx An estimate of :math:`\langle M_{ap}^2 M_\times\rangle(R)` + MapMx2 An estimate of :math:`\langle M_{ap} M_\times^2\rangle(R)` + Mx3 An estimate of :math:`\langle M_\times^3\rangle(R)` + sig_map The sqrt of the variance estimate of each of these + ========== ========================================================== + + Parameters: + file_name (str): The name of the file to write to. + R (array): The R values at which to calculate the statistics. + (default: None, which means use self.rnom1d) + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + """ + self.logger.info('Writing Map^3 from GGG correlations to %s',file_name) + + if R is None: + R = self.rnom1d + stats = self.calculateMap3(R=R) + if precision is None: + precision = self.config.get('precision', 4) + + col_names = ['R','Map3','Map2Mx', 'MapMx2', 'Mx3','sig_map'] + columns = [ R, stats[0], stats[1], stats[4], stats[7], np.sqrt(stats[8]) ] + with make_writer(file_name, precision, file_type, logger=self.logger) as writer: + writer.write(col_names, columns)
+ + +
[docs]class GGGCrossCorrelation(BinnedCorr3): + r"""This class handles the calculation of a 3-point shear-shear-shear cross-correlation + function. + + For 3-point cross correlations, it matters which of the two or three fields falls on + each corner of the triangle. E.g. is field 1 on the corner opposite d1 (the longest + side of the triangle) or is it field 2 (or 3) there? This is in contrast to the 2-point + correlation, where the symmetry of the situation means that it doesn't matter which point + is identified with each field. This makes it significantly more complicated to keep track + of all the relevant information for a 3-point cross correlation function. + + The `GGGCorrelation` class holds a single set of :math:`\Gamma` functions describing all + possible triangles, parameterized according to their relative side lengths ordered as + d1 > d2 > d3. + + For a cross-correlation of two fields: G1 - G1 - G2 (i.e. the G1 field is at two of the + corners and G2 is at one corner), we need three sets of these :math:`\Gamma` functions + to capture all of the triangles, since the G2 points may be opposite d1 or d2 or d3. + For a cross-correlation of three fields: G1 - G2 - G3, we need six sets, to account for + all of the possible permutations relative to the triangle sides. + + Therefore, this class holds 6 instances of `GGGCorrelation`, which in turn hold the + information about triangles in each of the relevant configurations. We name these: + + Attributes: + g1g2g3: Triangles where G1 is opposite d1, G2 is opposite d2, G3 is opposite d3. + g1g3g2: Triangles where G1 is opposite d1, G3 is opposite d2, G2 is opposite d3. + g2g1g3: Triangles where G2 is opposite d1, G1 is opposite d2, G3 is opposite d3. + g2g3g1: Triangles where G2 is opposite d1, G3 is opposite d2, G1 is opposite d3. + g3g1g2: Triangles where G3 is opposite d1, G1 is opposite d2, G2 is opposite d3. + g3g2g1: Triangles where G3 is opposite d1, G2 is opposite d2, G1 is opposite d3. + + If for instance G2 and G3 are the same field, then e.g. g1g2g3 and g1g3g2 will have + the same values. + + Objects of this class also hold the following attributes, which are identical in each of + the above GGGCorrelation instances. + + Attributes: + nbins: The number of bins in logr where r = d2 + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + nubins: The number of bins in u where u = d3/d2 + ubin_size: The size of the bins in u + min_u: The minimum u being considered + max_u: The maximum u being considered + nvbins: The number of bins in v where v = +-(d1-d2)/d3 + vbin_size: The size of the bins in v + min_v: The minimum v being considered + max_v: The maximum v being considered + logr1d: The nominal centers of the nbins bins in log(r). + u1d: The nominal centers of the nubins bins in u. + v1d: The nominal centers of the nvbins bins in v. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_cross` directly, + then the units will not be applied to ``meand1``, ``meanlogd1``, etc. until the `finalize` + function is called. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr3`, which are ignored here.
(default: None) + logger: If desired, a logger object for logging. (default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
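+    # Hedged sketch: a three-way shear cross-correlation, keeping track of which
+    # field sits opposite each side (cat1, cat2, cat3 are assumed shear Catalogs):
+    #
+    #     >>> gggc = treecorr.GGGCrossCorrelation(min_sep=1., max_sep=100., nbins=10)
+    #     >>> gggc.process(cat1, cat2, cat3)
+    #     >>> g123 = gggc.g1g2g3    # a GGGCorrelation with G1 opposite d1, etc.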
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `GGGCrossCorrelation`. See class doc for details. + """ + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 3 # GData + self._ro._d2 = 3 # GData + self._ro._d3 = 3 # GData + + self.g1g2g3 = GGGCorrelation(config, logger=logger, **kwargs) + self.g1g3g2 = GGGCorrelation(config, logger=logger, **kwargs) + self.g2g1g3 = GGGCorrelation(config, logger=logger, **kwargs) + self.g2g3g1 = GGGCorrelation(config, logger=logger, **kwargs) + self.g3g1g2 = GGGCorrelation(config, logger=logger, **kwargs) + self.g3g2g1 = GGGCorrelation(config, logger=logger, **kwargs) + self._all = [self.g1g2g3, self.g1g3g2, self.g2g1g3, self.g2g3g1, self.g3g1g2, self.g3g2g1] + + self.logger.debug('Finished building GGGCrossCorr')
+ +
[docs] def __eq__(self, other): + """Return whether two `GGGCrossCorrelation` instances are equal""" + return (isinstance(other, GGGCrossCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nubins == other.nubins and + self.ubin_size == other.ubin_size and + self.min_v == other.min_v and + self.max_v == other.max_v and + self.nvbins == other.nvbins and + self.vbin_size == other.vbin_size and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + self.g1g2g3 == other.g1g2g3 and + self.g1g3g2 == other.g1g3g2 and + self.g2g1g3 == other.g2g1g3 and + self.g2g3g1 == other.g2g3g1 and + self.g3g1g2 == other.g3g1g2 and + self.g3g2g1 == other.g3g2g1)
+ +
[docs] def copy(self): + """Make a copy""" + ret = GGGCrossCorrelation.__new__(GGGCrossCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, GGGCorrelation): + ret.__dict__[key] = item.copy() + else: + ret.__dict__[key] = item + # This needs to be the new list: + ret._all = [ret.g1g2g3, ret.g1g3g2, ret.g2g1g3, ret.g2g3g1, ret.g3g1g2, ret.g3g2g1] + return ret
+ +
[docs] def __repr__(self): + return 'GGGCrossCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): + """Process two catalogs, accumulating the 3pt cross-correlation, where one of the + points in each triangle comes from the first catalog, and two come from the second. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + .. note:: + + This only adds to the attributes g1g2g3, g2g1g3, g2g3g1, not the ones where + 3 comes before 2. When running this via the regular `process` method, it will + combine them at the end to make sure g1g2g3 == g1g3g2, etc. for a complete + calculation of the 1-2 cross-correlation. + + Parameters: + cat1 (Catalog): The first catalog to process. (1 point in each triangle will come + from this catalog.) + cat2 (Catalog): The second catalog to process. (2 points in each triangle will come + from this catalog.) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process GGG (1-2) cross-correlations') + else: + self.logger.info('Starting process GGG (1-2) cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + for ggg in self._all: + ggg._set_metric(self.metric, self.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: unlike in GGGCorrelation.process_cross12, the three correlation objects here + # are distinct, so each triangle is accumulated into the one matching which side + # (d1, d2, or d3) the cat1 point is opposite. + _lib.ProcessCross12(self.g1g2g3.corr, self.g2g1g3.corr, self.g2g3g1.corr, + f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, + self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): + """Process a set of three catalogs, accumulating the 3pt cross-correlation. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + cat3 (Catalog): The third catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '' and cat3.name == '': + self.logger.info('Starting process GGG cross-correlations') + else: + self.logger.info('Starting process GGG cross-correlations for cats %s, %s, %s.', + cat1.name, cat2.name, cat3.name) + + self._set_metric(metric, cat1.coords, cat2.coords, cat3.coords) + for ggg in self._all: + ggg._set_metric(self.metric, self.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross3(self.g1g2g3.corr, self.g1g3g2.corr, + self.g2g1g3.corr, self.g2g3g1.corr, + self.g3g1g2.corr, self.g3g2g1.corr, + f1.data, f2.data, f3.data, self.output_dots, + f1._d, f2._d, f3._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + for ggg in self._all: + ggg._finalize() + +
[docs] def finalize(self, varg1, varg2, varg3): + """Finalize the calculation of the correlation function. + + The `process_cross` command accumulates values in each bin, so it can be called + multiple times if appropriate. Afterwards, this command finishes the calculation + by dividing by the total weight. + + Parameters: + varg1 (float): The shear variance for the first field that was correlated. + varg2 (float): The shear variance for the second field that was correlated. + varg3 (float): The shear variance for the third field that was correlated. + """ + self.g1g2g3.finalize(varg1,varg2,varg3) + self.g1g3g2.finalize(varg1,varg3,varg2) + self.g2g1g3.finalize(varg2,varg1,varg3) + self.g2g3g1.finalize(varg2,varg3,varg1) + self.g3g1g2.finalize(varg3,varg1,varg2) + self.g3g2g1.finalize(varg3,varg2,varg1)
+ + @property + def nonzero(self): + """Return if there are any values accumulated yet. (i.e. ntri > 0) + """ + return any([ggg.nonzero for ggg in self._all]) + + def _clear(self): + """Clear the data vectors + """ + for ggg in self._all: + ggg._clear() + +
[docs] def __iadd__(self, other): + """Add a second `GGGCrossCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `GGGCrossCorrelation` objects should not have had + `finalize` called yet. Then, after adding them together, you should call `finalize` + on the sum. + """ + if not isinstance(other, GGGCrossCorrelation): + raise TypeError("Can only add another GGGCrossCorrelation object") + self.g1g2g3 += other.g1g2g3 + self.g1g3g2 += other.g1g3g2 + self.g2g1g3 += other.g2g1g3 + self.g2g3g1 += other.g2g3g1 + self.g3g1g2 += other.g3g1g2 + self.g3g2g1 += other.g3g2g1 + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + for i, ggg in enumerate(self._all): + ggg._sum([c._all[i] for c in others]) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2, cat3=None, *, metric=None, num_threads=None, + comm=None, low_mem=False, initialize=True, finalize=True): + """Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3. + + - If 2 arguments are given, then compute a cross-correlation function with the + first catalog taking one corner of the triangles, and the second taking two corners. + - If 3 arguments are given, then compute a three-way cross-correlation function. + + All arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first G field. + cat2 (Catalog): A catalog or list of catalogs for the second G field. + cat3 (Catalog): A catalog or list of catalogs for the third G field. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr3.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + self._process12 = False + + if not isinstance(cat1,list): cat1 = cat1.get_patches() + if not isinstance(cat2,list): cat2 = cat2.get_patches() + if cat3 is not None and not isinstance(cat3,list): cat3 = cat3.get_patches() + + if cat3 is None: + self._process12 = True + self._process_all_cross12(cat1, cat2, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, cat3, metric, num_threads, comm, low_mem) + + if finalize: + if self._process12: + # Then some of the processing involved a cross12 calculation. + # This means that spots 2 and 3 should not be distinguished. + # Combine the relevant arrays. + self.g1g2g3 += self.g1g3g2 + self.g2g1g3 += self.g3g1g2 + self.g2g3g1 += self.g3g2g1 + # Copy back by doing clear and +=. + # This makes sure the coords and metric are set properly. + self.g1g3g2.clear() + self.g3g1g2.clear() + self.g3g2g1.clear() + self.g1g3g2 += self.g1g2g3 + self.g3g1g2 += self.g2g1g3 + self.g3g2g1 += self.g2g3g1 + + varg1 = calculateVarG(cat1, low_mem=low_mem) + varg2 = calculateVarG(cat2, low_mem=low_mem) + self.logger.info("varg1 = %f: sig_g = %f",varg1,math.sqrt(varg1)) + self.logger.info("varg2 = %f: sig_g = %f",varg2,math.sqrt(varg2)) + if cat3 is None: + varg3 = varg2 + else: + varg3 = calculateVarG(cat3, low_mem=low_mem) + self.logger.info("varg3 = %f: sig_g = %f",varg3,math.sqrt(varg3)) + self.finalize(varg1,varg2,varg3)
+ +
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + In this case, the concatenation of getStat() for each combination in the following + order: g1g2g3, g1g3g2, g2g1g3, g2g3g1, g3g1g2, g3g2g1. + """ + return np.concatenate([ggg.getStat() for ggg in self._all])
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + In this case, the concatenation of getWeight() for each combination in the following + order: g1g2g3, g1g3g2, g2g1g3, g2g3g1, g3g1g2, g3g2g1. + """ + return np.concatenate([ggg.getWeight() for ggg in self._all])
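Since getStat and getWeight concatenate the six combinations in the same fixed order, the two returned arrays line up element for element, which is what the covariance machinery relies on. For instance:

    stat = ggg.getStat()      # the six components' statistics, concatenated
    weight = ggg.getWeight()  # the matching weights, in the same order
    assert stat.shape == weight.shape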
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the cross-correlation functions to the file, file_name. + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing GGG cross-correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + with make_writer(file_name, precision, file_type, self.logger) as writer: + names = [ 'g1g2g3', 'g1g3g2', 'g2g1g3', 'g2g3g1', 'g3g1g2', 'g3g2g1' ] + for name, corr in zip(names, self._all): + corr._write(writer, name, write_patch_results)
+ +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `GGGCrossCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading GGG cross-correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + names = [ 'g1g2g3', 'g1g3g2', 'g2g1g3', 'g2g3g1', 'g3g1g2', 'g3g2g1' ] + for name, corr in zip(names, self._all): + corr._read(reader, name)
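A round trip through a file preserves all six components, provided the reading object is constructed with the same binning, which as the warning notes is not checked for you. A sketch (file name illustrative; FITS I/O assumes fitsio or astropy is installed; cat1, cat2, cat3 assumed as above):

    import numpy as np
    import treecorr

    config = dict(min_sep=1., max_sep=30., nbins=4, sep_units='arcmin')
    ggg = treecorr.GGGCrossCorrelation(**config)
    ggg.process(cat1, cat2, cat3)      # cat1..cat3: assumed shear Catalogs
    ggg.write('ggg_cross.fits')

    ggg2 = treecorr.GGGCrossCorrelation(**config)  # must match original config
    ggg2.read('ggg_cross.fits')
    assert np.allclose(ggg2.g1g2g3.gam0, ggg.g1g2g3.gam0)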
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/kgcorrelation.html b/docs/_build/html/_modules/treecorr/kgcorrelation.html new file mode 100644 index 00000000..7ad89bcf --- /dev/null +++ b/docs/_build/html/_modules/treecorr/kgcorrelation.html @@ -0,0 +1,603 @@ + + + + + + treecorr.kgcorrelation — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for treecorr.kgcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: kgcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarG, calculateVarK
+from .binnedcorr2 import BinnedCorr2
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class KGCorrelation(BinnedCorr2): + r"""This class handles the calculation and storage of a 2-point kappa-shear correlation + function. + + .. note:: + + While we use the term kappa (:math:`\kappa`) here and the letter K in various places, + in fact any scalar field will work here. For example, you can use this to compute + correlations of some survey property, such as seeing, with shear, where "kappa" would + really be the measured property, e.g. the observed sizes of the stars. + + Objects of this class hold the following attributes: + + Attributes: + nbins: The number of bins in logr + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + + In addition, the following attributes are numpy arrays of length (nbins): + + Attributes: + logr: The nominal center of the bin in log(r) (the natural logarithm of r). + rnom: The nominal center of the bin converted to regular distance. + i.e. r = exp(logr). + meanr: The (weighted) mean value of r for the pairs in each bin. + If there are no pairs in a bin, then exp(logr) will be used instead. + meanlogr: The (weighted) mean value of log(r) for the pairs in each bin. + If there are no pairs in a bin, then logr will be used instead. + xi: The correlation function, :math:`\xi(r) = \langle \kappa\, \gamma_T\rangle`. + xi_im: The imaginary part of :math:`\xi(r)`. + varxi: An estimate of the variance of :math:`\xi`. + weight: The total weight in each bin. + npairs: The number of pairs going into each bin (including pairs where one or + both objects have w=0). + cov: An estimate of the full covariance matrix. + + .. note:: + + The default method for estimating the variance and covariance attributes (``varxi``, + and ``cov``) is 'shot', which only includes the shape noise propagated into the final + correlation. This does not include sample variance, so it is always an underestimate of + the actual variance. To get better estimates, you need to set ``var_method`` to something + else and use patches in the input catalog(s). cf. `Covariance Estimates`. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_cross`, + then the units will not be applied to ``meanr`` or ``meanlogr`` until the `finalize` + function is called. + + The typical usage pattern is as follows: + + >>> kg = treecorr.KGCorrelation(config) + >>> kg.process(cat1,cat2) # Calculate the cross-correlation + >>> kg.write(file_name) # Write out to a file. + >>> xi = kg.xi # Or access the correlation function directly. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr2`, which are ignored here. (default: None) + logger: If desired, a logger object for logging. (default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `KGCorrelation`. See class doc for details. + """ + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 2 # KData + self._ro._d2 = 3 # GData + self.xi = np.zeros_like(self.rnom, dtype=float) + self.xi_im = np.zeros_like(self.rnom, dtype=float) + self.varxi = np.zeros_like(self.rnom, dtype=float) + self.meanr = np.zeros_like(self.rnom, dtype=float) + self.meanlogr = np.zeros_like(self.rnom, dtype=float) + self.weight = np.zeros_like(self.rnom, dtype=float) + self.npairs = np.zeros_like(self.rnom, dtype=float) + self.logger.debug('Finished building KGCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr2( + self._d1, self._d2, self._bintype, + self._min_sep,self._max_sep,self._nbins,self._bin_size,self.b, + self.min_rpar, self.max_rpar, self.xperiod, self.yperiod, self.zperiod, + dp(self.xi),dp(self.xi_im), dp(None), dp(None), + dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr2(self.corr, self._d1, self._d2, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `KGCorrelation` instances are equal""" + return (isinstance(other, KGCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.min_rpar == other.min_rpar and + self.max_rpar == other.max_rpar and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meanr, other.meanr) and + np.array_equal(self.meanlogr, other.meanlogr) and + np.array_equal(self.xi, other.xi) and + np.array_equal(self.xi_im, other.xi_im) and + np.array_equal(self.varxi, other.varxi) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.npairs, other.npairs))
+ +
[docs] def copy(self): + """Make a copy""" + ret = KGCorrelation.__new__(KGCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular don't deep copy config or logger + # Most of the rest are scalars, which copy fine this way. + # And the read-only things are all in _ro. + # The results dict is trickier. We rely on it being copied in places, but we + # never add more to it after the copy, so shallow copy is fine. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + return ret
+ +
[docs] def __repr__(self): + return 'KGCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process KG cross-correlations') + else: + self.logger.info('Starting process KG cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation, only using + the corresponding pairs of objects in each catalog. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + import warnings + warnings.warn("The process_pairwise function is slated to be removed in a future version. "+ + "If you are actually using this function usefully, please "+ + "open an issue to describe your use case.", FutureWarning) + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process KG pairwise-correlations') + else: + self.logger.info('Starting process KG pairwise-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + + f1 = cat1.getKSimpleField() + f2 = cat2.getGSimpleField() + + _lib.ProcessPair(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.xi[mask1] /= self.weight[mask1] + self.xi_im[mask1] /= self.weight[mask1] + self.meanr[mask1] /= self.weight[mask1] + self.meanlogr[mask1] /= self.weight[mask1] + + # Update the units of meanr, meanlogr + self._apply_units(mask1) + + # Use meanr, meanlogr when available, but set to nominal when no pairs in bin. + self.meanr[mask2] = self.rnom[mask2] + self.meanlogr[mask2] = self.logr[mask2] + +
[docs] def finalize(self, vark, varg): + """Finalize the calculation of the correlation function. + + The `process_cross` command accumulates values in each bin, so it can be called + multiple times if appropriate. Afterwards, this command finishes the calculation + by dividing each column by the total weight. + + Parameters: + vark (float): The kappa variance for the first field. + varg (float): The shear variance per component for the second field. + """ + self._finalize() + self._var_num = vark * varg + self.cov = self.estimate_cov(self.var_method) + self.varxi.ravel()[:] = self.cov.diagonal()
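Spelled out, the manual workflow that finalize completes looks like this (a sketch; cat_k and cat_g are hypothetical, assumed to be pre-built kappa and shear treecorr.Catalog objects):

    import treecorr

    # cat_k, cat_g: assumed pre-built kappa and shear Catalogs.
    kg = treecorr.KGCorrelation(min_sep=1., max_sep=100., nbins=10,
                                sep_units='arcmin')

    # Accumulate raw weighted sums; this can be repeated for more catalog pairs.
    kg.process_cross(cat_k, cat_g)

    # finalize divides by the total weight and propagates the noise terms.
    vark = treecorr.calculateVarK([cat_k])
    varg = treecorr.calculateVarG([cat_g])
    kg.finalize(vark, varg)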
+ + def _clear(self): + """Clear the data vectors + """ + self.xi.ravel()[:] = 0 + self.xi_im.ravel()[:] = 0 + self.meanr.ravel()[:] = 0 + self.meanlogr.ravel()[:] = 0 + self.weight.ravel()[:] = 0 + self.npairs.ravel()[:] = 0 + +
[docs] def __iadd__(self, other): + """Add a second `KGCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `KGCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, KGCorrelation): + raise TypeError("Can only add another KGCorrelation object") + if not (self._nbins == other._nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep): + raise ValueError("KGCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords) + self.xi.ravel()[:] += other.xi.ravel()[:] + self.xi_im.ravel()[:] += other.xi_im.ravel()[:] + self.meanr.ravel()[:] += other.meanr.ravel()[:] + self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:] + self.weight.ravel()[:] += other.weight.ravel()[:] + self.npairs.ravel()[:] += other.npairs.ravel()[:] + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.xi for c in others], axis=0, out=self.xi) + np.sum([c.xi_im for c in others], axis=0, out=self.xi_im) + np.sum([c.meanr for c in others], axis=0, out=self.meanr) + np.sum([c.meanlogr for c in others], axis=0, out=self.meanlogr) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.npairs for c in others], axis=0, out=self.npairs) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, + initialize=True, finalize=True): + """Compute the correlation function. + + Both arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the K field. + cat2 (Catalog): A catalog or list of catalogs for the G field. + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr2.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + + self._process_all_cross(cat1, cat2, metric, num_threads, comm, low_mem) + + if finalize: + vark = calculateVarK(cat1, low_mem=low_mem) + varg = calculateVarG(cat2, low_mem=low_mem) + self.logger.info("vark = %f: sig_k = %f",vark,math.sqrt(vark)) + self.logger.info("varg = %f: sig_sn (per component) = %f",varg,math.sqrt(varg)) + self.finalize(vark,varg)
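As the class-level note on variance estimates says, anything beyond the shot-noise default requires patches in the inputs. A sketch of that usage (the input arrays, patch count, and var_method choice are illustrative; using patch_centers keeps the two catalogs' patches aligned):

    import treecorr

    # ra, dec, kappa, g1, g2: assumed numpy arrays of the survey data.
    cat_k = treecorr.Catalog(ra=ra, dec=dec, k=kappa,
                             ra_units='deg', dec_units='deg', npatch=20)
    cat_g = treecorr.Catalog(ra=ra, dec=dec, g1=g1, g2=g2,
                             ra_units='deg', dec_units='deg',
                             patch_centers=cat_k.patch_centers)

    kg = treecorr.KGCorrelation(min_sep=1., max_sep=100., nbins=10,
                                sep_units='arcmin', var_method='jackknife')
    kg.process(cat_k, cat_g)   # kg.cov is now a jackknife covariance estimate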
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + The output file will include the following columns: + + ========== ======================================================== + Column Description + ========== ======================================================== + r_nom The nominal center of the bin in r + meanr The mean value :math:`\langle r\rangle` of pairs that + fell into each bin + meanlogr The mean value :math:`\langle \log(r)\rangle` of pairs + that fell into each bin + kgamT The real part of the correlation function, + :math:`\langle \kappa\, \gamma_T\rangle` + kgamX The imaginary part of the correlation function, + :math:`\langle \kappa\, \gamma_\times\rangle` + sigma The sqrt of the variance estimate of both of these + weight The total weight contributing to each bin + npairs The total number of pairs in each bin + ========== ======================================================== + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing KG correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
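Because the ASCII output begins with a commented header row naming these columns, it can typically be read straight back into a structured array; for instance (file name illustrative):

    import numpy as np

    data = np.genfromtxt('kg.out', names=True)  # header supplies the field names
    r = data['meanr']
    xi_t = data['kgamT']
    sig = data['sigma']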
+ + @property + def _write_col_names(self): + return ['r_nom','meanr','meanlogr','kgamT','kgamX','sigma','weight','npairs'] + + @property + def _write_data(self): + data = [ self.rnom, self.meanr, self.meanlogr, + self.xi, self.xi_im, np.sqrt(self.varxi), + self.weight, self.npairs ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `KGCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading KG correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + self.meanr = data['meanR'].reshape(s) + self.meanlogr = data['meanlogR'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meanr = data['meanr'].reshape(s) + self.meanlogr = data['meanlogr'].reshape(s) + self.xi = data['kgamT'].reshape(s) + self.xi_im = data['kgamX'].reshape(s) + self.varxi = data['sigma'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.npairs = data['npairs'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/kkcorrelation.html b/docs/_build/html/_modules/treecorr/kkcorrelation.html new file mode 100644 index 00000000..707792d7 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/kkcorrelation.html @@ -0,0 +1,640 @@ + + + + + + treecorr.kkcorrelation — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for treecorr.kkcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: kkcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarK
+from .binnedcorr2 import BinnedCorr2
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class KKCorrelation(BinnedCorr2): + r"""This class handles the calculation and storage of a 2-point kappa-kappa correlation + function. + + .. note:: + + While we use the term kappa (:math:`\kappa`) here and the letter K in various places, + in fact any scalar field will work here. For example, you can use this to compute + correlations of the CMB temperature fluctuations, where "kappa" would really be + :math:`\Delta T`. + + Objects of this class hold the following attributes: + + Attributes: + nbins: The number of bins in logr + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + + In addition, the following attributes are numpy arrays of length (nbins): + + Attributes: + logr: The nominal center of the bin in log(r) (the natural logarithm of r). + rnom: The nominal center of the bin converted to regular distance. + i.e. r = exp(logr). + meanr: The (weighted) mean value of r for the pairs in each bin. + If there are no pairs in a bin, then exp(logr) will be used instead. + meanlogr: The (weighted) mean value of log(r) for the pairs in each bin. + If there are no pairs in a bin, then logr will be used instead. + xi: The correlation function, :math:`\xi(r)`. + varxi: An estimate of the variance of :math:`\xi`. + weight: The total weight in each bin. + npairs: The number of pairs going into each bin (including pairs where one or + both objects have w=0). + cov: An estimate of the full covariance matrix. + + .. note:: + + The default method for estimating the variance and covariance attributes (``varxi``, + and ``cov``) is 'shot', which only includes the shot noise propagated into the final + correlation. This does not include sample variance, so it is always an underestimate of + the actual variance. To get better estimates, you need to set ``var_method`` to something + else and use patches in the input catalog(s). cf. `Covariance Estimates`. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_auto` and/or + `process_cross`, then the units will not be applied to ``meanr`` or ``meanlogr`` until + the `finalize` function is called. + + The typical usage pattern is as follows: + + >>> kk = treecorr.KKCorrelation(config) + >>> kk.process(cat) # For auto-correlation. + >>> kk.process(cat1,cat2) # For cross-correlation. + >>> kk.write(file_name) # Write out to a file. + >>> xi = kk.xi # Or access the correlation function directly. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr2`, which are ignored here. (default: None) + logger: If desired, a logger object for logging. (default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `KKCorrelation`. See class doc for details. + """ + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 2 # KData + self._ro._d2 = 2 # KData + self.xi = np.zeros_like(self.rnom, dtype=float) + self.varxi = np.zeros_like(self.rnom, dtype=float) + self.meanr = np.zeros_like(self.rnom, dtype=float) + self.meanlogr = np.zeros_like(self.rnom, dtype=float) + self.weight = np.zeros_like(self.rnom, dtype=float) + self.npairs = np.zeros_like(self.rnom, dtype=float) + self.logger.debug('Finished building KKCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr2( + self._d1, self._d2, self._bintype, + self._min_sep,self._max_sep,self._nbins,self._bin_size,self.b, + self.min_rpar, self.max_rpar, self.xperiod, self.yperiod, self.zperiod, + dp(self.xi), dp(None), dp(None), dp(None), + dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr2(self.corr, self._d1, self._d2, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `KKCorrelation` instances are equal""" + return (isinstance(other, KKCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.min_rpar == other.min_rpar and + self.max_rpar == other.max_rpar and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meanr, other.meanr) and + np.array_equal(self.meanlogr, other.meanlogr) and + np.array_equal(self.xi, other.xi) and + np.array_equal(self.varxi, other.varxi) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.npairs, other.npairs))
+ +
[docs] def copy(self): + """Make a copy""" + ret = KKCorrelation.__new__(KKCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular don't deep copy config or logger + # Most of the rest are scalars, which copy fine this way. + # And the read-only things are all in _ro. + # The results dict is trickier. We rely on it being copied in places, but we + # never add more to it after the copy, so shallow copy is fine. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + return ret
+ +
[docs] def __repr__(self): + return 'KKCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): + """Process a single catalog, accumulating the auto-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat (Catalog): The catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat.name == '': + self.logger.info('Starting process KK auto-correlations') + else: + self.logger.info('Starting process KK auto-correlations for cat %s.', cat.name) + + self._set_metric(metric, cat.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + field = cat.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',field.nTopLevelNodes) + _lib.ProcessAuto2(self.corr, field.data, self.output_dots, + field._d, self._coords, self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process KK cross-correlations') + else: + self.logger.info('Starting process KK cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
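Together, process_auto and process_cross let one assemble a full auto-correlation piece by piece, which is essentially what process does when given patches. A sketch (chunks is hypothetical, assumed to be a list of treecorr.Catalog objects that together cover the data):

    import treecorr

    # chunks: assumed list of Catalogs covering the full data set.
    kk = treecorr.KKCorrelation(min_sep=1., max_sep=100., nbins=10,
                                sep_units='arcmin')

    # Auto terms within each chunk, plus each cross pair of chunks once.
    for i, ci in enumerate(chunks):
        kk.process_auto(ci)
        for cj in chunks[i+1:]:
            kk.process_cross(ci, cj)

    vark = treecorr.calculateVarK(chunks)
    kk.finalize(vark, vark)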
+ +
[docs] @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation, only using + the corresponding pairs of objects in each catalog. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + import warnings + warnings.warn("The process_pairwise function is slated to be removed in a future version. "+ + "If you are actually using this function usefully, please "+ + "open an issue to describe your use case.", FutureWarning) + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process KK pairwise-correlations') + else: + self.logger.info('Starting process KK pairwise-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + + f1 = cat1.getKSimpleField() + f2 = cat2.getKSimpleField() + + _lib.ProcessPair(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.xi[mask1] /= self.weight[mask1] + self.meanr[mask1] /= self.weight[mask1] + self.meanlogr[mask1] /= self.weight[mask1] + + # Update the units of meanr, meanlogr + self._apply_units(mask1) + + # Use meanr, meanlogr when available, but set to nominal when no pairs in bin. + self.meanr[mask2] = self.rnom[mask2] + self.meanlogr[mask2] = self.logr[mask2] + +
[docs] def finalize(self, vark1, vark2): + """Finalize the calculation of the correlation function. + + The `process_auto` and `process_cross` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation by dividing each column by the total weight. + + Parameters: + vark1 (float): The kappa variance for the first field. + vark2 (float): The kappa variance for the second field. + """ + self._finalize() + self._var_num = vark1 * vark2 + self.cov = self.estimate_cov(self.var_method) + self.varxi.ravel()[:] = self.cov.diagonal()
+ + def _clear(self): + """Clear the data vectors + """ + self.xi.ravel()[:] = 0 + self.meanr.ravel()[:] = 0 + self.meanlogr.ravel()[:] = 0 + self.weight.ravel()[:] = 0 + self.npairs.ravel()[:] = 0 + +
[docs] def __iadd__(self, other): + """Add a second `KKCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `KKCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, KKCorrelation): + raise TypeError("Can only add another KKCorrelation object") + if not (self._nbins == other._nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep): + raise ValueError("KKCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords) + self.xi.ravel()[:] += other.xi.ravel()[:] + self.meanr.ravel()[:] += other.meanr.ravel()[:] + self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:] + self.weight.ravel()[:] += other.weight.ravel()[:] + self.npairs.ravel()[:] += other.npairs.ravel()[:] + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.xi for c in others], axis=0, out=self.xi) + np.sum([c.meanr for c in others], axis=0, out=self.meanr) + np.sum([c.meanlogr for c in others], axis=0, out=self.meanlogr) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.npairs for c in others], axis=0, out=self.npairs) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, + initialize=True, finalize=True): + """Compute the correlation function. + + - If only 1 argument is given, then compute an auto-correlation function. + - If 2 arguments are given, then compute a cross-correlation function. + + Both arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first K field. + cat2 (Catalog): A catalog or list of catalogs for the second K field, if any. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr2.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if cat2 is not None and not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + + if cat2 is None: + self._process_all_auto(cat1, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, metric, num_threads, comm, low_mem) + + if finalize: + if cat2 is None: + vark1 = calculateVarK(cat1, low_mem=low_mem) + vark2 = vark1 + self.logger.info("vark = %f: sig_k = %f",vark1,math.sqrt(vark1)) + else: + vark1 = calculateVarK(cat1, low_mem=low_mem) + vark2 = calculateVarK(cat2, low_mem=low_mem) + self.logger.info("vark1 = %f: sig_k = %f",vark1,math.sqrt(vark1)) + self.logger.info("vark2 = %f: sig_k = %f",vark2,math.sqrt(vark2)) + self.finalize(vark1,vark2)
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + The output file will include the following columns: + + ========== ======================================================== + Column Description + ========== ======================================================== + r_nom The nominal center of the bin in r + meanr The mean value :math:`\langle r \rangle` of pairs that + fell into each bin + meanlogr The mean value :math:`\langle \log(r) \rangle` of pairs + that fell into each bin + xi The estimate of the correlation function xi(r) + sigma_xi The sqrt of the variance estimate of xi(r) + weight The total weight contributing to each bin + npairs The total number of pairs in each bin + ========== ======================================================== + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing KK correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
+ + @property + def _write_col_names(self): + return ['r_nom','meanr','meanlogr','xi','sigma_xi','weight','npairs'] + + @property + def _write_data(self): + data = [ self.rnom, self.meanr, self.meanlogr, + self.xi, np.sqrt(self.varxi), self.weight, self.npairs ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `KKCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading KK correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + self.meanr = data['meanR'].reshape(s) + self.meanlogr = data['meanlogR'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meanr = data['meanr'].reshape(s) + self.meanlogr = data['meanlogr'].reshape(s) + self.xi = data['xi'].reshape(s) + self.varxi = data['sigma_xi'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.npairs = data['npairs'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/kkkcorrelation.html b/docs/_build/html/_modules/treecorr/kkkcorrelation.html new file mode 100644 index 00000000..a6df83f3 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/kkkcorrelation.html @@ -0,0 +1,1232 @@ + + + + + + treecorr.kkkcorrelation — TreeCorr 4.3.0 documentation + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for treecorr.kkkcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: kkkcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarK
+from .binnedcorr3 import BinnedCorr3
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class KKKCorrelation(BinnedCorr3): + r"""This class handles the calculation and storage of a 3-point kappa-kappa-kappa correlation + function. + + .. note:: + + While we use the term kappa (:math:`\kappa`) here and the letter K in various places, + in fact any scalar field will work here. For example, you can use this to compute + correlations of the CMB temperature fluctuations, where "kappa" would really be + :math:`\Delta T`. + + See the doc string of `BinnedCorr3` for a description of how the triangles are binned. + + Objects of this class hold the following attributes: + + Attributes: + nbins: The number of bins in logr where r = d2 + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + nubins: The number of bins in u where u = d3/d2 + ubin_size: The size of the bins in u + min_u: The minimum u being considered + max_u: The maximum u being considered + nvbins: The number of bins in v where v = +-(d1-d2)/d3 + vbin_size: The size of the bins in v + min_v: The minimum v being considered + max_v: The maximum v being considered + logr1d: The nominal centers of the nbins bins in log(r). + u1d: The nominal centers of the nubins bins in u. + v1d: The nominal centers of the nvbins bins in v. + + In addition, the following attributes are numpy arrays whose shape is (nbins, nubins, nvbins): + + Attributes: + logr: The nominal center of the bin in log(r). + rnom: The nominal center of the bin converted to regular distance. + i.e. r = exp(logr). + u: The nominal center of the bin in u. + v: The nominal center of the bin in v. + meand1: The (weighted) mean value of d1 for the triangles in each bin. + meanlogd1: The mean value of log(d1) for the triangles in each bin. + meand2: The (weighted) mean value of d2 (aka r) for the triangles in each bin. + meanlogd2: The mean value of log(d2) for the triangles in each bin. + meand3: The (weighted) mean value of d3 for the triangles in each bin. + meanlogd3: The mean value of log(d3) for the triangles in each bin. + meanu: The mean value of u for the triangles in each bin. + meanv: The mean value of v for the triangles in each bin. + zeta: The correlation function, :math:`\zeta(r,u,v)`. + varzeta: The variance of :math:`\zeta`, only including the shot noise propagated into + the final correlation. This does not include sample variance, so it is always + an underestimate of the actual variance. + weight: The total weight in each bin. + ntri: The number of triangles going into each bin (including those where one or + more objects have w=0). + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_auto` and/or + `process_cross`, then the units will not be applied to ``meand1``, ``meanlogd1``, etc. until + the `finalize` function is called. + + The typical usage pattern is as follows: + + >>> kkk = treecorr.KKKCorrelation(config) + >>> kkk.process(cat) # For auto-correlation. + >>> kkk.process(cat1,cat2,cat3) # For cross-correlation. + >>> kkk.write(file_name) # Write out to a file. + >>> zeta = kkk.zeta # To access zeta directly. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr3`, which are ignored here. (default: None) + logger: If desired, a logger object for logging.
(default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `KKKCorrelation`. See class doc for details. + """ + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 2 # KData + self._ro._d2 = 2 # KData + self._ro._d3 = 2 # KData + shape = self.logr.shape + self.zeta = np.zeros(shape, dtype=float) + self.varzeta = np.zeros(shape, dtype=float) + self.meand1 = np.zeros(shape, dtype=float) + self.meanlogd1 = np.zeros(shape, dtype=float) + self.meand2 = np.zeros(shape, dtype=float) + self.meanlogd2 = np.zeros(shape, dtype=float) + self.meand3 = np.zeros(shape, dtype=float) + self.meanlogd3 = np.zeros(shape, dtype=float) + self.meanu = np.zeros(shape, dtype=float) + self.meanv = np.zeros(shape, dtype=float) + self.weight = np.zeros(shape, dtype=float) + self.ntri = np.zeros(shape, dtype=float) + self.logger.debug('Finished building KKKCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr3( + self._d1, self._d2, self._d3, self._bintype, + self._min_sep,self._max_sep,self.nbins,self._bin_size,self.b, + self.min_u,self.max_u,self.nubins,self.ubin_size,self.bu, + self.min_v,self.max_v,self.nvbins,self.vbin_size,self.bv, + self.xperiod, self.yperiod, self.zperiod, + dp(self.zeta), dp(None), dp(None), dp(None), + dp(None), dp(None), dp(None), dp(None), + dp(self.meand1), dp(self.meanlogd1), dp(self.meand2), dp(self.meanlogd2), + dp(self.meand3), dp(self.meanlogd3), dp(self.meanu), dp(self.meanv), + dp(self.weight), dp(self.ntri)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr3(self.corr, self._d1, self._d2, self._d3, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `KKKCorrelation` instances are equal""" + return (isinstance(other, KKKCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nubins == other.nubins and + self.ubin_size == other.ubin_size and + self.min_v == other.min_v and + self.max_v == other.max_v and + self.nvbins == other.nvbins and + self.vbin_size == other.vbin_size and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meand1, other.meand1) and + np.array_equal(self.meanlogd1, other.meanlogd1) and + np.array_equal(self.meand2, other.meand2) and + np.array_equal(self.meanlogd2, other.meanlogd2) and + np.array_equal(self.meand3, other.meand3) and + np.array_equal(self.meanlogd3, other.meanlogd3) and + np.array_equal(self.meanu, other.meanu) and + np.array_equal(self.meanv, other.meanv) and + np.array_equal(self.zeta, other.zeta) and + np.array_equal(self.varzeta, other.varzeta) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.ntri, other.ntri))
+ +
[docs] def copy(self): + """Make a copy""" + ret = KKKCorrelation.__new__(KKKCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular don't deep copy config or logger + # Most of the rest are scalars, which copy fine this way. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + return ret
+ +
[docs] def __repr__(self): + return 'KKKCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): + """Process a single catalog, accumulating the auto-correlation. + + This accumulates the auto-correlation for the given catalog. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + Parameters: + cat (Catalog): The catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat.name == '': + self.logger.info('Starting process KKK auto-correlations') + else: + self.logger.info('Starting process KKK auto-correlations for cat %s.', cat.name) + + self._set_metric(metric, cat.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + field = cat.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',field.nTopLevelNodes) + _lib.ProcessAuto3(self.corr, field.data, self.output_dots, + field._d, self._coords, self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): + """Process two catalogs, accumulating the 3pt cross-correlation, where one of the + points in each triangle comes from the first catalog, and two come from the second. + + This accumulates the cross-correlation for the given catalogs as part of a larger + auto-correlation calculation. E.g. when splitting up a large catalog into patches, + this is appropriate to use for the cross correlation between different patches + as part of the complete auto-correlation of the full catalog. + + Parameters: + cat1 (Catalog): The first catalog to process. (1 point in each triangle will come + from this catalog.) + cat2 (Catalog): The second catalog to process. (2 points in each triangle will come + from this catalog.) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process KKK (1-2) cross-correlations') + else: + self.logger.info('Starting process KKK (1-2) cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: all 3 correlation objects are the same. Thus, all triangles will be placed + # into self.corr, whichever way the three catalogs are permuted for each triangle. + _lib.ProcessCross12(self.corr, self.corr, self.corr, + f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, + self._bintype, self._metric)
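The role of process_cross12 is clearest from how the triangles of a full auto-correlation decompose over patches: all three points in one patch (process_auto), two points in one patch and one in another (process_cross12), or each point in a different patch (process_cross). A sketch of that decomposition (patches is hypothetical, assumed to be a list of treecorr.Catalog objects from one parent catalog):

    import treecorr

    # patches: assumed list of Catalogs from one parent catalog.
    kkk = treecorr.KKKCorrelation(min_sep=1., max_sep=30., nbins=4,
                                  sep_units='arcmin')

    npatch = len(patches)
    for i, ci in enumerate(patches):
        kkk.process_auto(ci)                    # all 3 points in patch i
        for j, cj in enumerate(patches):
            if j != i:
                kkk.process_cross12(cj, ci)     # 1 point in cj, 2 in ci
    for i in range(npatch):
        for j in range(i+1, npatch):
            for k in range(j+1, npatch):
                # All 3 points in different patches; one call per unordered
                # triple, since all permutations land in the same bins here.
                kkk.process_cross(patches[i], patches[j], patches[k])

    vark = treecorr.calculateVarK(patches)
    kkk.finalize(vark, vark, vark)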
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): + """Process a set of three catalogs, accumulating the 3pt cross-correlation. + + This accumulates the cross-correlation for the given catalogs as part of a larger + auto-correlation calculation. E.g. when splitting up a large catalog into patches, + this is appropriate to use for the cross correlation between different patches + as part of the complete auto-correlation of the full catalog. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + cat3 (Catalog): The third catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '' and cat3.name == '': + self.logger.info('Starting process KKK cross-correlations') + else: + self.logger.info('Starting process KKK cross-correlations for cats %s, %s, %s.', + cat1.name, cat2.name, cat3.name) + + self._set_metric(metric, cat1.coords, cat2.coords, cat3.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: all 6 correlation objects are the same. Thus, all triangles will be placed + # into self.corr, whichever way the three catalogs are permuted for each triangle. + _lib.ProcessCross3(self.corr, self.corr, self.corr, + self.corr, self.corr, self.corr, + f1.data, f2.data, f3.data, self.output_dots, + f1._d, f2._d, f3._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.zeta[mask1] /= self.weight[mask1] + self.meand1[mask1] /= self.weight[mask1] + self.meanlogd1[mask1] /= self.weight[mask1] + self.meand2[mask1] /= self.weight[mask1] + self.meanlogd2[mask1] /= self.weight[mask1] + self.meand3[mask1] /= self.weight[mask1] + self.meanlogd3[mask1] /= self.weight[mask1] + self.meanu[mask1] /= self.weight[mask1] + self.meanv[mask1] /= self.weight[mask1] + + # Update the units + self._apply_units(mask1) + + # Use meanlogr when available, but set to nominal when no triangles in bin. + self.meand2[mask2] = self.rnom[mask2] + self.meanlogd2[mask2] = self.logr[mask2] + self.meanu[mask2] = self.u[mask2] + self.meanv[mask2] = self.v[mask2] + self.meand3[mask2] = self.u[mask2] * self.meand2[mask2] + self.meanlogd3[mask2] = np.log(self.meand3[mask2]) + self.meand1[mask2] = self.v[mask2] * self.meand3[mask2] + self.meand2[mask2] + self.meanlogd1[mask2] = np.log(self.meand1[mask2]) + +
[docs] def finalize(self, vark1, vark2, vark3): + """Finalize the calculation of the correlation function. + + The `process_auto` and `process_cross` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation by dividing by the total weight. + + Parameters: + vark1 (float): The kappa variance for the first field. + vark2 (float): The kappa variance for the second field. + vark3 (float): The kappa variance for the third field. + """ + self._finalize() + self._var_num = vark1 * vark2 * vark3 + self.cov = self.estimate_cov(self.var_method) + self.varzeta.ravel()[:] = self.cov.diagonal()
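Since the accumulation commands may be called repeatedly, `finalize` is called once at the end; e.g. a sketch over a hypothetical list `cats` of sub-catalogs of one field (for a complete auto-correlation the cross terms between sub-catalogs would also be needed, cf. `process_cross12` above):

    >>> for c in cats:
    ...     kkk.process_auto(c)
    >>> vark = treecorr.calculateVarK(cats)  # accepts a catalog or a list
    >>> kkk.finalize(vark, vark, vark)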
+ + def _clear(self): + """Clear the data vectors + """ + self.zeta[:,:,:] = 0. + self.varzeta[:,:,:] = 0. + self.meand1[:,:,:] = 0. + self.meanlogd1[:,:,:] = 0. + self.meand2[:,:,:] = 0. + self.meanlogd2[:,:,:] = 0. + self.meand3[:,:,:] = 0. + self.meanlogd3[:,:,:] = 0. + self.meanu[:,:,:] = 0. + self.meanv[:,:,:] = 0. + self.weight[:,:,:] = 0. + self.ntri[:,:,:] = 0. + +
[docs] def __iadd__(self, other): + """Add a second `KKKCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `KKKCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, KKKCorrelation): + raise TypeError("Can only add another KKKCorrelation object") + if not (self.nbins == other.nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.nubins == other.nubins and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nvbins == other.nvbins and + self.min_v == other.min_v and + self.max_v == other.max_v): + raise ValueError("KKKCorrelation to be added is not compatible with this one.") + + if not other.nonzero: return self + self._set_metric(other.metric, other.coords, other.coords, other.coords) + self.zeta[:] += other.zeta[:] + self.meand1[:] += other.meand1[:] + self.meanlogd1[:] += other.meanlogd1[:] + self.meand2[:] += other.meand2[:] + self.meanlogd2[:] += other.meanlogd2[:] + self.meand3[:] += other.meand3[:] + self.meanlogd3[:] += other.meanlogd3[:] + self.meanu[:] += other.meanu[:] + self.meanv[:] += other.meanv[:] + self.weight[:] += other.weight[:] + self.ntri[:] += other.ntri[:] + return self
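A sketch of combining partial, not-yet-finalized results, e.g. accumulated by separate workers, assuming hypothetical objects `kkk1` and `kkk2` built with identical binning and `vark` as above:

    >>> total = kkk1.copy()
    >>> total += kkk2                    # raises ValueError if the binning differs
    >>> total.finalize(vark, vark, vark)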
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.zeta for c in others], axis=0, out=self.zeta) + np.sum([c.meand1 for c in others], axis=0, out=self.meand1) + np.sum([c.meanlogd1 for c in others], axis=0, out=self.meanlogd1) + np.sum([c.meand2 for c in others], axis=0, out=self.meand2) + np.sum([c.meanlogd2 for c in others], axis=0, out=self.meanlogd2) + np.sum([c.meand3 for c in others], axis=0, out=self.meand3) + np.sum([c.meanlogd3 for c in others], axis=0, out=self.meanlogd3) + np.sum([c.meanu for c in others], axis=0, out=self.meanu) + np.sum([c.meanv for c in others], axis=0, out=self.meanv) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.ntri for c in others], axis=0, out=self.ntri) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, + comm=None, low_mem=False, initialize=True, finalize=True): + """Compute the 3pt correlation function. + + - If only 1 argument is given, then compute an auto-correlation function. + - If 2 arguments are given, then compute a cross-correlation function with the + first catalog taking one corner of the triangles, and the second taking two corners. + - If 3 arguments are given, then compute a three-way cross-correlation function. + + All arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + .. note:: + + For a correlation of multiple catalogs, it typically matters which corner of the + triangle comes from which catalog, which is not kept track of by this function. + The final accumulation will have d1 > d2 > d3 regardless of which input catalog + appears at each corner. The class which keeps track of which catalog appears + in each position in the triangle is `KKKCrossCorrelation`. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first K field. + cat2 (Catalog): A catalog or list of catalogs for the second K field. + (default: None) + cat3 (Catalog): A catalog or list of catalogs for the third K field. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr3.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. 
+ (default: True) + """ + import math + if initialize: + self.clear() + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if cat2 is not None and not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + if cat3 is not None and not isinstance(cat3,list): + cat3 = cat3.get_patches(low_mem=low_mem) + + if cat2 is None: + if cat3 is not None: + raise ValueError("For two catalog case, use cat1,cat2, not cat1,cat3") + self._process_all_auto(cat1, metric, num_threads, comm, low_mem) + elif cat3 is None: + self._process_all_cross12(cat1, cat2, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, cat3, metric, num_threads, comm, low_mem) + + if finalize: + if cat2 is None: + vark1 = calculateVarK(cat1, low_mem=low_mem) + vark2 = vark1 + vark3 = vark1 + self.logger.info("vark = %f: sig_k = %f",vark1,math.sqrt(vark1)) + elif cat3 is None: + vark1 = calculateVarK(cat1, low_mem=low_mem) + vark2 = calculateVarK(cat2, low_mem=low_mem) + vark3 = vark2 + self.logger.info("vark1 = %f: sig_k = %f",vark1,math.sqrt(vark1)) + self.logger.info("vark2 = %f: sig_k = %f",vark2,math.sqrt(vark2)) + else: + vark1 = calculateVarK(cat1, low_mem=low_mem) + vark2 = calculateVarK(cat2, low_mem=low_mem) + vark3 = calculateVarK(cat3, low_mem=low_mem) + self.logger.info("vark1 = %f: sig_k = %f",vark1,math.sqrt(vark1)) + self.logger.info("vark2 = %f: sig_k = %f",vark2,math.sqrt(vark2)) + self.logger.info("vark3 = %f: sig_k = %f",vark3,math.sqrt(vark3)) + self.finalize(vark1,vark2,vark3)
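In most applications this single call is all that is needed; a sketch of a three-way cross-correlation with hypothetical catalogs:

    >>> kkk = treecorr.KKKCorrelation(min_sep=1., max_sep=50., nbins=10, sep_units='arcmin')
    >>> kkk.process(cat1, cat2, cat3)   # corners not tracked; d1 > d2 > d3 regardless
    >>> zeta = kkk.zeta                 # 3-d array over the (logr, u, v) bins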
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + The output file will include the following columns: + + ========== ============================================================= + Column Description + ========== ============================================================= + r_nom The nominal center of the bin in r = d2 where d1 > d2 > d3 + u_nom The nominal center of the bin in u = d3/d2 + v_nom The nominal center of the bin in v = +-(d1-d2)/d3 + meand1 The mean value :math:`\langle d1\rangle` of triangles that + fell into each bin + meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles + that fell into each bin + meand2 The mean value :math:`\langle d2\rangle` of triangles that + fell into each bin + meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles + that fell into each bin + meand3 The mean value :math:`\langle d3\rangle` of triangles that + fell into each bin + meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles + that fell into each bin + meanu The mean value :math:`\langle u\rangle` of triangles that + fell into each bin + meanv The mean value :math:`\langle v\rangle` of triangles that + fell into each bin + zeta The estimator of :math:`\zeta(r,u,v)` + sigma_zeta The sqrt of the variance estimate of :math:`\zeta` + weight The total weight of triangles contributing to each bin + ntri The number of triangles contributing to each bin + ========== ============================================================= + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing KKK correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
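A sketch of writing the results, letting the extension choose the file type; the file names are hypothetical, and FITS output requires the optional fitsio dependency:

    >>> kkk.write('kkk.fits')              # binary format, no loss of precision
    >>> kkk.write('kkk.out', precision=8)  # ASCII with 8 significant digits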
+ + @property + def _write_col_names(self): + return [ 'r_nom', 'u_nom', 'v_nom', + 'meand1', 'meanlogd1', 'meand2', 'meanlogd2', + 'meand3', 'meanlogd3', 'meanu', 'meanv', + 'zeta', 'sigma_zeta', 'weight', 'ntri' ] + + @property + def _write_data(self): + data = [ self.rnom, self.u, self.v, + self.meand1, self.meanlogd1, self.meand2, self.meanlogd2, + self.meand3, self.meanlogd3, self.meanu, self.meanv, + self.zeta, np.sqrt(self.varzeta), self.weight, self.ntri ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `KKKCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading KKK correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
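Because `read` does not check the configuration, a convenient pattern is to keep the construction kwargs in one dict so the reading object is guaranteed to match; a sketch with a hypothetical file name:

    >>> config = dict(min_sep=1., max_sep=50., nbins=10, sep_units='arcmin')
    >>> kkk = treecorr.KKKCorrelation(**config)
    >>> kkk.read('kkk.fits')   # assumes the file was written with the same binning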
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meand1 = data['meand1'].reshape(s) + self.meanlogd1 = data['meanlogd1'].reshape(s) + self.meand2 = data['meand2'].reshape(s) + self.meanlogd2 = data['meanlogd2'].reshape(s) + self.meand3 = data['meand3'].reshape(s) + self.meanlogd3 = data['meanlogd3'].reshape(s) + self.meanu = data['meanu'].reshape(s) + self.meanv = data['meanv'].reshape(s) + self.zeta = data['zeta'].reshape(s) + self.varzeta = data['sigma_zeta'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.ntri = data['ntri'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1) + self.npatch3 = params.get('npatch3', 1)
+ + +
[docs]class KKKCrossCorrelation(BinnedCorr3): + r"""This class handles the calculation of a 3-point kappa-kappa-kappa cross-correlation + function. + + For 3-point cross correlations, it matters which of the two or three fields falls on + each corner of the triangle. E.g. is field 1 on the corner opposite d1 (the longest + side of the triangle) or is it field 2 (or 3) there? This is in contrast to the 2-point + correlation where the symmetry of the situation means that it doesn't matter which point + is identified with each field. This makes it significantly more complicated to keep track + of all the relevant information for a 3-point cross correlation function. + + The `KKKCorrelation` class holds a single :math:`\zeta` function describing all + possible triangles, parameterized according to their relative side lengths ordered as + d1 > d2 > d3. + + For a cross-correlation of two fields: K1 - K1 - K2 (i.e. the K1 field is at two of the + corners and K2 is at one corner), we need three of these :math:`\zeta` functions + to capture all of the triangles, since the K2 points may be opposite d1 or d2 or d3. + For a cross-correlation of three fields: K1 - K2 - K3, we need six sets, to account for + all of the possible permutations relative to the triangle sides. + + Therefore, this class holds 6 instances of `KKKCorrelation`, which in turn hold the + information about triangles in each of the relevant configurations. We name these: + + Attributes: + k1k2k3: Triangles where K1 is opposite d1, K2 is opposite d2, K3 is opposite d3. + k1k3k2: Triangles where K1 is opposite d1, K3 is opposite d2, K2 is opposite d3. + k2k1k3: Triangles where K2 is opposite d1, K1 is opposite d2, K3 is opposite d3. + k2k3k1: Triangles where K2 is opposite d1, K3 is opposite d2, K1 is opposite d3. + k3k1k2: Triangles where K3 is opposite d1, K1 is opposite d2, K2 is opposite d3. + k3k2k1: Triangles where K3 is opposite d1, K2 is opposite d2, K1 is opposite d3. + + If for instance K2 and K3 are the same field, then e.g. k1k2k3 and k1k3k2 will have + the same values. + + Objects of this class also hold the following attributes, which are identical in each of + the above KKKCorrelation instances. + + Attributes: + nbins: The number of bins in logr where r = d2 + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + nubins: The number of bins in u where u = d3/d2 + ubin_size: The size of the bins in u + min_u: The minimum u being considered + max_u: The maximum u being considered + nvbins: The number of bins in v where v = +-(d1-d2)/d3 + vbin_size: The size of the bins in v + min_v: The minimum v being considered + max_v: The maximum v being considered + logr1d: The nominal centers of the nbins bins in log(r). + u1d: The nominal centers of the nubins bins in u. + v1d: The nominal centers of the nvbins bins in v. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_cross` directly, + then the units will not be applied to ``meand1``, ``meanlogd1``, etc. until the `finalize` + function is called. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr3`, which are ignored here. (default: None) + logger: If desired, a logger object for logging.
(default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `KKKCrossCorrelation`. See class doc for details. + """ + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 2 # KData + self._ro._d2 = 2 # KData + self._ro._d3 = 2 # KData + + self.k1k2k3 = KKKCorrelation(config, logger=logger, **kwargs) + self.k1k3k2 = KKKCorrelation(config, logger=logger, **kwargs) + self.k2k1k3 = KKKCorrelation(config, logger=logger, **kwargs) + self.k2k3k1 = KKKCorrelation(config, logger=logger, **kwargs) + self.k3k1k2 = KKKCorrelation(config, logger=logger, **kwargs) + self.k3k2k1 = KKKCorrelation(config, logger=logger, **kwargs) + self._all = [self.k1k2k3, self.k1k3k2, self.k2k1k3, self.k2k3k1, self.k3k1k2, self.k3k2k1] + + self.logger.debug('Finished building KKKCrossCorr')
+ +
[docs] def __eq__(self, other): + """Return whether two `KKKCrossCorrelation` instances are equal""" + return (isinstance(other, KKKCrossCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nubins == other.nubins and + self.ubin_size == other.ubin_size and + self.min_v == other.min_v and + self.max_v == other.max_v and + self.nvbins == other.nvbins and + self.vbin_size == other.vbin_size and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + self.k1k2k3 == other.k1k2k3 and + self.k1k3k2 == other.k1k3k2 and + self.k2k1k3 == other.k2k1k3 and + self.k2k3k1 == other.k2k3k1 and + self.k3k1k2 == other.k3k1k2 and + self.k3k2k1 == other.k3k2k1)
+ +
[docs] def copy(self): + """Make a copy""" + ret = KKKCrossCorrelation.__new__(KKKCrossCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, KKKCorrelation): + ret.__dict__[key] = item.copy() + else: + ret.__dict__[key] = item + # This needs to be the new list: + ret._all = [ret.k1k2k3, ret.k1k3k2, ret.k2k1k3, ret.k2k3k1, ret.k3k1k2, ret.k3k2k1] + return ret
+ +
[docs] def __repr__(self): + return 'KKKCrossCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): + """Process two catalogs, accumulating the 3pt cross-correlation, where one of the + points in each triangle comes from the first catalog, and two come from the second. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + .. note:: + + This only adds to the attributes k1k2k3, k2k1k3, k2k3k1, not the ones where + 3 comes before 2. When running this via the regular `process` method, it will + combine them at the end to make sure k1k2k3 == k1k3k2, etc. for a complete + calculation of the 1-2 cross-correlation. + + Parameters: + cat1 (Catalog): The first catalog to process. (1 point in each triangle will come + from this catalog.) + cat2 (Catalog): The second catalog to process. (2 points in each triangle will come + from this catalog.) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process KKK (1-2) cross-correlations') + else: + self.logger.info('Starting process KKK (1-2) cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + for kkk in self._all: + kkk._set_metric(self.metric, self.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: Unlike in KKKCorrelation.process_cross12, the three correlation objects here + # are distinct, so triangles are accumulated into k1k2k3, k2k1k3, or k2k3k1 + # according to which side the point from cat1 ends up opposite. + _lib.ProcessCross12(self.k1k2k3.corr, self.k2k1k3.corr, self.k2k3k1.corr, + f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, + self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): + """Process a set of three catalogs, accumulating the 3pt cross-correlation. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + cat3 (Catalog): The third catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '' and cat3.name == '': + self.logger.info('Starting process KKK cross-correlations') + else: + self.logger.info('Starting process KKK cross-correlations for cats %s, %s, %s.', + cat1.name, cat2.name, cat3.name) + + self._set_metric(metric, cat1.coords, cat2.coords, cat3.coords) + for kkk in self._all: + kkk._set_metric(self.metric, self.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross3(self.k1k2k3.corr, self.k1k3k2.corr, + self.k2k1k3.corr, self.k2k3k1.corr, + self.k3k1k2.corr, self.k3k2k1.corr, + f1.data, f2.data, f3.data, self.output_dots, + f1._d, f2._d, f3._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + for kkk in self._all: + kkk._finalize() + +
[docs] def finalize(self, vark1, vark2, vark3): + """Finalize the calculation of the correlation function. + + The `process_cross12` and `process_cross` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation by dividing by the total weight. + + Parameters: + vark1 (float): The kappa variance for the first field that was correlated. + vark2 (float): The kappa variance for the second field that was correlated. + vark3 (float): The kappa variance for the third field that was correlated. + """ + self.k1k2k3.finalize(vark1,vark2,vark3) + self.k1k3k2.finalize(vark1,vark3,vark2) + self.k2k1k3.finalize(vark2,vark1,vark3) + self.k2k3k1.finalize(vark2,vark3,vark1) + self.k3k1k2.finalize(vark3,vark1,vark2) + self.k3k2k1.finalize(vark3,vark2,vark1)
+ + @property + def nonzero(self): + """Return if there are any values accumulated yet. (i.e. ntri > 0) + """ + return any([kkk.nonzero for kkk in self._all]) + + def _clear(self): + """Clear the data vectors + """ + for kkk in self._all: + kkk._clear() + +
[docs] def __iadd__(self, other): + """Add a second `KKKCrossCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `KKKCrossCorrelation` objects should not have had + `finalize` called yet. Then, after adding them together, you should call `finalize` + on the sum. + """ + if not isinstance(other, KKKCrossCorrelation): + raise TypeError("Can only add another KKKCrossCorrelation object") + self.k1k2k3 += other.k1k2k3 + self.k1k3k2 += other.k1k3k2 + self.k2k1k3 += other.k2k1k3 + self.k2k3k1 += other.k2k3k1 + self.k3k1k2 += other.k3k1k2 + self.k3k2k1 += other.k3k2k1 + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + for i, kkk in enumerate(self._all): + kkk._sum([c._all[i] for c in others]) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2, cat3=None, *, metric=None, num_threads=None, + comm=None, low_mem=False, initialize=True, finalize=True): + """Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3. + + - If 2 arguments are given, then compute a cross-correlation function with the + first catalog taking one corner of the triangles, and the second taking two corners. + - If 3 arguments are given, then compute a three-way cross-correlation function. + + All arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first K field. + cat2 (Catalog): A catalog or list of catalogs for the second K field. + cat3 (Catalog): A catalog or list of catalogs for the third K field. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr3.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + self._process12 = False + + if not isinstance(cat1,list): cat1 = cat1.get_patches() + if not isinstance(cat2,list): cat2 = cat2.get_patches() + if cat3 is not None and not isinstance(cat3,list): cat3 = cat3.get_patches() + + if cat3 is None: + self._process12 = True + self._process_all_cross12(cat1, cat2, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, cat3, metric, num_threads, comm, low_mem) + + if finalize: + if self._process12: + # Then some of the processing involved a cross12 calculation. + # This means that spots 2 and 3 should not be distinguished. + # Combine the relevant arrays. + self.k1k2k3 += self.k1k3k2 + self.k2k1k3 += self.k3k1k2 + self.k2k3k1 += self.k3k2k1 + # Copy back by doing clear and +=. + # This makes sure the coords and metric are set properly. + self.k1k3k2.clear() + self.k3k1k2.clear() + self.k3k2k1.clear() + self.k1k3k2 += self.k1k2k3 + self.k3k1k2 += self.k2k1k3 + self.k3k2k1 += self.k2k3k1 + + vark1 = calculateVarK(cat1, low_mem=low_mem) + vark2 = calculateVarK(cat2, low_mem=low_mem) + self.logger.info("vark1 = %f: sig_k = %f",vark1,math.sqrt(vark1)) + self.logger.info("vark2 = %f: sig_k = %f",vark2,math.sqrt(vark2)) + if cat3 is None: + vark3 = vark2 + else: + vark3 = calculateVarK(cat3, low_mem=low_mem) + self.logger.info("vark3 = %f: sig_k = %f",vark3,math.sqrt(vark3)) + self.finalize(vark1,vark2,vark3)
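A sketch of the two-catalog case, where the six attributes retain which field sits at each corner (here `cat1` and `cat2` are hypothetical catalogs, so K2 = K3):

    >>> kkkc = treecorr.KKKCrossCorrelation(min_sep=1., max_sep=50., nbins=10,
    ...                                     sep_units='arcmin')
    >>> kkkc.process(cat1, cat2)     # K1-K2-K2 cross-correlation
    >>> kkkc.k1k2k3.zeta             # triangles with the cat1 point opposite d1
    >>> kkkc.k2k3k1.zeta             # triangles with the cat1 point opposite d3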
+ +
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + In this case, the concatenation of zeta.ravel() for each combination in the following + order: k1k2k3, k1k3k2, k2k1k3, k2k3k1, k3k1k2, k3k2k1. + """ + return np.concatenate([kkk.zeta.ravel() for kkk in self._all])
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + In this case, the concatenation of getWeight() for each combination in the following + order: k1k2k3, k1k3k2, k2k1k3, k2k3k1, k3k1k2, k3k2k1. + """ + return np.concatenate([kkk.getWeight() for kkk in self._all])
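These two vectors are aligned, so a covariance for the full concatenated statistic can be estimated directly; a sketch assuming the input catalogs were built with patches:

    >>> stat = kkkc.getStat()                 # 6 * nbins*nubins*nvbins values
    >>> cov = kkkc.estimate_cov('jackknife')  # covariance matrix for that vector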
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the cross-correlation functions to the file, file_name. + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing KKK cross-correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + writer = make_writer(file_name, precision, file_type, self.logger) + with writer: + names = [ 'k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1' ] + for name, corr in zip(names, self._all): + corr._write(writer, name, write_patch_results)
+ +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `KKKCrossCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading KKK cross-correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + names = [ 'k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1' ] + for name, corr in zip(names, self._all): + corr._read(reader, name)
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/ngcorrelation.html b/docs/_build/html/_modules/treecorr/ngcorrelation.html new file mode 100644 index 00000000..7d207b7a --- /dev/null +++ b/docs/_build/html/_modules/treecorr/ngcorrelation.html @@ -0,0 +1,931 @@
treecorr.ngcorrelation — TreeCorr 4.3.0 documentation
Source code for treecorr.ngcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: ngcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarG
+from .binnedcorr2 import BinnedCorr2
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class NGCorrelation(BinnedCorr2): + r"""This class handles the calculation and storage of a 2-point count-shear correlation + function. This is the tangential shear profile around lenses, commonly referred to as + galaxy-galaxy lensing. + + Objects of this class hold the following attributes: + + Attributes: + nbins: The number of bins in logr + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + + In addition, the following attributes are numpy arrays of length (nbins): + + Attributes: + logr: The nominal center of the bin in log(r) (the natural logarithm of r). + rnom: The nominal center of the bin converted to regular distance. + i.e. r = exp(logr). + meanr: The (weighted) mean value of r for the pairs in each bin. + If there are no pairs in a bin, then exp(logr) will be used instead. + meanlogr: The (weighted) mean value of log(r) for the pairs in each bin. + If there are no pairs in a bin, then logr will be used instead. + xi: The correlation function, :math:`\xi(r) = \langle \gamma_T\rangle`. + xi_im: The imaginary part of :math:`\xi(r)`. + varxi: An estimate of the variance of :math:`\xi`. + weight: The total weight in each bin. + npairs: The number of pairs going into each bin (including pairs where one or + both objects have w=0). + cov: An estimate of the full covariance matrix. + raw_xi: The raw value of xi, uncorrected by an RG calculation. cf. `calculateXi` + raw_xi_im: The raw value of xi_im, uncorrected by an RG calculation. cf. `calculateXi` + raw_varxi: The raw value of varxi, uncorrected by an RG calculation. cf. `calculateXi` + + .. note:: + + The default method for estimating the variance and covariance attributes (``varxi`` + and ``cov``) is 'shot', which only includes the shape noise propagated into + the final correlation. This does not include sample variance, so it is always an + underestimate of the actual variance. To get better estimates, you need to set + ``var_method`` to something else and use patches in the input catalog(s). + cf. `Covariance Estimates`. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_cross`, + then the units will not be applied to ``meanr`` or ``meanlogr`` until the `finalize` + function is called. + + The typical usage pattern is as follows: + + >>> ng = treecorr.NGCorrelation(config) + >>> ng.process(cat1,cat2) # Compute the cross-correlation. + >>> ng.write(file_name) # Write out to a file. + >>> xi = ng.xi # Or access the correlation function directly. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr2`, which are ignored here. (default: None) + logger: If desired, a logger object for logging. (default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `NGCorrelation`. See class doc for details. + """ + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 1 # NData + self._ro._d2 = 3 # GData + self.xi = np.zeros_like(self.rnom, dtype=float) + self.xi_im = np.zeros_like(self.rnom, dtype=float) + self.varxi = np.zeros_like(self.rnom, dtype=float) + self.meanr = np.zeros_like(self.rnom, dtype=float) + self.meanlogr = np.zeros_like(self.rnom, dtype=float) + self.weight = np.zeros_like(self.rnom, dtype=float) + self.npairs = np.zeros_like(self.rnom, dtype=float) + self.raw_xi = self.xi + self.raw_xi_im = self.xi_im + self.raw_varxi = self.varxi + self._rg = None + self.logger.debug('Finished building NGCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr2( + self._d1, self._d2, self._bintype, + self._min_sep,self._max_sep,self._nbins,self._bin_size,self.b, + self.min_rpar, self.max_rpar, self.xperiod, self.yperiod, self.zperiod, + dp(self.raw_xi),dp(self.raw_xi_im), dp(None), dp(None), + dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr2(self.corr, self._d1, self._d2, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `NGCorrelation` instances are equal""" + return (isinstance(other, NGCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.min_rpar == other.min_rpar and + self.max_rpar == other.max_rpar and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meanr, other.meanr) and + np.array_equal(self.meanlogr, other.meanlogr) and + np.array_equal(self.xi, other.xi) and + np.array_equal(self.xi_im, other.xi_im) and + np.array_equal(self.varxi, other.varxi) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.npairs, other.npairs))
+ +
[docs] def copy(self): + """Make a copy""" + ret = NGCorrelation.__new__(NGCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular, don't deep copy config or logger. + # Most of the rest are scalars, which copy fine this way. + # And the read-only things are all in _ro. + # The results dict is trickier. We rely on it being copied in places, but we + # never add more to it after the copy, so shallow copy is fine. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + if self.xi is self.raw_xi: + ret.raw_xi = ret.xi + ret.raw_xi_im = ret.xi_im + ret.raw_varxi = ret.varxi + else: + ret.raw_xi = self.raw_xi.copy() + ret.raw_xi_im = self.raw_xi_im.copy() + ret.raw_varxi = self.raw_varxi.copy() + if self._rg is not None: + ret._rg = self._rg.copy() + return ret
+ +
[docs] def __repr__(self): + return 'NGCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NG cross-correlations') + else: + self.logger.info('Starting process NG cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation, only using + the corresponding pairs of objects in each catalog. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + import warnings + warnings.warn("The process_pairwise function is slated to be removed in a future version. "+ + "If you are actually using this function usefully, please "+ + "open an issue to describe your use case.", FutureWarning) + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NG pairwise-correlations') + else: + self.logger.info('Starting process NG pairwise-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + + f1 = cat1.getNSimpleField() + f2 = cat2.getGSimpleField() + + _lib.ProcessPair(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.raw_xi[mask1] /= self.weight[mask1] + self.raw_xi_im[mask1] /= self.weight[mask1] + self.meanr[mask1] /= self.weight[mask1] + self.meanlogr[mask1] /= self.weight[mask1] + + # Update the units of meanr, meanlogr + self._apply_units(mask1) + + # Use meanr, meanlogr when available, but set to nominal when no pairs in bin. + self.meanr[mask2] = self.rnom[mask2] + self.meanlogr[mask2] = self.logr[mask2] + +
[docs] def finalize(self, varg): + """Finalize the calculation of the correlation function. + + The `process_cross` command accumulates values in each bin, so it can be called + multiple times if appropriate. Afterwards, this command finishes the calculation + by dividing each column by the total weight. + + Parameters: + varg (float): The shear variance per component for the second field. + """ + self._finalize() + self._var_num = varg + self.cov = self.estimate_cov(self.var_method) + self.raw_varxi.ravel()[:] = self.cov.diagonal() + + self.xi = self.raw_xi + self.xi_im = self.raw_xi_im + self.varxi = self.raw_varxi
+ + def _clear(self): + """Clear the data vectors + """ + self.raw_xi.ravel()[:] = 0 + self.raw_xi_im.ravel()[:] = 0 + self.raw_varxi.ravel()[:] = 0 + self.meanr.ravel()[:] = 0 + self.meanlogr.ravel()[:] = 0 + self.weight.ravel()[:] = 0 + self.npairs.ravel()[:] = 0 + self.xi = self.raw_xi + self.xi_im = self.raw_xi_im + self.varxi = self.raw_varxi + +
[docs] def __iadd__(self, other): + """Add a second `NGCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `NGCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, NGCorrelation): + raise TypeError("Can only add another NGCorrelation object") + if not (self._nbins == other._nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep): + raise ValueError("NGCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords) + self.raw_xi.ravel()[:] += other.raw_xi.ravel()[:] + self.raw_xi_im.ravel()[:] += other.raw_xi_im.ravel()[:] + self.meanr.ravel()[:] += other.meanr.ravel()[:] + self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:] + self.weight.ravel()[:] += other.weight.ravel()[:] + self.npairs.ravel()[:] += other.npairs.ravel()[:] + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.raw_xi for c in others], axis=0, out=self.raw_xi) + np.sum([c.raw_xi_im for c in others], axis=0, out=self.raw_xi_im) + np.sum([c.meanr for c in others], axis=0, out=self.meanr) + np.sum([c.meanlogr for c in others], axis=0, out=self.meanlogr) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.npairs for c in others], axis=0, out=self.npairs) + self.xi = self.raw_xi + self.xi_im = self.raw_xi_im + self.varxi = self.raw_varxi + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, + initialize=True, finalize=True): + """Compute the correlation function. + + Both arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the N field. + cat2 (Catalog): A catalog or list of catalogs for the G field. + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr2.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + self._rg = None + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + + self._process_all_cross(cat1, cat2, metric, num_threads, comm, low_mem) + + if finalize: + varg = calculateVarG(cat2, low_mem=low_mem) + self.logger.info("varg = %f: sig_sn (per component) = %f",varg,math.sqrt(varg)) + self.finalize(varg)
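A typical galaxy-galaxy lensing sketch, with hypothetical lens and source position/shear arrays:

    >>> lens = treecorr.Catalog(ra=lens_ra, dec=lens_dec, ra_units='deg', dec_units='deg')
    >>> src = treecorr.Catalog(ra=src_ra, dec=src_dec, g1=g1, g2=g2,
    ...                        ra_units='deg', dec_units='deg')
    >>> ng = treecorr.NGCorrelation(min_sep=0.5, max_sep=60., nbins=12, sep_units='arcmin')
    >>> ng.process(lens, src)
    >>> gamT = ng.xi        # mean tangential shear in each bin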
+ +
[docs] @depr_pos_kwargs + def calculateXi(self, *, rg=None): + r"""Calculate the correlation function, possibly given another correlation function + that uses random points for the foreground objects. + + - If rg is None, the simple correlation function :math:`\langle \gamma_T\rangle` is + returned. + - If rg is not None, then a compensated calculation is done: + :math:`\langle \gamma_T\rangle = (DG - RG)`, where DG represents the mean shear + around the lenses and RG represents the mean shear around random points. + + After calling this function, the attributes ``xi``, ``xi_im``, ``varxi``, and ``cov`` will + correspond to the compensated values (if rg is provided). The raw, uncompensated values + are available as ``raw_xi``, ``raw_xi_im``, and ``raw_varxi``. + + Parameters: + rg (NGCorrelation): The cross-correlation using random locations as the lenses + (RG), if desired. (default: None) + + Returns: + Tuple containing + + - xi = array of the real part of :math:`\xi(R)` + - xi_im = array of the imaginary part of :math:`\xi(R)` + - varxi = array of the variance estimates of the above values + """ + if rg is not None: + self.xi = self.raw_xi - rg.xi + self.xi_im = self.raw_xi_im - rg.xi_im + self._rg = rg + + if rg.npatch1 not in (1,self.npatch1) or rg.npatch2 != self.npatch2: + raise RuntimeError("RG must be run with the same patches as DG") + + if len(self.results) > 0: + # If there are any rg patch pairs that aren't in results (e.g. due to different + # edge effects among the various pairs in consideration), then we need to add + # some dummy results to make sure all the right pairs are computed when we make + # the vectors for the covariance matrix. + template = next(iter(self.results.values())) # Just need something to copy. + for ij in rg.results: + if ij in self.results: continue + new_cij = template.copy() + new_cij.xi.ravel()[:] = 0 + new_cij.weight.ravel()[:] = 0 + self.results[ij] = new_cij + + self.cov = self.estimate_cov(self.var_method) + self.varxi.ravel()[:] = self.cov.diagonal() + else: + self.varxi = self.raw_varxi + rg.varxi + else: + self.xi = self.raw_xi + self.xi_im = self.raw_xi_im + self.varxi = self.raw_varxi + + return self.xi, self.xi_im, self.varxi
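A sketch of the compensated estimator, assuming a hypothetical random catalog `rand` drawn to match the lens selection, with `ng` and `src` as above:

    >>> rg = treecorr.NGCorrelation(min_sep=0.5, max_sep=60., nbins=12, sep_units='arcmin')
    >>> rg.process(rand, src)                      # mean shear around random points
    >>> xi, xi_im, varxi = ng.calculateXi(rg=rg)   # DG - RG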
+ + def _calculate_xi_from_pairs(self, pairs): + self._sum([self.results[ij] for ij in pairs]) + self._finalize() + if self._rg is not None: + # If rg has npatch1 = 1, adjust pairs appropriately + if self._rg.npatch1 == 1: + pairs = [(0,ij[1]) for ij in pairs if ij[0] == ij[1]] + # Make sure all ij are in the rg results (some might be missing, which is ok) + pairs = [ij for ij in pairs if self._rg._ok[ij[0],ij[1]]] + self._rg._calculate_xi_from_pairs(pairs) + self.xi -= self._rg.xi + +
[docs] @depr_pos_kwargs + def write(self, file_name, *, rg=None, file_type=None, precision=None, + write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + - If rg is None, the simple correlation function :math:`\langle \gamma_T\rangle` is used. + - If rg is not None, then a compensated calculation is done: + :math:`\langle \gamma_T\rangle = (DG - RG)`, where DG represents the mean shear + around the lenses and RG represents the mean shear around random points. + + The output file will include the following columns: + + ========== ============================================================= + Column Description + ========== ============================================================= + r_nom The nominal center of the bin in r + meanr The mean value :math:`\langle r \rangle` of pairs that fell + into each bin + meanlogr The mean value :math:`\langle \log(r) \rangle` of pairs that + fell into each bin + gamT The real part of the mean tangential shear, + :math:`\langle \gamma_T \rangle(r)` + gamX The imaginary part of the mean tangential shear, + :math:`\langle \gamma_\times \rangle(r)` + sigma The sqrt of the variance estimate of either of these + weight The total weight contributing to each bin + npairs The total number of pairs in each bin + ========== ============================================================= + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + rg (NGCorrelation): The cross-correlation using random locations as the lenses + (RG), if desired. (default: None) + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing NG correlations to %s',file_name) + self.calculateXi(rg=rg) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
+ + @property + def _write_col_names(self): + return ['r_nom','meanr','meanlogr','gamT','gamX','sigma','weight','npairs'] + + @property + def _write_data(self): + data = [ self.rnom, self.meanr, self.meanlogr, + self.xi, self.xi_im, np.sqrt(self.varxi), self.weight, self.npairs ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `NGCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading NG correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + self.meanr = data['meanR'].reshape(s) + self.meanlogr = data['meanlogR'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meanr = data['meanr'].reshape(s) + self.meanlogr = data['meanlogr'].reshape(s) + self.xi = data['gamT'].reshape(s) + self.xi_im = data['gamX'].reshape(s) + self.varxi = data['sigma'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.npairs = data['npairs'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.raw_xi = self.xi + self.raw_xi_im = self.xi_im + self.raw_varxi = self.varxi + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1) + +
[docs] @depr_pos_kwargs + def calculateNMap(self, *, R=None, rg=None, m2_uform=None): + r"""Calculate the aperture mass statistics from the correlation function. + + .. math:: + + \langle N M_{ap} \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{R^2} + T_\times\left(\frac{r}{R}\right) \Re\xi(r) \\ + \langle N M_{\times} \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{R^2} + T_\times\left(\frac{r}{R}\right) \Im\xi(r) + + The ``m2_uform`` parameter sets which definition of the aperture mass to use. + The default is to use 'Crittenden'. + + If ``m2_uform`` is 'Crittenden': + + .. math:: + + U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\ + T_\times(s) &= \frac{s^2}{128} (12-s^2) \exp(-s^2/4) + + cf. Crittenden, et al (2002): ApJ, 568, 20 + + If ``m2_uform`` is 'Schneider': + + .. math:: + + U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\ + T_\times(s) &= \frac{18}{\pi} s^2 \arccos(s/2) \\ + &\qquad - \frac{3}{40\pi} s^3 \sqrt{4-s^2} (196 - 74s^2 + 14s^4 - s^6) + + cf. Schneider, et al (2002): A&A, 389, 729 + + In neither case is this formula in the above papers, but the derivation is similar + to the derivations of :math:`T_+` and :math:`T_-` in Schneider et al. (2002). + + Parameters: + R (array): The R values at which to calculate the aperture mass statistics. + (default: None, which means use self.rnom) + rg (NGCorrelation): The cross-correlation using random locations as the lenses + (RG), if desired. (default: None) + m2_uform (str): Which form to use for the aperture mass, as described above. + (default: 'Crittenden'; this value can also be given in the + constructor in the config dict.) + + Returns: + Tuple containing + + - nmap = array of :math:`\langle N M_{ap} \rangle(R)` + - nmx = array of :math:`\langle N M_{\times} \rangle(R)` + - varnmap = array of variance estimates of the above values + """ + if m2_uform is None: + m2_uform = self.config.get('m2_uform','Crittenden') + if m2_uform not in ['Crittenden', 'Schneider']: + raise ValueError("Invalid m2_uform") + if R is None: + R = self.rnom + + # Make s a matrix, so we can eventually do the integral by doing a matrix product. + s = np.outer(1./R, self.meanr) + ssq = s*s + if m2_uform == 'Crittenden': + exp_factor = np.exp(-ssq/4.) + Tx = ssq * (12. - ssq) / 128. * exp_factor + else: + Tx = np.zeros_like(s) + sa = s[s<2.] + ssqa = ssq[s<2.] + Tx[s<2.] = 196. + ssqa*(-74. + ssqa*(14. - ssqa)) + Tx[s<2.] *= -3./(40.*np.pi) * sa * ssqa * np.sqrt(4.-sa**2) + Tx[s<2.] += 18./np.pi * ssqa * np.arccos(sa/2.) + Tx *= ssq + + xi, xi_im, varxi = self.calculateXi(rg=rg) + + # Now do the integral by taking the matrix products. + # Note that dlogr = bin_size + Txxi = Tx.dot(xi) + Txxi_im = Tx.dot(xi_im) + nmap = Txxi * self.bin_size + nmx = Txxi_im * self.bin_size + + # The variance of each of these is + # Var(<NMap>(R)) = int_r=0..2R [s^4 dlogr^2 Tx(s)^2 Var(xi)] + varnmap = (Tx**2).dot(varxi) * self.bin_size**2 + + return nmap, nmx, varnmap
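A sketch of evaluating the aperture mass statistics on a custom grid of aperture radii (this assumes the ``ng`` and ``rg`` objects from the sketch above; the radii are hypothetical and in the same units as the separations):

>>> import numpy as np
>>> R = np.exp(np.linspace(np.log(2.), np.log(50.), 10))    # hypothetical aperture radii
>>> nmap, nmx, varnmap = ng.calculateNMap(R=R, rg=rg, m2_uform='Crittenden')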
+ +
[docs] @depr_pos_kwargs + def writeNMap(self, file_name, *, R=None, rg=None, m2_uform=None, file_type=None, + precision=None): + r"""Write the cross correlation of the foreground galaxy counts with the aperture mass + based on the correlation function to the file, file_name. + + If rg is provided, the compensated calculation will be used for :math:`\xi`. + + See `calculateNMap` for an explanation of the ``m2_uform`` parameter. + + The output file will include the following columns: + + ========== ========================================================= + Column Description + ========== ========================================================= + R The radius of the aperture. + NMap An estimate of :math:`\langle N_{ap} M_{ap} \rangle(R)` + NMx An estimate of :math:`\langle N_{ap} M_\times \rangle(R)` + sig_nmap The sqrt of the variance estimate of either of these + ========== ========================================================= + + + Parameters: + file_name (str): The name of the file to write to. + R (array): The R values at which to calculate the aperture mass statistics. + (default: None, which means use self.rnom) + rg (NGCorrelation): The cross-correlation using random locations as the lenses + (RG), if desired. (default: None) + m2_uform (str): Which form to use for the aperture mass. (default: 'Crittenden'; + this value can also be given in the constructor in the config dict.) + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + """ + self.logger.info('Writing NMap from NG correlations to %s',file_name) + if R is None: + R = self.rnom + + nmap, nmx, varnmap = self.calculateNMap(R=R, rg=rg, m2_uform=m2_uform) + if precision is None: + precision = self.config.get('precision', 4) + + col_names = ['R','NMap','NMx','sig_nmap'] + columns = [ R, nmap, nmx, np.sqrt(varnmap) ] + writer = make_writer(file_name, precision, file_type, logger=self.logger) + with writer: + writer.write(col_names, columns)
+ +
[docs] @depr_pos_kwargs + def writeNorm(self, file_name, *, gg, dd, rr, R=None, dr=None, rd=None, rg=None, + m2_uform=None, file_type=None, precision=None): + r"""Write the normalized aperture mass cross-correlation to the file, file_name. + + The combination :math:`\langle N M_{ap}\rangle^2 / \langle M_{ap}^2\rangle + \langle N_{ap}^2\rangle` is related to :math:`r`, the galaxy-mass correlation + coefficient. Similarly, :math:`\langle N_{ap}^2\rangle / \langle M_{ap}^2\rangle` + is related to :math:`b`, the galaxy bias parameter. cf. Hoekstra et al, 2002: + http://adsabs.harvard.edu/abs/2002ApJ...577..604H + + This function computes these combinations and outputs them to a file. + + - if rg is provided, the compensated calculation will be used for + :math:`\langle N_{ap} M_{ap} \rangle`. + - if dr is provided, the compensated calculation will be used for + :math:`\langle N_{ap}^2 \rangle`. + + See `calculateNMap` for an explanation of the ``m2_uform`` parameter. + + The output file will include the following columns: + + ========== ===================================================================== + Column Description + ========== ===================================================================== + R The radius of the aperture + NMap An estimate of :math:`\langle N_{ap} M_{ap} \rangle(R)` + NMx An estimate of :math:`\langle N_{ap} M_\times \rangle(R)` + sig_nmap The sqrt of the variance estimate of either of these + Napsq An estimate of :math:`\langle N_{ap}^2 \rangle(R)` + sig_napsq The sqrt of the variance estimate of :math:`\langle N_{ap}^2 \rangle` + Mapsq An estimate of :math:`\langle M_{ap}^2 \rangle(R)` + sig_mapsq The sqrt of the variance estimate of :math:`\langle M_{ap}^2 \rangle` + NMap_norm The ratio :math:`\langle N_{ap} M_{ap} \rangle^2 /` + :math:`\langle N_{ap}^2 \rangle \langle M_{ap}^2 \rangle` + sig_norm The sqrt of the variance estimate of this ratio + Nsq_Mapsq The ratio :math:`\langle N_{ap}^2 \rangle / \langle M_{ap}^2 \rangle` + sig_nn_mm The sqrt of the variance estimate of this ratio + ========== ===================================================================== + + Parameters: + file_name (str): The name of the file to write to. + gg (GGCorrelation): The auto-correlation of the shear field + dd (NNCorrelation): The auto-correlation of the lens counts (DD) + rr (NNCorrelation): The auto-correlation of the random field (RR) + R (array): The R values at which to calculate the aperture mass statistics. + (default: None, which means use self.rnom) + dr (NNCorrelation): The cross-correlation of the data with randoms (DR), if + desired, in which case the Landy-Szalay estimator will be + calculated. (default: None) + rd (NNCorrelation): The cross-correlation of the randoms with data (RD), if + desired. (default: None, which means use rd=dr) + rg (NGCorrelation): The cross-correlation using random locations as the lenses + (RG), if desired. (default: None) + m2_uform (str): Which form to use for the aperture mass. (default: 'Crittenden'; + this value can also be given in the constructor in the config dict.) + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.)
+ """ + self.logger.info('Writing Norm from NG correlations to %s',file_name) + if R is None: + R = self.rnom + + nmap, nmx, varnmap = self.calculateNMap(R=R, rg=rg, m2_uform=m2_uform) + mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(R=R, m2_uform=m2_uform) + nsq, varnsq = dd.calculateNapSq(R=R, rr=rr, dr=dr, m2_uform=m2_uform) + + nmnorm = nmap**2 / (nsq * mapsq) + varnmnorm = nmnorm**2 * (4. * varnmap / nmap**2 + varnsq / nsq**2 + varmapsq / mapsq**2) + nnnorm = nsq / mapsq + varnnnorm = nnnorm**2 * (varnsq / nsq**2 + varmapsq / mapsq**2) + if precision is None: + precision = self.config.get('precision', 4) + + col_names = [ 'R', + 'NMap','NMx','sig_nmap', + 'Napsq','sig_napsq','Mapsq','sig_mapsq', + 'NMap_norm','sig_norm','Nsq_Mapsq','sig_nn_mm' ] + columns = [ R, + nmap, nmx, np.sqrt(varnmap), + nsq, np.sqrt(varnsq), mapsq, np.sqrt(varmapsq), + nmnorm, np.sqrt(varnmnorm), nnnorm, np.sqrt(varnnnorm) ] + writer = make_writer(file_name, precision, file_type, logger=self.logger) + with writer: + writer.write(col_names, columns)
\ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/nkcorrelation.html b/docs/_build/html/_modules/treecorr/nkcorrelation.html new file mode 100644 index 00000000..1fe0280c --- /dev/null +++ b/docs/_build/html/_modules/treecorr/nkcorrelation.html @@ -0,0 +1,688 @@ treecorr.nkcorrelation — TreeCorr 4.3.0 documentation

Source code for treecorr.nkcorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: nkcorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .catalog import calculateVarK
+from .binnedcorr2 import BinnedCorr2
+from .util import double_ptr as dp
+from .util import make_writer, make_reader
+from .util import depr_pos_kwargs
+
+
+
[docs]class NKCorrelation(BinnedCorr2): + r"""This class handles the calculation and storage of a 2-point count-kappa correlation + function. + + .. note:: + + While we use the term kappa (:math:`\kappa`) here and the letter K in various places, + in fact any scalar field will work here. For example, you can use this to compute + correlations of non-shear quantities, e.g. the sizes or concentrations of galaxies, around + a set of lenses, where "kappa" would be the measurements of these quantities. + + Objects of this class hold the following attributes: + + Attributes: + nbins: The number of bins in logr + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + + In addition, the following attributes are numpy arrays of length (nbins): + + Attributes: + logr: The nominal center of the bin in log(r) (the natural logarithm of r). + rnom: The nominal center of the bin converted to regular distance. + i.e. r = exp(logr). + meanr: The (weighted) mean value of r for the pairs in each bin. + If there are no pairs in a bin, then exp(logr) will be used instead. + meanlogr: The (weighted) mean value of log(r) for the pairs in each bin. + If there are no pairs in a bin, then logr will be used instead. + xi: The correlation function, :math:`\xi(r) = \langle \kappa\rangle`. + varxi: An estimate of the variance of :math:`\xi` + weight: The total weight in each bin. + npairs: The number of pairs going into each bin (including pairs where one or + both objects have w=0). + cov: An estimate of the full covariance matrix. + raw_xi: The raw value of xi, uncorrected by an RK calculation. cf. `calculateXi` + raw_varxi: The raw value of varxi, uncorrected by an RK calculation. cf. `calculateXi` + + .. note:: + + The default method for estimating the variance and covariance attributes (``varxi`` + and ``cov``) is 'shot', which only includes the shot noise propagated into + the final correlation. This does not include sample variance, so it is always an + underestimate of the actual variance. To get better estimates, you need to set + ``var_method`` to something else and use patches in the input catalog(s). + cf. `Covariance Estimates`. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_cross`, + then the units will not be applied to ``meanr`` or ``meanlogr`` until the `finalize` + function is called. + + The typical usage pattern is as follows: + + >>> nk = treecorr.NKCorrelation(config) + >>> nk.process(cat1,cat2) # Compute the cross-correlation function. + >>> nk.write(file_name) # Write out to a file. + >>> xi = nk.xi # Or access the correlation function directly. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr2`, which are ignored here. (default: None) + logger: If desired, a logger object for logging. (default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
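Following the note above that any scalar field can play the role of kappa, a minimal sketch (the file, column, and catalog names are hypothetical) measuring the mean galaxy size as a function of distance from a set of lenses:

>>> import treecorr
>>> size_cat = treecorr.Catalog('gals.fits', ra_col='RA', dec_col='DEC',
...                             ra_units='deg', dec_units='deg', k_col='SIZE')
>>> nk = treecorr.NKCorrelation(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
>>> nk.process(lens_cat, size_cat)    # xi = mean size in each separation bin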
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `NKCorrelation`. See class doc for details. + """ + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 1 # NData + self._ro._d2 = 2 # KData + self.xi = np.zeros_like(self.rnom, dtype=float) + self.varxi = np.zeros_like(self.rnom, dtype=float) + self.meanr = np.zeros_like(self.rnom, dtype=float) + self.meanlogr = np.zeros_like(self.rnom, dtype=float) + self.weight = np.zeros_like(self.rnom, dtype=float) + self.npairs = np.zeros_like(self.rnom, dtype=float) + self.raw_xi = self.xi + self.raw_varxi = self.varxi + self._rk = None + self.logger.debug('Finished building NKCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr2( + self._d1, self._d2, self._bintype, + self._min_sep,self._max_sep,self._nbins,self._bin_size,self.b, + self.min_rpar, self.max_rpar, self.xperiod, self.yperiod, self.zperiod, + dp(self.raw_xi), dp(None), dp(None), dp(None), + dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr2(self.corr, self._d1, self._d2, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `NKCorrelation` instances are equal""" + return (isinstance(other, NKCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.min_rpar == other.min_rpar and + self.max_rpar == other.max_rpar and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + np.array_equal(self.meanr, other.meanr) and + np.array_equal(self.meanlogr, other.meanlogr) and + np.array_equal(self.xi, other.xi) and + np.array_equal(self.varxi, other.varxi) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.npairs, other.npairs))
+ +
[docs] def copy(self): + """Make a copy""" + ret = NKCorrelation.__new__(NKCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular don't deep copy config or logger + # Most of the rest are scalars, which copy fine this way. + # And the read-only things are all in _ro. + # The results dict is trickier. We rely on it being copied in places, but we + # never add more to it after the copy, so shallow copy is fine. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + if self.xi is self.raw_xi: + ret.raw_xi = ret.xi + ret.raw_varxi = ret.varxi + else: + ret.raw_xi = self.raw_xi.copy() + ret.raw_varxi = self.raw_varxi.copy() + if self._rk is not None: + ret._rk = self._rk.copy() + return ret
+ +
[docs] def __repr__(self): + return 'NKCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NK cross-correlations') + else: + self.logger.info('Starting process NK cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ +
[docs] @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation, only using + the corresponding pairs of objects in each catalog. + + This accumulates the weighted sums into the bins, but does not finalize + the calculation by dividing by the total weight at the end. After + calling this function as often as desired, the `finalize` command will + finish the calculation. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + import warnings + warnings.warn("The process_pairwise function is slated to be removed in a future version. "+ + "If you are actually using this function usefully, please "+ + "open an issue to describe your use case.", FutureWarning) + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NK pairwise-correlations') + else: + self.logger.info('Starting process NK pairwise-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + + f1 = cat1.getNSimpleField() + f2 = cat2.getKSimpleField() + + _lib.ProcessPair(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric)
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.raw_xi[mask1] /= self.weight[mask1] + self.meanr[mask1] /= self.weight[mask1] + self.meanlogr[mask1] /= self.weight[mask1] + + # Update the units of meanr, meanlogr + self._apply_units(mask1) + + # Use meanr, meanlogr when available, but set to nominal when no pairs in bin. + self.meanr[mask2] = self.rnom[mask2] + self.meanlogr[mask2] = self.logr[mask2] + +
[docs] def finalize(self, vark): + """Finalize the calculation of the correlation function. + + The `process_cross` command accumulates values in each bin, so it can be called + multiple times if appropriate. Afterwards, this command finishes the calculation + by dividing each column by the total weight. + + Parameters: + vark: The kappa variance for the second field. + """ + self._finalize() + self._var_num = vark + self.cov = self.estimate_cov(self.var_method) + self.raw_varxi.ravel()[:] = self.cov.diagonal()
+ + def _clear(self): + """Clear the data vectors + """ + self.raw_xi.ravel()[:] = 0 + self.raw_varxi.ravel()[:] = 0 + self.meanr.ravel()[:] = 0 + self.meanlogr.ravel()[:] = 0 + self.weight.ravel()[:] = 0 + self.npairs.ravel()[:] = 0 + self.xi = self.raw_xi + self.varxi = self.raw_varxi + +
[docs] def __iadd__(self, other): + """Add a second `NKCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `NKCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, NKCorrelation): + raise TypeError("Can only add another NKCorrelation object") + if not (self._nbins == other._nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep): + raise ValueError("NKCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords) + self.raw_xi.ravel()[:] += other.raw_xi.ravel()[:] + self.meanr.ravel()[:] += other.meanr.ravel()[:] + self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:] + self.weight.ravel()[:] += other.weight.ravel()[:] + self.npairs.ravel()[:] += other.npairs.ravel()[:] + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + np.sum([c.raw_xi for c in others], axis=0, out=self.raw_xi) + np.sum([c.meanr for c in others], axis=0, out=self.meanr) + np.sum([c.meanlogr for c in others], axis=0, out=self.meanlogr) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.npairs for c in others], axis=0, out=self.npairs) + self.xi = self.raw_xi + self.varxi = self.raw_varxi + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, + initialize=True, finalize=True): + """Compute the correlation function. + + Both arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the N field. + cat2 (Catalog): A catalog or list of catalogs for the K field. + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr2.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + self._rk = None + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + + self._process_all_cross(cat1, cat2, metric, num_threads, comm, low_mem) + + if finalize: + vark = calculateVarK(cat2, low_mem=low_mem) + self.logger.info("vark = %f: sig_k = %f",vark,math.sqrt(vark)) + self.finalize(vark)
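A sketch of running the calculation under MPI via the ``comm`` option (this assumes both catalogs were built with patches; the names are hypothetical):

>>> from mpi4py import MPI
>>> comm = MPI.COMM_WORLD
>>> nk.process(lens_cat, size_cat, comm=comm, low_mem=True)
>>> if comm.rank == 0:    # only the rank=0 process has the final computation
...     nk.write('nk.out')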
+ +
[docs] @depr_pos_kwargs + def calculateXi(self, *, rk=None): + r"""Calculate the correlation function, possibly given another correlation function + that uses random points for the foreground objects. + + - If rk is None, the simple correlation function :math:`\langle \kappa \rangle` is + returned. + - If rk is not None, then a compensated calculation is done: + :math:`\langle \kappa \rangle = (DK - RK)`, where DK represents the mean kappa + around the lenses and RK represents the mean kappa around random points. + + After calling this function, the attributes ``xi``, ``varxi``, and ``cov`` will correspond + to the compensated values (if rk is provided). The raw, uncompensated values are + available as ``raw_xi`` and ``raw_varxi``. + + Parameters: + rk (NKCorrelation): The cross-correlation using random locations as the lenses (RK), + if desired. (default: None) + + Returns: + Tuple containing + + - xi = array of :math:`\xi(r)` + - varxi = array of variance estimates of :math:`\xi(r)` + """ + if rk is not None: + self.xi = self.raw_xi - rk.xi + self._rk = rk + + if rk.npatch1 not in (1,self.npatch1) or rk.npatch2 != self.npatch2: + raise RuntimeError("RK must be run with the same patches as DK") + + if len(self.results) > 0: + # If there are any rk patch pairs that aren't in results (e.g. due to different + # edge effects among the various pairs in consideration), then we need to add + # some dummy results to make sure all the right pairs are computed when we make + # the vectors for the covariance matrix. + template = next(iter(self.results.values())) # Just need something to copy. + for ij in rk.results: + if ij in self.results: continue + new_cij = template.copy() + new_cij.xi.ravel()[:] = 0 + new_cij.weight.ravel()[:] = 0 + self.results[ij] = new_cij + + self.cov = self.estimate_cov(self.var_method) + self.varxi.ravel()[:] = self.cov.diagonal() + else: + self.varxi = self.raw_varxi + rk.varxi + else: + self.xi = self.raw_xi + self.varxi = self.raw_varxi + + return self.xi, self.varxi
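The compensated calculation follows the same pattern as for `NGCorrelation.calculateXi` (a sketch; ``rand_cat`` and the other names are hypothetical, continuing the example above):

>>> rk = treecorr.NKCorrelation(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
>>> rk.process(rand_cat, size_cat)     # RK: mean kappa around random points
>>> xi, varxi = nk.calculateXi(rk=rk)  # compensated: DK - RK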
+ + def _calculate_xi_from_pairs(self, pairs): + self._sum([self.results[ij] for ij in pairs]) + self._finalize() + if self._rk is not None: + # If rk has npatch1 = 1, adjust pairs appropriately + if self._rk.npatch1 == 1: + pairs = [(0,ij[1]) for ij in pairs if ij[0] == ij[1]] + # Make sure all ij are in the rk results (some might be missing, which is ok) + pairs = [ij for ij in pairs if self._rk._ok[ij[0],ij[1]]] + self._rk._calculate_xi_from_pairs(pairs) + self.xi -= self._rk.xi + +
[docs] def write(self, file_name, *, rk=None, file_type=None, precision=None, + write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + - If rk is None, the simple correlation function :math:`\langle \kappa \rangle(R)` is + used. + - If rk is not None, then a compensated calculation is done: + :math:`\langle \kappa \rangle = (DK - RK)`, where DK represents the mean kappa + around the lenses and RK represents the mean kappa around random points. + + The output file will include the following columns: + + ========== ========================================================= + Column Description + ========== ========================================================= + r_nom The nominal center of the bin in r + meanr The mean value :math:`\langle r\rangle` of pairs that + fell into each bin + meanlogr The mean value :math:`\langle \log(r)\rangle` of pairs + that fell into each bin + kappa The mean value :math:`\langle \kappa\rangle(r)` + sigma The sqrt of the variance estimate of + :math:`\langle \kappa\rangle` + weight The total weight contributing to each bin + npairs The total number of pairs in each bin + ========== ========================================================= + + If ``sep_units`` was given at construction, then the distances will all be in these units. + Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or + radians (for spherical coordinates). + + Parameters: + file_name (str): The name of the file to write to. + rk (NKCorrelation): The cross-correlation using random locations as the lenses (RK), + if desired. (default: None) + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing NK correlations to %s',file_name) + self.calculateXi(rk=rk) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results)
+ + @property + def _write_col_names(self): + return ['r_nom','meanr','meanlogr','kappa','sigma','weight','npairs'] + + @property + def _write_data(self): + data = [ self.rnom, self.meanr, self.meanlogr, + self.xi, np.sqrt(self.varxi), self.weight, self.npairs ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `NKCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading NK correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + self.meanr = data['meanR'].reshape(s) + self.meanlogr = data['meanlogR'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meanr = data['meanr'].reshape(s) + self.meanlogr = data['meanlogr'].reshape(s) + self.xi = data['kappa'].reshape(s) + self.varxi = data['sigma'].reshape(s)**2 + self.weight = data['weight'].reshape(s) + self.npairs = data['npairs'].reshape(s) + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.raw_xi = self.xi + self.raw_varxi = self.varxi + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1)
\ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/nncorrelation.html b/docs/_build/html/_modules/treecorr/nncorrelation.html new file mode 100644 index 00000000..e4ef9102 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/nncorrelation.html @@ -0,0 +1,1067 @@ treecorr.nncorrelation — TreeCorr 4.3.0 documentation

Source code for treecorr.nncorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: nncorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .binnedcorr2 import BinnedCorr2
+from .util import double_ptr as dp
+from .util import make_writer, make_reader, lazy_property
+from .util import depr_pos_kwargs
+
+
+
[docs]class NNCorrelation(BinnedCorr2): + r"""This class handles the calculation and storage of a 2-point count-count correlation + function, i.e. the regular density correlation function. + + Objects of this class hold the following attributes: + + Attributes: + nbins: The number of bins in logr + bin_size: The size of the bins in logr + min_sep: The minimum separation being considered + max_sep: The maximum separation being considered + + In addition, the following attributes are numpy arrays of length (nbins): + + Attributes: + logr: The nominal center of the bin in log(r) (the natural logarithm of r). + rnom: The nominal center of the bin converted to regular distance. + i.e. r = exp(logr). + meanr: The (weighted) mean value of r for the pairs in each bin. + If there are no pairs in a bin, then exp(logr) will be used instead. + meanlogr: The (weighted) mean value of log(r) for the pairs in each bin. + If there are no pairs in a bin, then logr will be used instead. + weight: The total weight in each bin. + npairs: The number of pairs going into each bin (including pairs where one or + both objects have w=0). + tot: The total number of pairs processed, which is used to normalize + the randoms if they have a different number of pairs. + + If `calculateXi` has been called, then the following will also be available: + + Attributes: + xi: The correlation function, :math:`\xi(r)` + varxi: An estimate of the variance of :math:`\xi` + cov: An estimate of the full covariance matrix. + + If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances + will all be in these units. + + .. note:: + + If you separate out the steps of the `process` command and use `process_auto` and/or + `process_cross`, then the units will not be applied to ``meanr`` or ``meanlogr`` until + the `finalize` function is called. + + The typical usage pattern is as follows: + + >>> nn = treecorr.NNCorrelation(config) + >>> nn.process(cat) # For auto-correlation. + >>> nn.process(cat1,cat2) # For cross-correlation. + >>> rr.process... # Likewise for random-random correlations + >>> dr.process... # If desired, also do data-random correlations + >>> rd.process... # For cross-correlations, also do the reverse. + >>> nn.write(file_name,rr=rr,dr=dr,rd=rd) # Write out to a file. + >>> xi,varxi = nn.calculateXi(rr=rr,dr=dr,rd=rd) # Or get correlation function directly. + + Parameters: + config (dict): A configuration dict that can be used to pass in kwargs if desired. + This dict is allowed to have additional entries besides those listed + in `BinnedCorr2`, which are ignored here. (default: None) + logger: If desired, a logger object for logging. (default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `NNCorrelation`. See class doc for details. + """ + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 1 # NData + self._ro._d2 = 1 # NData + self.meanr = np.zeros_like(self.rnom, dtype=float) + self.meanlogr = np.zeros_like(self.rnom, dtype=float) + self.weight = np.zeros_like(self.rnom, dtype=float) + self.npairs = np.zeros_like(self.rnom, dtype=float) + self.tot = 0. + self._rr_weight = None # Marker that calculateXi hasn't been called yet. + self._rr = None + self._dr = None + self._rd = None + self._write_rr = None + self._write_dr = None + self._write_rd = None + self.logger.debug('Finished building NNCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr2( + self._d1, self._d2, self._bintype, + self._min_sep,self._max_sep,self._nbins,self._bin_size,self.b, + self.min_rpar, self.max_rpar, self.xperiod, self.yperiod, self.zperiod, + dp(None), dp(None), dp(None), dp(None), + dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr2(self.corr, self._d1, self._d2, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `NNCorrelation` instances are equal""" + return (isinstance(other, NNCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.min_rpar == other.min_rpar and + self.max_rpar == other.max_rpar and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + self.tot == other.tot and + np.array_equal(self.meanr, other.meanr) and + np.array_equal(self.meanlogr, other.meanlogr) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.npairs, other.npairs))
+ +
[docs] def copy(self): + """Make a copy""" + ret = NNCorrelation.__new__(NNCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, np.ndarray): + # Only items that might change need to be deep copied. + ret.__dict__[key] = item.copy() + else: + # For everything else, shallow copy is fine. + # In particular don't deep copy config or logger + # Most of the rest are scalars, which copy fine this way. + # And the read-only things are all in _ro. + # The results dict is trickier. We rely on it being copied in places, but we + # never add more to it after the copy, so shallow copy is fine. + ret.__dict__[key] = item + ret._corr = None # We'll want to make a new one of these if we need it. + if self._rd is not None: + ret._rd = self._rd.copy() + if self._dr is not None: + ret._dr = self._dr.copy() + if self._rr is not None: + ret._rr = self._rr.copy() + return ret
+ + @lazy_property + def _zero_array(self): + # An array of all zeros with the same shape as self.weight (and other data arrays) + z = np.zeros_like(self.weight) + z.flags.writeable=False # Just to make sure we get an error if we try to change it. + return z + + def _zero_copy(self, tot): + # A minimal "copy" with zero for the weight array, and the given value for tot. + ret = NNCorrelation.__new__(NNCorrelation) + ret._ro = self._ro + ret.coords = self.coords + ret.metric = self.metric + ret.config = self.config + ret.meanr = self._zero_array + ret.meanlogr = self._zero_array + ret.weight = self._zero_array + ret.npairs = self._zero_array + ret.tot = tot + ret._corr = None + ret._rr = ret._dr = ret._rd = None + ret._write_rr = ret._write_dr = ret._write_rd = None + # This override is really the main advantage of using this: + setattr(ret, '_nonzero', False) + return ret + +
[docs] def __repr__(self): + return 'NNCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): + """Process a single catalog, accumulating the auto-correlation. + + This accumulates the auto-correlation for the given catalog. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meanr, meanlogr. + + Parameters: + cat (Catalog): The catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat.name == '': + self.logger.info('Starting process NN auto-correlations') + else: + self.logger.info('Starting process NN auto-correlations for cat %s.', cat.name) + + self._set_metric(metric, cat.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + field = cat.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',field.nTopLevelNodes) + _lib.ProcessAuto2(self.corr, field.data, self.output_dots, + field._d, self._coords, self._bintype, self._metric) + self.tot += 0.5 * cat.sumw**2
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meanr, meanlogr. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NN cross-correlations') + else: + self.logger.info('Starting process NN cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric) + self.tot += cat1.sumw*cat2.sumw
+ +
[docs] @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): + """Process a single pair of catalogs, accumulating the cross-correlation, only using + the corresponding pairs of objects in each catalog. + + This accumulates the sums into the bins, but does not finalize the calculation. + After calling this function as often as desired, the `finalize` command will + finish the calculation. + + .. warning:: + + .. deprecated:: 4.1 + + This function is deprecated and slated to be removed. + If you have a need for it, please open an issue to describe your use case. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + import warnings + warnings.warn("The process_pairwise function is slated to be removed in a future version. "+ + "If you are actually using this function usefully, please "+ + "open an issue to describe your use case.", FutureWarning) + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NN pairwise-correlations') + else: + self.logger.info('Starting process NN pairwise-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + self._set_num_threads(num_threads) + + f1 = cat1.getNSimpleField() + f2 = cat2.getNSimpleField() + + _lib.ProcessPair(self.corr, f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, self._bintype, self._metric) + self.tot += (cat1.sumw+cat2.sumw)/2.
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.meanr[mask1] /= self.weight[mask1] + self.meanlogr[mask1] /= self.weight[mask1] + + # Update the units of meanr, meanlogr + self._apply_units(mask1) + + # Use meanr, meanlogr when available, but set to nominal when no pairs in bin. + self.meanr[mask2] = self.rnom[mask2] + self.meanlogr[mask2] = self.logr[mask2] + +
[docs] def finalize(self): + """Finalize the calculation of the correlation function. + + The `process_auto` and `process_cross` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation of meanr, meanlogr by dividing by the total weight. + """ + self._finalize()
+ + @lazy_property + def _nonzero(self): + # The lazy version when we can be sure the object isn't going to accumulate any more. + return self.nonzero + + def _clear(self): + """Clear the data vectors + """ + self.meanr.ravel()[:] = 0. + self.meanlogr.ravel()[:] = 0. + self.weight.ravel()[:] = 0. + self.npairs.ravel()[:] = 0. + self.tot = 0. + +
[docs] def __iadd__(self, other): + """Add a second `NNCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `NNCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, NNCorrelation): + raise TypeError("Can only add another NNCorrelation object") + if not (self._nbins == other._nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep): + raise ValueError("NNCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords) + self.meanr.ravel()[:] += other.meanr.ravel()[:] + self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:] + self.weight.ravel()[:] += other.weight.ravel()[:] + self.npairs.ravel()[:] += other.npairs.ravel()[:] + self.tot += other.tot + return self
+ + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + tot = np.sum([c.tot for c in others]) + # Empty ones were only needed for tot. Remove them now. + others = [c for c in others if c._nonzero] + if len(others) == 0: + self._clear() + else: + np.sum([c.meanr for c in others], axis=0, out=self.meanr) + np.sum([c.meanlogr for c in others], axis=0, out=self.meanlogr) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.npairs for c in others], axis=0, out=self.npairs) + self.tot = tot + + def _add_tot(self, i, j, c1, c2): + # When storing results from a patch-based run, tot needs to be accumulated even if + # the total weight being accumulated comes out to be zero. + # This only applies to NNCorrelation. For the other ones, this is a no op. + tot = c1.sumw * c2.sumw + self.tot += tot + # We also have to keep all pairs in the results dict, otherwise the tot calculation + # gets messed up. We need to accumulate the tot value of all pairs, even if + # the resulting weight is zero. But use a minimal copy with just the necessary fields + # to save some time. + self.results[(i,j)] = self._zero_copy(tot) + +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, + initialize=True, finalize=True): + """Compute the correlation function. + + - If only 1 argument is given, then compute an auto-correlation function. + - If 2 arguments are given, then compute a cross-correlation function. + + Both arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first N field. + cat2 (Catalog): A catalog or list of catalogs for the second N field, if any. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr2.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + if initialize: + self.clear() + + if not isinstance(cat1,list): + cat1 = cat1.get_patches(low_mem=low_mem) + if cat2 is not None and not isinstance(cat2,list): + cat2 = cat2.get_patches(low_mem=low_mem) + + if cat2 is None or len(cat2) == 0: + self._process_all_auto(cat1, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, metric, num_threads, comm, low_mem) + + if finalize: + self.finalize()
+ + def _mean_weight(self): + mean_np = np.mean(self.npairs) + return 1 if mean_np == 0 else np.mean(self.weight)/mean_np + +
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + This raises a RuntimeError if calculateXi has not been run yet. + """ + if self._rr is None: + raise RuntimeError("You need to call calculateXi before calling estimate_cov.") + return self.xi.ravel()
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + This is the weight array corresponding to `getStat`. In this case, it is the denominator + RR from the calculation done by calculateXi(). + """ + if self._rr_weight is not None: + return self._rr_weight.ravel() + else: + return self.tot
+ +
[docs] @depr_pos_kwargs + def calculateXi(self, *, rr, dr=None, rd=None): + r"""Calculate the correlation function given another correlation function of random + points using the same mask, and possibly cross-correlations of the data and randoms. + + The rr value is the `NNCorrelation` function for random points. + For a signal that involves a cross-correlation, there should be two random + cross-correlations: data-random and random-data, given as dr and rd. + + - If dr is None, the simple correlation function :math:`\xi = (DD/RR - 1)` is used. + - If dr is given and rd is None, then :math:`\xi = (DD - 2DR + RR)/RR` is used. + - If dr and rd are both given, then :math:`\xi = (DD - DR - RD + RR)/RR` is used. + + where DD is the data NN correlation function, which is the current object. + + .. note:: + + The default method for estimating the variance is 'shot', which only includes the + shot noise propagated into the final correlation. This does not include sample + variance, so it is always an underestimate of the actual variance. To get better + estimates, you need to set ``var_method`` to something else and use patches in the + input catalog(s). cf. `Covariance Estimates`. + + After calling this method, you can use the `BinnedCorr2.estimate_cov` method or use this + correlation object in the `estimate_multi_cov` function. Also, the calculated xi and + varxi returned from this function will be available as attributes. + + Parameters: + rr (NNCorrelation): The auto-correlation of the random field (RR) + dr (NNCorrelation): The cross-correlation of the data with randoms (DR), if + desired, in which case the Landy-Szalay estimator will be + calculated. (default: None) + rd (NNCorrelation): The cross-correlation of the randoms with data (RD), if + desired. (default: None, which means use rd=dr) + + Returns: + Tuple containing: + + - xi = array of :math:`\xi(r)` + - varxi = an estimate of the variance of :math:`\xi(r)` + """ + # Each random weight value needs to be rescaled by the ratio of total possible pairs. + if rr.tot == 0: + raise ValueError("rr has tot=0.") + + # rrf is the factor to scale rr weights to get something commensurate to the dd density. + rrf = self.tot / rr.tot + + # Likewise for the other two potential randoms: + if dr is not None: + if dr.tot == 0: + raise ValueError("dr has tot=0.") + drf = self.tot / dr.tot + if rd is not None: + if rd.tot == 0: + raise ValueError("rd has tot=0.") + rdf = self.tot / rd.tot + + # Calculate xi based on which randoms are provided. + denom = rr.weight * rrf + if dr is None and rd is None: + self.xi = self.weight - denom + elif rd is not None and dr is None: + self.xi = self.weight - 2.*rd.weight * rdf + denom + elif dr is not None and rd is None: + self.xi = self.weight - 2.*dr.weight * drf + denom + else: + self.xi = self.weight - rd.weight * rdf - dr.weight * drf + denom + + # Divide by RR in all cases. + if np.any(rr.weight == 0): + self.logger.warning("Warning: Some bins for the randoms had no pairs.") + denom[rr.weight==0] = 1. # guard against division by 0. + self.xi /= denom + + # Set up necessary info for estimate_cov + + # First the bits needed for shot noise covariance: + ddw = self._mean_weight() + rrw = rr._mean_weight() + if dr is not None: + drw = dr._mean_weight() + if rd is not None: + rdw = rd._mean_weight() + + # Note: The use of varxi_factor for the shot noise varxi is semi-empirical. + # It gives the increase in the variance over the case where RR >> DD.
+ # I don't have a good derivation that this is the right factor to apply
+ # when the random catalog is not >> larger than the data.
+ # When I tried to derive this from first principles, I get the below formula,
+ # but without the **2. So I'm not sure why this factor needs to be squared.
+ # It seems at least plausible that I missed something in the derivation that
+ # leads to this getting squared, but I can't really justify it.
+ # But it's also possible that this is wrong...
+ # Anyway, it seems to give good results compared to the empirical variance.
+ # cf. test_nn.py:test_varxi
+ if dr is None and rd is None:
+ varxi_factor = 1 + rrf*rrw/ddw
+ elif rd is not None and dr is None:
+ varxi_factor = 1 + 2*rdf*rdw/ddw + rrf*rrw/ddw
+ elif dr is not None and rd is None:
+ varxi_factor = 1 + 2*drf*drw/ddw + rrf*rrw/ddw
+ else:
+ varxi_factor = 1 + drf*drw/ddw + rdf*rdw/ddw + rrf*rrw/ddw
+ self._var_num = ddw * varxi_factor**2
+ self._rr_weight = rr.weight * rrf
+
+ # Now set up the bits needed for patch-based covariance
+ self._rr = rr
+ self._dr = dr
+ self._rd = rd
+
+ if len(self.results) > 0:
+ # Check that rr,dr,rd use the same patches as dd
+ if rr.npatch1 != 1 and rr.npatch2 != 1:
+ if rr.npatch1 != self.npatch1 or rr.npatch2 != self.npatch2:
+ raise RuntimeError("If using patches, RR must be run with the same patches "
+ "as DD")
+
+ if dr is not None and (len(dr.results) == 0 or dr.npatch1 != self.npatch1 or
+ dr.npatch2 not in (self.npatch2, 1)):
+ raise RuntimeError("DR must be run with the same patches as DD")
+ if rd is not None and (len(rd.results) == 0 or rd.npatch2 != self.npatch2 or
+ rd.npatch1 not in (self.npatch1, 1)):
+ raise RuntimeError("RD must be run with the same patches as DD")
+
+ # If there are any rr,rd,dr patch pairs that aren't in results (because dr is a cross
+ # correlation, and dd,rr may be auto-correlations, or because the d catalogs have some
+ # patches with no items), then we need to add some dummy results to make sure all the
+ # right pairs are computed when we make the vectors for the covariance matrix.
+ add_ij = set()
+ if rr.npatch1 != 1 and rr.npatch2 != 1:
+ for ij in rr.results:
+ if ij not in self.results:
+ add_ij.add(ij)
+
+ if dr is not None and dr.npatch2 != 1:
+ for ij in dr.results:
+ if ij not in self.results:
+ add_ij.add(ij)
+
+ if rd is not None and rd.npatch1 != 1:
+ for ij in rd.results:
+ if ij not in self.results:
+ add_ij.add(ij)
+
+ if len(add_ij) > 0:
+ for ij in add_ij:
+ self.results[ij] = self._zero_copy(0)
+ self.__dict__.pop('_ok',None) # If it was already made, it will need to be redone.
+
+ # Now that it's all set up, calculate the covariance and set varxi to the diagonal.
+ self.cov = self.estimate_cov(self.var_method)
+ self.varxi = self.cov.diagonal()
+ return self.xi, self.varxi
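As a minimal sketch of the typical Landy-Szalay workflow (``cat`` and ``rand`` here are hypothetical data and random `Catalog` objects, and the binning values are arbitrary):

>>> import treecorr
>>> dd = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20)
>>> dd.process(cat)                           # DD: auto-correlation of the data
>>> rr = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20)
>>> rr.process(rand)                          # RR: auto-correlation of the randoms
>>> dr = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20)
>>> dr.process(cat, rand)                     # DR: data-random cross-correlation
>>> xi, varxi = dd.calculateXi(rr=rr, dr=dr)  # Landy-Szalay estimator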
+
+ def _calculate_xi_from_pairs(self, pairs):
+ self._sum([self.results[ij] for ij in pairs])
+ self._finalize()
+ if self._rr is None:
+ return
+ dd = self.weight
+ if len(self._rr.results) > 0:
+ # This is the usual case. R has patches just like D.
+ # Calculate rr and rrf in the normal way based on the same pairs as used for DD.
+ pairs1 = [ij for ij in pairs if self._rr._ok[ij[0],ij[1]]]
+ self._rr._sum([self._rr.results[ij] for ij in pairs1])
+ dd_tot = self.tot
+ else:
+ # In this case, R was not run with patches.
+ # It turns out this is not necessarily much worse in practice.
+ # We just need to scale RR down by the relative area.
+ # The approximation we'll use is that tot in the auto-correlations is
+ # proportional to area**2.
+ # So the sum of tot**0.5 when i==j gives an estimate of the fraction of the total area.
+ area_frac = np.sum([self.results[ij].tot**0.5 for ij in pairs if ij[0] == ij[1]])
+ area_frac /= np.sum([cij.tot**0.5 for ij,cij in self.results.items() if ij[0] == ij[1]])
+ # First figure out the original total for all DD that had the same footprint as RR.
+ dd_tot = np.sum([self.results[ij].tot for ij in self.results])
+ # The rrf we want will be a factor of area_frac smaller than the original
+ # dd_tot/rr_tot. We can effect this by multiplying the full dd_tot by area_frac
+ # and use that value normally below. (Also for drf and rdf.)
+ dd_tot *= area_frac
+
+ rr = self._rr.weight
+ rrf = dd_tot / self._rr.tot
+
+ if self._dr is not None:
+ if self._dr.npatch2 == 1:
+ # If r doesn't have patches, then convert all (i,i) pairs to (i,0).
+ pairs2 = [(ij[0],0) for ij in pairs if ij[0] == ij[1]]
+ else:
+ pairs2 = [ij for ij in pairs if self._dr._ok[ij[0],ij[1]]]
+ self._dr._sum([self._dr.results[ij] for ij in pairs2])
+ dr = self._dr.weight
+ drf = dd_tot / self._dr.tot
+ if self._rd is not None:
+ if self._rd.npatch1 == 1:
+ # If r doesn't have patches, then convert all (i,i) pairs to (0,i).
+ pairs3 = [(0,ij[1]) for ij in pairs if ij[0] == ij[1]]
+ else:
+ pairs3 = [ij for ij in pairs if self._rd._ok[ij[0],ij[1]]]
+ self._rd._sum([self._rd.results[ij] for ij in pairs3])
+ rd = self._rd.weight
+ rdf = dd_tot / self._rd.tot
+ denom = rr * rrf
+ if self._dr is None and self._rd is None:
+ xi = dd - denom
+ elif self._rd is not None and self._dr is None:
+ xi = dd - 2.*rd * rdf + denom
+ elif self._dr is not None and self._rd is None:
+ xi = dd - 2.*dr * drf + denom
+ else:
+ xi = dd - rd * rdf - dr * drf + denom
+ denom[denom == 0] = 1 # Guard against division by zero.
+ self.xi = xi / denom
+ self._rr_weight = denom
+
[docs] @depr_pos_kwargs
+ def write(self, file_name, *, rr=None, dr=None, rd=None, file_type=None, precision=None,
+ write_patch_results=False):
+ r"""Write the correlation function to the file, file_name.
+
+ rr is the `NNCorrelation` function for random points.
+ If dr is None, the simple correlation function :math:`\xi = (DD - RR)/RR` is used.
+ If dr is given and rd is None, then :math:`\xi = (DD - 2DR + RR)/RR` is used.
+ If dr and rd are both given, then :math:`\xi = (DD - DR - RD + RR)/RR` is used.
+
+ Normally, at least rr should be provided, but if this is also None, then only the
+ basic accumulated number of pairs is output (along with the separation columns).
+
+ The output file will include the following columns:
+
+ ========== ==========================================================
+ Column Description
+ ========== ==========================================================
+ r_nom The nominal center of the bin in r
+ meanr The mean value :math:`\langle r\rangle` of pairs that fell
+ into each bin
+ meanlogr The mean value :math:`\langle \log(r)\rangle` of pairs that
+ fell into each bin
+ xi The estimator :math:`\xi` (if rr is given, or calculateXi
+ has been called)
+ sigma_xi The sqrt of the variance estimate of xi (if rr is given
+ or calculateXi has been called)
+ DD The total weight of pairs in each bin.
+ RR The total weight of RR pairs in each bin (if rr is given)
+ DR The total weight of DR pairs in each bin (if dr is given)
+ RD The total weight of RD pairs in each bin (if rd is given)
+ npairs The total number of pairs in each bin
+ ========== ==========================================================
+
+ If ``sep_units`` was given at construction, then the distances will all be in these units.
+ Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or
+ radians (for spherical coordinates).
+
+ Parameters:
+ file_name (str): The name of the file to write to.
+ rr (NNCorrelation): The auto-correlation of the random field (RR)
+ dr (NNCorrelation): The cross-correlation of the data with randoms (DR), if
+ desired. (default: None)
+ rd (NNCorrelation): The cross-correlation of the randoms with data (RD), if
+ desired. (default: None, which means use rd=dr)
+ file_type (str): The type of file to write ('ASCII' or 'FITS').
+ (default: determine the type automatically from the extension
+ of file_name.)
+ precision (int): For ASCII output catalogs, the desired precision. (default: 4;
+ this value can also be given in the constructor in the config
+ dict.)
+ write_patch_results (bool): Whether to write the patch-based results as well.
+ (default: False)
+ """
+ self.logger.info('Writing NN correlations to %s',file_name)
+ # Temporary attributes, so the helper functions can access them.
+ precision = self.config.get('precision', 4) if precision is None else precision
+ name = 'main' if write_patch_results else None
+ self._write_rr = rr
+ self._write_dr = dr
+ self._write_rd = rd
+ with make_writer(file_name, precision, file_type, self.logger) as writer:
+ self._write(writer, name, write_patch_results, zero_tot=True)
+ self._write_rr = None
+ self._write_dr = None
+ self._write_rd = None
+ + @property + def _write_col_names(self): + col_names = [ 'r_nom','meanr','meanlogr' ] + rr = self._write_rr + dr = self._write_dr + rd = self._write_rd + if rr is None: + if hasattr(self, 'xi'): + col_names += [ 'xi','sigma_xi' ] + col_names += [ 'DD', 'npairs' ] + else: + col_names += [ 'xi','sigma_xi','DD','RR' ] + if dr is not None and rd is not None: + col_names += ['DR','RD'] + elif dr is not None or rd is not None: + col_names += ['DR'] + col_names += [ 'npairs' ] + return col_names + + @property + def _write_data(self): + data = [ self.rnom, self.meanr, self.meanlogr ] + rr = self._write_rr + dr = self._write_dr + rd = self._write_rd + if rr is None: + if hasattr(self, 'xi'): + data += [ self.xi, np.sqrt(self.varxi) ] + data += [ self.weight, self.npairs ] + if dr is not None: + raise TypeError("rr must be provided if dr is not None") + if rd is not None: + raise TypeError("rr must be provided if rd is not None") + else: + xi, varxi = self.calculateXi(rr=rr, dr=dr, rd=rd) + data += [ xi, np.sqrt(varxi), + self.weight, rr.weight * (self.tot/rr.tot) ] + if dr is not None and rd is not None: + data += [ dr.weight * (self.tot/dr.tot), rd.weight * (self.tot/rd.tot) ] + elif dr is not None or rd is not None: + if dr is None: dr = rd + data += [ dr.weight * (self.tot/dr.tot) ] + data += [ self.npairs ] + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'tot' : self.tot, 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `NNCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading NN correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
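As a sketch of the corresponding write/read round trip, continuing from the example above (the file name is arbitrary, and the reading object must be constructed with the same binning):

>>> dd.write('nn_results.fits', rr=rr, dr=dr)   # Writes xi, sigma_xi, DD, RR, DR, npairs
>>> dd2 = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20)
>>> dd2.read('nn_results.fits')                 # Recovers weight, npairs, xi, varxi, etc.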
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + self.meanr = data['meanR'].reshape(s) + self.meanlogr = data['meanlogR'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meanr = data['meanr'].reshape(s) + self.meanlogr = data['meanlogr'].reshape(s) + self.weight = data['DD'].reshape(s) + self.npairs = data['npairs'].reshape(s) + self.tot = params['tot'] + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + if 'xi' in data.dtype.names: + self.xi = data['xi'].reshape(s) + self.varxi = data['sigma_xi'].reshape(s)**2 + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1) + +
[docs] @depr_pos_kwargs
+ def calculateNapSq(self, *, rr, R=None, dr=None, rd=None, m2_uform=None):
+ r"""Calculate the corollary to the aperture mass statistics for counts.
+
+ .. math::
+
+ \langle N_{ap}^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2}
+ \left [ T_+\left(\frac{r}{R}\right) \xi(r) \right] \\
+
+ The ``m2_uform`` parameter sets which definition of the aperture mass to use.
+ The default is to use 'Crittenden'.
+
+ If ``m2_uform`` is 'Crittenden':
+
+ .. math::
+
+ U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\
+ T_+(s) &= \frac{s^4 - 16s^2 + 32}{128} \exp(-s^2/4) \\
+ rmax &= \infty
+
+ cf. Crittenden, et al (2002): ApJ, 568, 20
+
+ If ``m2_uform`` is 'Schneider':
+
+ .. math::
+
+ U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\
+ T_+(s) &= \frac{12}{5\pi} (2-15s^2) \arccos(s/2) \\
+ &\qquad + \frac{1}{100\pi} s \sqrt{4-s^2} (120 + 2320s^2 - 754s^4 + 132s^6 - 9s^8) \\
+ rmax &= 2R
+
+ cf. Schneider, et al (2002): A&A, 389, 729
+
+ This is used by `NGCorrelation.writeNorm`. See that function and also
+ `GGCorrelation.calculateMapSq` for more details.
+
+ Parameters:
+ rr (NNCorrelation): The auto-correlation of the random field (RR)
+ R (array): The R values at which to calculate the aperture mass statistics.
+ (default: None, which means use self.rnom)
+ dr (NNCorrelation): The cross-correlation of the data with randoms (DR), if
+ desired. (default: None)
+ rd (NNCorrelation): The cross-correlation of the randoms with data (RD), if
+ desired. (default: None, which means use rd=dr)
+ m2_uform (str): Which form to use for the aperture mass. (default: 'Crittenden';
+ this value can also be given in the constructor in the config dict.)
+
+ Returns:
+ Tuple containing
+
+ - nsq = array of :math:`\langle N_{ap}^2 \rangle(R)`
+ - varnsq = array of variance estimates of this value
+ """
+ if m2_uform is None:
+ m2_uform = self.config.get('m2_uform', 'Crittenden')
+ if m2_uform not in ['Crittenden', 'Schneider']:
+ raise ValueError("Invalid m2_uform")
+ if R is None:
+ R = self.rnom
+
+ # Make s a matrix, so we can eventually do the integral by doing a matrix product.
+ s = np.outer(1./R, self.meanr)
+ ssq = s*s
+ if m2_uform == 'Crittenden':
+ exp_factor = np.exp(-ssq/4.)
+ Tp = (32. + ssq*(-16. + ssq)) / 128. * exp_factor
+ else:
+ Tp = np.zeros_like(s)
+ sa = s[s<2.]
+ ssqa = ssq[s<2.]
+ Tp[s<2.] = 12./(5.*np.pi) * (2.-15.*ssqa) * np.arccos(sa/2.)
+ Tp[s<2.] += 1./(100.*np.pi) * sa * np.sqrt(4.-ssqa) * (
+ 120. + ssqa*(2320. + ssqa*(-754. + ssqa*(132. - 9.*ssqa))))
+ Tp *= ssq
+
+ xi, varxi = self.calculateXi(rr=rr, dr=dr, rd=rd)
+
+ # Now do the integral by taking the matrix products.
+ # Note that dlogr = bin_size
+ Tpxi = Tp.dot(xi)
+ nsq = Tpxi * self.bin_size
+ varnsq = (Tp**2).dot(varxi) * self.bin_size**2
+
+ return nsq, varnsq
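For instance, a brief sketch of evaluating :math:`\langle N_{ap}^2 \rangle(R)` at a few aperture radii, reusing the ``dd`` and ``rr`` objects from the earlier example (the R values are arbitrary):

>>> import numpy as np
>>> R = np.array([5., 10., 20.])
>>> nsq, varnsq = dd.calculateNapSq(rr=rr, R=R, m2_uform='Crittenden')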
\ No newline at end of file
diff --git a/docs/_build/html/_modules/treecorr/nnncorrelation.html b/docs/_build/html/_modules/treecorr/nnncorrelation.html
new file mode 100644
index 00000000..565e1e9a
--- /dev/null
+++ b/docs/_build/html/_modules/treecorr/nnncorrelation.html
@@ -0,0 +1,1598 @@
+ treecorr.nnncorrelation — TreeCorr 4.3.0 documentation
Source code for treecorr.nnncorrelation

+# Copyright (c) 2003-2019 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+.. module:: nnncorrelation
+"""
+
+import numpy as np
+
+from . import _lib, _ffi
+from .binnedcorr3 import BinnedCorr3
+from .util import double_ptr as dp
+from .util import make_writer, make_reader, lazy_property
+from .util import depr_pos_kwargs
+
+
+
[docs]class NNNCorrelation(BinnedCorr3):
+ """This class handles the calculation and storage of a 3-point count-count-count correlation
+ function. i.e. the regular density correlation function.
+
+ See the doc string of `BinnedCorr3` for a description of how the triangles are binned.
+
+ Objects of this class hold the following attributes:
+
+ Attributes:
+ logr: The nominal center of the bin in log(r) (the natural logarithm of r).
+ nbins: The number of bins in logr where r = d2
+ bin_size: The size of the bins in logr
+ min_sep: The minimum separation being considered
+ max_sep: The maximum separation being considered
+ nubins: The number of bins in u where u = d3/d2
+ ubin_size: The size of the bins in u
+ min_u: The minimum u being considered
+ max_u: The maximum u being considered
+ nvbins: The number of bins in v where v = +-(d1-d2)/d3
+ vbin_size: The size of the bins in v
+ min_v: The minimum v being considered
+ max_v: The maximum v being considered
+ logr1d: The nominal centers of the nbins bins in log(r).
+ u1d: The nominal centers of the nubins bins in u.
+ v1d: The nominal centers of the nvbins bins in v.
+
+ In addition, the following attributes are numpy arrays whose shape is (nbins, nubins, nvbins):
+
+ Attributes:
+ logr: The nominal center of the bin in log(r).
+ rnom: The nominal center of the bin converted to regular distance.
+ i.e. r = exp(logr).
+ u: The nominal center of the bin in u.
+ v: The nominal center of the bin in v.
+ meand1: The (weighted) mean value of d1 for the triangles in each bin.
+ meanlogd1: The mean value of log(d1) for the triangles in each bin.
+ meand2: The (weighted) mean value of d2 (aka r) for the triangles in each bin.
+ meanlogd2: The mean value of log(d2) for the triangles in each bin.
+ meand3: The (weighted) mean value of d3 for the triangles in each bin.
+ meanlogd3: The mean value of log(d3) for the triangles in each bin.
+ meanu: The mean value of u for the triangles in each bin.
+ meanv: The mean value of v for the triangles in each bin.
+ weight: The total weight in each bin.
+ ntri: The number of triangles going into each bin (including those where one or
+ more objects have w=0).
+ tot: The total number of triangles processed, which is used to normalize
+ the randoms if they have a different number of triangles.
+
+ If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances
+ will all be in these units.
+
+ .. note::
+
+ If you separate out the steps of the `process` command and use `process_auto` and/or
+ `process_cross`, then the units will not be applied to ``meand1``, ``meanlogd1``, etc.
+ until the `finalize` function is called.
+
+ The typical usage pattern is as follows:
+
+ >>> nnn = treecorr.NNNCorrelation(config)
+ >>> nnn.process(cat) # For auto-correlation.
+ >>> rrr.process(rand) # Likewise for random-random correlations
+ >>> drr.process(cat,rand) # If desired, also do data-random correlations
+ >>> rdd.process(rand,cat) # Also with two data and one random
+ >>> nnn.write(file_name,rrr=rrr,drr=drr,...) # Write out to a file.
+ >>> zeta,varzeta = nnn.calculateZeta(rrr=rrr,drr=drr,rdd=rdd) # Or get zeta directly.
+
+ Parameters:
+ config (dict): A configuration dict that can be used to pass in kwargs if desired.
+ This dict is allowed to have additional entries besides those listed
+ in `BinnedCorr3`, which are ignored here. (default: None)
+ logger: If desired, a logger object for logging. (default: None, in which case
+ one will be built according to the config dict's verbose level.)
+ + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `NNNCorrelation`. See class doc for details. + """ + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 1 # NData + self._ro._d2 = 1 # NData + self._ro._d3 = 1 # NData + shape = self.logr.shape + self.meand1 = np.zeros(shape, dtype=float) + self.meanlogd1 = np.zeros(shape, dtype=float) + self.meand2 = np.zeros(shape, dtype=float) + self.meanlogd2 = np.zeros(shape, dtype=float) + self.meand3 = np.zeros(shape, dtype=float) + self.meanlogd3 = np.zeros(shape, dtype=float) + self.meanu = np.zeros(shape, dtype=float) + self.meanv = np.zeros(shape, dtype=float) + self.weight = np.zeros(shape, dtype=float) + self.ntri = np.zeros(shape, dtype=float) + self.tot = 0. + self._rrr_weight = None + self._rrr = None + self._drr = None + self._rdd = None + self._write_rrr = None + self._write_drr = None + self._write_rdd = None + self.logger.debug('Finished building NNNCorr')
+ + @property + def corr(self): + if self._corr is None: + self._corr = _lib.BuildCorr3( + self._d1, self._d2, self._d3, self._bintype, + self._min_sep,self._max_sep,self.nbins,self._bin_size,self.b, + self.min_u,self.max_u,self.nubins,self.ubin_size,self.bu, + self.min_v,self.max_v,self.nvbins,self.vbin_size,self.bv, + self.xperiod, self.yperiod, self.zperiod, + dp(None), dp(None), dp(None), dp(None), + dp(None), dp(None), dp(None), dp(None), + dp(self.meand1), dp(self.meanlogd1), dp(self.meand2), dp(self.meanlogd2), + dp(self.meand3), dp(self.meanlogd3), dp(self.meanu), dp(self.meanv), + dp(self.weight), dp(self.ntri)) + return self._corr + + def __del__(self): + # Using memory allocated from the C layer means we have to explicitly deallocate it + # rather than being able to rely on the Python memory manager. + if self._corr is not None: + if not _ffi._lock.locked(): # pragma: no branch + _lib.DestroyCorr3(self.corr, self._d1, self._d2, self._d3, self._bintype) + +
[docs] def __eq__(self, other): + """Return whether two `NNNCorrelation` instances are equal""" + return (isinstance(other, NNNCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nubins == other.nubins and + self.ubin_size == other.ubin_size and + self.min_v == other.min_v and + self.max_v == other.max_v and + self.nvbins == other.nvbins and + self.vbin_size == other.vbin_size and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + self.tot == other.tot and + np.array_equal(self.meand1, other.meand1) and + np.array_equal(self.meanlogd1, other.meanlogd1) and + np.array_equal(self.meand2, other.meand2) and + np.array_equal(self.meanlogd2, other.meanlogd2) and + np.array_equal(self.meand3, other.meand3) and + np.array_equal(self.meanlogd3, other.meanlogd3) and + np.array_equal(self.meanu, other.meanu) and + np.array_equal(self.meanv, other.meanv) and + np.array_equal(self.weight, other.weight) and + np.array_equal(self.ntri, other.ntri))
+ +
[docs] def copy(self):
+ """Make a copy"""
+ ret = NNNCorrelation.__new__(NNNCorrelation)
+ for key, item in self.__dict__.items():
+ if isinstance(item, np.ndarray):
+ # Only items that might change need to be deep copied.
+ ret.__dict__[key] = item.copy()
+ else:
+ # For everything else, shallow copy is fine.
+ # In particular don't deep copy config or logger
+ # Most of the rest are scalars, which copy fine this way.
+ # And the read-only things are all in _ro.
+ # The results dict is trickier. We rely on it being copied in places, but we
+ # never add more to it after the copy, so shallow copy is fine.
+ ret.__dict__[key] = item
+ ret._corr = None # We'll want to make a new one of these if we need it.
+ if self._drr is not None:
+ ret._drr = self._drr.copy()
+ if self._rdd is not None:
+ ret._rdd = self._rdd.copy()
+ if self._rrr is not None:
+ ret._rrr = self._rrr.copy()
+ return ret
+ + @lazy_property + def _zero_array(self): + # An array of all zeros with the same shape as self.weight (and other data arrays) + z = np.zeros_like(self.weight) + z.flags.writeable=False # Just to make sure we get an error if we try to change it. + return z + + def _zero_copy(self, tot): + # A minimal "copy" with zero for the weight array, and the given value for tot. + ret = NNNCorrelation.__new__(NNNCorrelation) + ret._ro = self._ro + ret.coords = self.coords + ret.metric = self.metric + ret.config = self.config + ret.meand1 = self._zero_array + ret.meanlogd1 = self._zero_array + ret.meand2 = self._zero_array + ret.meanlogd2 = self._zero_array + ret.meand3 = self._zero_array + ret.meanlogd3 = self._zero_array + ret.meanu = self._zero_array + ret.meanv = self._zero_array + ret.weight = self._zero_array + ret.ntri = self._zero_array + ret.tot = tot + ret._corr = None + ret._rrr = ret._drr = ret._rdd = None + ret._write_rrr = ret._write_drr = ret._write_rdd = None + # This override is really the main advantage of using this: + setattr(ret, '_nonzero', False) + return ret + +
[docs] def __repr__(self): + return 'NNNCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): + """Process a single catalog, accumulating the auto-correlation. + + This accumulates the auto-correlation for the given catalog. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + Parameters: + cat (Catalog): The catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat.name == '': + self.logger.info('Starting process NNN auto-correlations') + else: + self.logger.info('Starting process NNN auto-correlations for cat %s.', cat.name) + + self._set_metric(metric, cat.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + field = cat.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',field.nTopLevelNodes) + _lib.ProcessAuto3(self.corr, field.data, self.output_dots, + field._d, self._coords, self._bintype, self._metric) + self.tot += (1./6.) * cat.sumw**3
+ +
[docs] @depr_pos_kwargs
+ def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None):
+ """Process two catalogs, accumulating the 3pt cross-correlation, where one of the
+ points in each triangle comes from the first catalog, and two come from the second.
+
+ This accumulates the cross-correlation for the given catalogs as part of a larger
+ auto-correlation calculation. E.g. when splitting up a large catalog into patches,
+ this is appropriate to use for the cross correlation between different patches
+ as part of the complete auto-correlation of the full catalog.
+
+ Parameters:
+ cat1 (Catalog): The first catalog to process. (1 point in each triangle will come
+ from this catalog.)
+ cat2 (Catalog): The second catalog to process. (2 points in each triangle will come
+ from this catalog.)
+ metric (str): Which metric to use. See `Metrics` for details.
+ (default: 'Euclidean'; this value can also be given in the
+ constructor in the config dict.)
+ num_threads (int): How many OpenMP threads to use during the calculation.
+ (default: use the number of cpu cores; this value can also be given
+ in the constructor in the config dict.)
+ """
+ if cat1.name == '' and cat2.name == '':
+ self.logger.info('Starting process NNN (1-2) cross-correlations')
+ else:
+ self.logger.info('Starting process NNN (1-2) cross-correlations for cats %s, %s.',
+ cat1.name, cat2.name)
+
+ self._set_metric(metric, cat1.coords, cat2.coords)
+ self._set_num_threads(num_threads)
+ min_size, max_size = self._get_minmax_size()
+
+ f1 = cat1.getNField(min_size=min_size, max_size=max_size,
+ split_method=self.split_method,
+ brute=self.brute is True or self.brute == 1,
+ min_top=self.min_top, max_top=self.max_top,
+ coords=self.coords)
+ f2 = cat2.getNField(min_size=min_size, max_size=max_size,
+ split_method=self.split_method,
+ brute=self.brute is True or self.brute == 2,
+ min_top=self.min_top, max_top=self.max_top,
+ coords=self.coords)
+
+ self.logger.info('Starting %d jobs.',f1.nTopLevelNodes)
+ # Note: all 3 correlation objects are the same. Thus, all triangles will be placed
+ # into self.corr, whichever way the three catalogs are permuted for each triangle.
+ _lib.ProcessCross12(self.corr, self.corr, self.corr,
+ f1.data, f2.data, self.output_dots,
+ f1._d, f2._d, self._coords,
+ self._bintype, self._metric)
+ self.tot += cat1.sumw * cat2.sumw**2 / 2.
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): + """Process a set of three catalogs, accumulating the 3pt cross-correlation. + + This accumulates the cross-correlation for the given catalogs as part of a larger + auto-correlation calculation. E.g. when splitting up a large catalog into patches, + this is appropriate to use for the cross correlation between different patches + as part of the complete auto-correlation of the full catalog. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + cat3 (Catalog): The third catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '' and cat3.name == '': + self.logger.info('Starting process NNN cross-correlations') + else: + self.logger.info('Starting process NNN cross-correlations for cats %s, %s, %s.', + cat1.name, cat2.name, cat3.name) + + self._set_metric(metric, cat1.coords, cat2.coords, cat3.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: all 6 correlation objects are the same. Thus, all triangles will be placed + # into self.corr, whichever way the three catalogs are permuted for each triangle. + _lib.ProcessCross3(self.corr, self.corr, self.corr, + self.corr, self.corr, self.corr, + f1.data, f2.data, f3.data, self.output_dots, + f1._d, f2._d, f3._d, self._coords, self._bintype, self._metric) + self.tot += cat1.sumw * cat2.sumw * cat3.sumw
+ + def _finalize(self): + mask1 = self.weight != 0 + mask2 = self.weight == 0 + + self.meand1[mask1] /= self.weight[mask1] + self.meanlogd1[mask1] /= self.weight[mask1] + self.meand2[mask1] /= self.weight[mask1] + self.meanlogd2[mask1] /= self.weight[mask1] + self.meand3[mask1] /= self.weight[mask1] + self.meanlogd3[mask1] /= self.weight[mask1] + self.meanu[mask1] /= self.weight[mask1] + self.meanv[mask1] /= self.weight[mask1] + + # Update the units + self._apply_units(mask1) + + # Use meanlogr when available, but set to nominal when no triangles in bin. + self.meand2[mask2] = self.rnom[mask2] + self.meanlogd2[mask2] = self.logr[mask2] + self.meanu[mask2] = self.u[mask2] + self.meanv[mask2] = self.v[mask2] + self.meand3[mask2] = self.u[mask2] * self.meand2[mask2] + self.meanlogd3[mask2] = np.log(self.meand3[mask2]) + self.meand1[mask2] = self.v[mask2] * self.meand3[mask2] + self.meand2[mask2] + self.meanlogd1[mask2] = np.log(self.meand1[mask2]) + +
[docs] def finalize(self):
+ """Finalize the calculation of meand1, meanlogd1, etc.
+
+ The `process_auto` and `process_cross` commands accumulate values in each bin,
+ so they can be called multiple times if appropriate. Afterwards, this command
+ finishes the calculation of meand1, meanlogd1, etc. by dividing by the total weight.
+ """
+ self._finalize()
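For example, the auto-correlation of a catalog split into two hypothetical halves ``cat_a`` and ``cat_b`` can be accumulated piecewise and then finalized:

>>> import treecorr
>>> nnn = treecorr.NNNCorrelation(min_sep=1., max_sep=50., nbins=5,
...                               min_u=0., max_u=1., nubins=5,
...                               min_v=0., max_v=1., nvbins=5)
>>> nnn.process_auto(cat_a)            # Triangles entirely within cat_a
>>> nnn.process_auto(cat_b)            # Triangles entirely within cat_b
>>> nnn.process_cross12(cat_a, cat_b)  # 1 point from cat_a, 2 from cat_b
>>> nnn.process_cross12(cat_b, cat_a)  # 1 point from cat_b, 2 from cat_a
>>> nnn.finalize()                     # Apply units and divide by the total weight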
+ + @lazy_property + def _nonzero(self): + # The lazy version when we can be sure the object isn't going to accumulate any more. + return self.nonzero + + def _clear(self): + """Clear the data vectors + """ + self.meand1[:,:,:] = 0. + self.meanlogd1[:,:,:] = 0. + self.meand2[:,:,:] = 0. + self.meanlogd2[:,:,:] = 0. + self.meand3[:,:,:] = 0. + self.meanlogd3[:,:,:] = 0. + self.meanu[:,:,:] = 0. + self.meanv[:,:,:] = 0. + self.weight[:,:,:] = 0. + self.ntri[:,:,:] = 0. + self.tot = 0. + + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + tot = np.sum([c.tot for c in others]) + # Empty ones were only needed for tot. Remove them now. + others = [c for c in others if c._nonzero] + if len(others) == 0: + self._clear() + else: + np.sum([c.meand1 for c in others], axis=0, out=self.meand1) + np.sum([c.meanlogd1 for c in others], axis=0, out=self.meanlogd1) + np.sum([c.meand2 for c in others], axis=0, out=self.meand2) + np.sum([c.meanlogd2 for c in others], axis=0, out=self.meanlogd2) + np.sum([c.meand3 for c in others], axis=0, out=self.meand3) + np.sum([c.meanlogd3 for c in others], axis=0, out=self.meanlogd3) + np.sum([c.meanu for c in others], axis=0, out=self.meanu) + np.sum([c.meanv for c in others], axis=0, out=self.meanv) + np.sum([c.weight for c in others], axis=0, out=self.weight) + np.sum([c.ntri for c in others], axis=0, out=self.ntri) + self.tot = tot + + def _add_tot(self, i, j, k, c1, c2, c3): + # When storing results from a patch-based run, tot needs to be accumulated even if + # the total weight being accumulated comes out to be zero. + # This only applies to NNNCorrelation. For the other ones, this is a no op. + tot = c1.sumw * c2.sumw * c3.sumw + if c2 is c3: + # Account for 1/2 factor in cross12 cases. + tot /= 2. + self.tot += tot + # We also have to keep all pairs in the results dict, otherwise the tot calculation + # gets messed up. We need to accumulate the tot value of all pairs, even if + # the resulting weight is zero. + self.results[(i,j,k)] = self._zero_copy(tot) + +
[docs] def __iadd__(self, other): + """Add a second `NNNCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `NNNCorrelation` objects should not have had `finalize` + called yet. Then, after adding them together, you should call `finalize` on the sum. + """ + if not isinstance(other, NNNCorrelation): + raise TypeError("Can only add another NNNCorrelation object") + if not (self.nbins == other.nbins and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.nubins == other.nubins and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nvbins == other.nvbins and + self.min_v == other.min_v and + self.max_v == other.max_v): + raise ValueError("NNNCorrelation to be added is not compatible with this one.") + + self._set_metric(other.metric, other.coords, other.coords, other.coords) + self.tot += other.tot + + # If other is empty, then we're done now. + if not other.nonzero: + return self + + self.meand1[:] += other.meand1[:] + self.meanlogd1[:] += other.meanlogd1[:] + self.meand2[:] += other.meand2[:] + self.meanlogd2[:] += other.meanlogd2[:] + self.meand3[:] += other.meand3[:] + self.meanlogd3[:] += other.meanlogd3[:] + self.meanu[:] += other.meanu[:] + self.meanv[:] += other.meanv[:] + self.weight[:] += other.weight[:] + self.ntri[:] += other.ntri[:] + return self
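A sketch of combining two unfinalized accumulations, e.g. partial results computed in separate runs (``nnn1`` and ``nnn2`` are assumed to use identical binning and to cover complementary pieces of the same calculation):

>>> nnn1.process(cat_a, finalize=False)  # Leave unfinalized
>>> nnn2.process(cat_b, finalize=False)
>>> nnn1 += nnn2                         # Checks that the binning is compatible
>>> nnn1.finalize()                      # Finalize the combined totals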
+ +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, + comm=None, low_mem=False, initialize=True, finalize=True): + """Accumulate the 3pt correlation of the points in the given Catalog(s). + + - If only 1 argument is given, then compute an auto-correlation function. + - If 2 arguments are given, then compute a cross-correlation function with the + first catalog taking one corner of the triangles, and the second taking two corners. + - If 3 arguments are given, then compute a three-way cross-correlation. + + All arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + .. note:: + + For a correlation of multiple catalogs, it typically matters which corner of the + triangle comes from which catalog, which is not kept track of by this function. + The final accumulation will have d1 > d2 > d3 regardless of which input catalog + appears at each corner. The class which keeps track of which catalog appears + in each position in the triangle is `NNNCrossCorrelation`. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first N field. + cat2 (Catalog): A catalog or list of catalogs for the second N field. + (default: None) + cat3 (Catalog): A catalog or list of catalogs for the third N field. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr3.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + if initialize: + self.clear() + + if not isinstance(cat1,list): cat1 = cat1.get_patches() + if cat2 is not None and not isinstance(cat2,list): cat2 = cat2.get_patches() + if cat3 is not None and not isinstance(cat3,list): cat3 = cat3.get_patches() + + if cat2 is None: + if cat3 is not None: + raise ValueError("For two catalog case, use cat1,cat2, not cat1,cat3") + self._process_all_auto(cat1, metric, num_threads) + elif cat3 is None: + self._process_all_cross12(cat1, cat2, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, cat3, metric, num_threads, comm, low_mem) + + if finalize: + self.finalize()
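A hedged sketch of the patch-based route, which is what enables the ``comm`` and ``low_mem`` options as well as patch-based covariances (the coordinate arrays and the ``npatch`` value are placeholders):

>>> import treecorr
>>> cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', npatch=16)
>>> ddd = treecorr.NNNCorrelation(min_sep=1., max_sep=30., nbins=4, sep_units='arcmin',
...                               min_u=0., max_u=1., nubins=5,
...                               min_v=0., max_v=1., nvbins=5)
>>> ddd.process(cat, low_mem=True)     # Trades some speed for lower memory usage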
+ + def _mean_weight(self): + mean_np = np.mean(self.ntri) + return 1 if mean_np == 0 else np.mean(self.weight)/mean_np + +
[docs] def getStat(self): + """The standard statistic for the current correlation object as a 1-d array. + + This raises a RuntimeError if calculateZeta has not been run yet. + """ + if self._rrr_weight is None: + raise RuntimeError("You need to call calculateZeta before calling estimate_cov.") + return self.zeta.ravel()
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + This is the weight array corresponding to `getStat`. In this case, it is the denominator + RRR from the calculation done by calculateZeta(). + """ + if self._rrr_weight is not None: + return self._rrr_weight.ravel() + else: + return self.tot
+ +
[docs] @depr_pos_kwargs
+ def calculateZeta(self, *, rrr, drr=None, rdd=None):
+ r"""Calculate the 3pt function given another 3pt function of random
+ points using the same mask, and possibly cross correlations of the data and random.
+
+ There are two possible formulae that are currently supported.
+
+ 1. The simplest formula to use is :math:`\zeta^\prime = (DDD-RRR)/RRR`.
+ In this case, only rrr needs to be given, the `NNNCorrelation` of a random field.
+ However, note that in this case, the return value is not normally called :math:`\zeta`.
+ Rather, this is an estimator of
+
+ .. math::
+
+ \zeta^\prime(d_1,d_2,d_3) = \zeta(d_1,d_2,d_3) + \xi(d_1) + \xi(d_2) + \xi(d_3)
+
+ where :math:`\xi` is the two-point correlation function for each leg of the triangle.
+ You would typically want to calculate that separately and subtract off the
+ two-point contributions.
+
+ 2. For auto-correlations, a better formula is :math:`\zeta = (DDD-RDD+DRR-RRR)/RRR`.
+ In this case, RDD is the number of triangles where 1 point comes from the randoms
+ and 2 points are from the data. Similarly, DRR has 1 point from the data and 2 from
+ the randoms. These are what are calculated from calling::
+
+ >>> drr.process(data_cat, rand_cat)
+ >>> rdd.process(rand_cat, data_cat)
+
+ .. note::
+
+ One might think the formula should be :math:`\zeta = (DDD-3RDD+3DRR-RRR)/RRR`
+ by analogy with the 2pt Landy-Szalay formula. However, the way these are
+ calculated, the object we are calling RDD already includes triangles where R
+ is in each of the 3 locations. So it is really more like RDD + DRD + DDR.
+ These are not computed separately. Rather the single computation of ``rdd``
+ described above accumulates all three permutations together. So that one
+ object includes everything for the second term. Likewise ``drr`` has all the
+ permutations that are relevant for the third term.
+
+ - If only rrr is provided, the first formula will be used.
+ - If all of rrr, drr, rdd are provided, then the second will be used.
+
+ Parameters:
+ rrr (NNNCorrelation): The auto-correlation of the random field (RRR)
+ drr (NNNCorrelation): DRR if desired. (default: None)
+ rdd (NNNCorrelation): RDD if desired. (default: None)
+
+ Returns:
+ Tuple containing
+
+ - zeta = array of :math:`\zeta(d_1,d_2,d_3)`
+ - varzeta = array of variance estimates of :math:`\zeta(d_1,d_2,d_3)`
+ """
+ # Each random ntri value needs to be rescaled by the ratio of total possible tri.
+ if rrr.tot == 0:
+ raise ValueError("rrr has tot=0.")
+
+ if (rdd is not None) != (drr is not None):
+ raise TypeError("Must provide both rdd and drr (or neither).")
+
+ # rrrf is the factor to scale rrr weights to get something commensurate to the ddd density.
+ rrrf = self.tot / rrr.tot
+
+ # Likewise for the other two potential randoms:
+ if drr is not None:
+ if drr.tot == 0:
+ raise ValueError("drr has tot=0.")
+ drrf = self.tot / drr.tot
+ if rdd is not None:
+ if rdd.tot == 0:
+ raise ValueError("rdd has tot=0.")
+ rddf = self.tot / rdd.tot
+
+ # Calculate zeta based on which randoms are provided.
+ denom = rrr.weight * rrrf
+ if rdd is None:
+ self.zeta = self.weight - denom
+ else:
+ self.zeta = self.weight - rdd.weight * rddf + drr.weight * drrf - denom
+
+ # Divide by RRR in all cases.
+ if np.any(rrr.weight == 0):
+ self.logger.warning("Warning: Some bins for the randoms had no triangles.")
+ denom[rrr.weight==0] = 1. # guard against division by 0.
+ self.zeta /= denom + + # Set up necessary info for estimate_cov + + # First the bits needed for shot noise covariance: + dddw = self._mean_weight() + rrrw = rrr._mean_weight() + if drr is not None: + drrw = drr._mean_weight() + if rdd is not None: + rddw = rdd._mean_weight() + + # Note: The use of varzeta_factor for the shot noise varzeta is even less justified + # than in the NN varxi case. This is merely motivated by analogy with the + # 2pt version. + if rdd is None: + varzeta_factor = 1 + rrrf*rrrw/dddw + else: + varzeta_factor = 1 + drrf*drrw/dddw + rddf*rddw/dddw + rrrf*rrrw/dddw + self._var_num = dddw * varzeta_factor**2 # Should this be **3? Hmm... + self._rrr_weight = rrr.weight * rrrf + + # Now set up the bits needed for patch-based covariance + self._rrr = rrr + self._drr = drr + self._rdd = rdd + + if len(self.results) > 0: + # Check that all use the same patches as ddd + if rrr.npatch1 != 1: + if rrr.npatch1 != self.npatch1: + raise RuntimeError("If using patches, RRR must be run with the same patches " + "as DDD") + if drr is not None and (len(drr.results) == 0 or drr.npatch1 != self.npatch1 + or drr.npatch2 not in (self.npatch2, 1)): + raise RuntimeError("DRR must be run with the same patches as DDD") + if rdd is not None and (len(rdd.results) == 0 or rdd.npatch2 != self.npatch2 + or rdd.npatch1 not in (self.npatch1, 1)): + raise RuntimeError("RDD must be run with the same patches as DDD") + + # If there are any rrr,drr,rdd patch sets that aren't in results, then we need to add + # some dummy results to make sure all the right ijk "pair"s are computed when we make + # the vectors for the covariance matrix. + add_ijk = set() + if rrr.npatch1 != 1: + for ijk in rrr.results: + if ijk not in self.results: + add_ijk.add(ijk) + + if drr is not None and drr.npatch2 != 1: + for ijk in drr.results: + if ijk not in self.results: + add_ijk.add(ijk) + + if rdd is not None and rdd.npatch1 != 1: + for ijk in rdd.results: + if ijk not in self.results: + add_ijk.add(ijk) + + if len(add_ijk) > 0: + for ijk in add_ijk: + self.results[ijk] = self._zero_copy(0) + self.__dict__.pop('_ok',None) # If it was already made, it will need to be redone. + + # Now that it's all set up, calculate the covariance and set varzeta to the diagonal. + self.cov = self.estimate_cov(self.var_method) + self.varzeta = self.cov.diagonal().reshape(self.zeta.shape) + return self.zeta, self.varzeta
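Putting it together, a sketch of the compensated estimator with a jackknife covariance. This assumes ``cat`` was built with ``npatch`` > 1 and ``rand`` with ``patch_centers=cat.patch_centers``, so the randoms share the data's patches:

>>> import treecorr
>>> config = dict(min_sep=1., max_sep=30., nbins=4,
...               min_u=0., max_u=1., nubins=5,
...               min_v=0., max_v=1., nvbins=5,
...               var_method='jackknife')
>>> ddd = treecorr.NNNCorrelation(config)
>>> rrr = treecorr.NNNCorrelation(config)
>>> drr = treecorr.NNNCorrelation(config)
>>> rdd = treecorr.NNNCorrelation(config)
>>> ddd.process(cat)
>>> rrr.process(rand)
>>> drr.process(cat, rand)             # 1 data point, 2 randoms per triangle
>>> rdd.process(rand, cat)             # 1 random, 2 data points per triangle
>>> zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd)
>>> cov = ddd.cov                      # Full jackknife covariance estimate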
+
+ def _calculate_xi_from_pairs(self, pairs):
+ # Note: we keep the notation ij and pairs here, even though they are really ijk and
+ # triples.
+ self._sum([self.results[ij] for ij in pairs])
+ self._finalize()
+ if self._rrr is None:
+ return
+ ddd = self.weight
+ if len(self._rrr.results) > 0:
+ # This is the usual case. R has patches just like D.
+ # Calculate rrr and rrrf in the normal way based on the same pairs as used for DDD.
+ pairs1 = [ij for ij in pairs if self._rrr._ok[ij[0],ij[1],ij[2]]]
+ self._rrr._sum([self._rrr.results[ij] for ij in pairs1])
+ ddd_tot = self.tot
+ else:
+ # In this case, R was not run with patches.
+ # We need to scale RRR down by the relative area.
+ # The approximation we'll use is that tot in the auto-correlations is
+ # proportional to area**3.
+ # The sum of tot**(1/3) when i=j=k gives an estimate of the fraction of the total area.
+ area_frac = np.sum([self.results[ij].tot**(1./3.) for ij in pairs
+ if ij[0] == ij[1] == ij[2]])
+ area_frac /= np.sum([cij.tot**(1./3.) for ij,cij in self.results.items()
+ if ij[0] == ij[1] == ij[2]])
+ # First figure out the original total for all DDD that had the same footprint as RRR.
+ ddd_tot = np.sum([self.results[ij].tot for ij in self.results])
+ # The rrrf we want will be a factor of area_frac smaller than the original
+ # ddd_tot/rrr_tot. We can effect this by multiplying the full ddd_tot by area_frac
+ # and use that value normally below. (Also for drrf and rddf.)
+ ddd_tot *= area_frac
+
+ rrr = self._rrr.weight
+ rrrf = ddd_tot / self._rrr.tot
+
+ if self._drr is not None:
+ if self._drr.npatch2 == 1:
+ # If r doesn't have patches, then convert all (i,i,i) pairs to (i,0,0).
+ pairs2 = [(ij[0],0,0) for ij in pairs if ij[0] == ij[1] == ij[2]]
+ else:
+ pairs2 = [ij for ij in pairs if self._drr._ok[ij[0],ij[1],ij[2]]]
+ self._drr._sum([self._drr.results[ij] for ij in pairs2])
+ drr = self._drr.weight
+ drrf = ddd_tot / self._drr.tot
+ if self._rdd is not None:
+ if self._rdd.npatch1 == 1:
+ # If r doesn't have patches, then convert all (i,i,j) pairs to (0,i,j)
+ # and all (i,j,i) to (0,j,i).
+ pairs3 = [(0,ij[1],ij[2]) for ij in pairs if ij[0] == ij[1] or ij[0] == ij[2]]
+ else:
+ pairs3 = [ij for ij in pairs if self._rdd._ok[ij[0],ij[1],ij[2]]]
+ self._rdd._sum([self._rdd.results[ij] for ij in pairs3])
+ rdd = self._rdd.weight
+ rddf = ddd_tot / self._rdd.tot
+ denom = rrr * rrrf
+ if self._drr is None:
+ zeta = ddd - denom
+ else:
+ zeta = ddd - rdd * rddf + drr * drrf - denom
+ denom[denom == 0] = 1 # Guard against division by zero.
+ self.zeta = zeta / denom
+ self._rrr_weight = denom
+
[docs] @depr_pos_kwargs
+ def write(self, file_name, *, rrr=None, drr=None, rdd=None, file_type=None, precision=None,
+ write_patch_results=False):
+ r"""Write the correlation function to the file, file_name.
+
+ Normally, at least rrr should be provided, but if this is None, then only the
+ basic accumulated number of triangles is output (along with the columns parametrizing
+ the size and shape of the triangles).
+
+ If at least rrr is given, then it will output an estimate of the final 3pt correlation
+ function, :math:`\zeta`. There are two possible formulae that are currently supported.
+
+ 1. The simplest formula to use is :math:`\zeta^\prime = (DDD-RRR)/RRR`.
+ In this case, only rrr needs to be given, the `NNNCorrelation` of a random field.
+ However, note that in this case, the return value is not what is normally called
+ :math:`\zeta`. Rather, this is an estimator of
+
+ .. math::
+ \zeta^\prime(d_1,d_2,d_3) = \zeta(d_1,d_2,d_3) + \xi(d_1) + \xi(d_2) + \xi(d_3)
+
+ where :math:`\xi` is the two-point correlation function for each leg of the triangle.
+ You would typically want to calculate that separately and subtract off the
+ two-point contributions.
+
+ 2. For auto-correlations, a better formula is :math:`\zeta = (DDD-RDD+DRR-RRR)/RRR`.
+ In this case, RDD is the number of triangles where 1 point comes from the randoms
+ and 2 points are from the data. Similarly, DRR has 1 point from the data and 2 from
+ the randoms.
+ For this case, all combinations rrr, drr, and rdd must be provided.
+
+ The output file will include the following columns:
+
+ ========== ================================================================
+ Column Description
+ ========== ================================================================
+ r_nom The nominal center of the bin in r = d2 where d1 > d2 > d3
+ u_nom The nominal center of the bin in u = d3/d2
+ v_nom The nominal center of the bin in v = +-(d1-d2)/d3
+ meand1 The mean value :math:`\langle d1\rangle` of triangles that fell
+ into each bin
+ meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that
+ fell into each bin
+ meand2 The mean value :math:`\langle d2\rangle` of triangles that fell
+ into each bin
+ meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that
+ fell into each bin
+ meand3 The mean value :math:`\langle d3\rangle` of triangles that fell
+ into each bin
+ meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that
+ fell into each bin
+ meanu The mean value :math:`\langle u\rangle` of triangles that fell
+ into each bin
+ meanv The mean value :math:`\langle v\rangle` of triangles that fell
+ into each bin
+ zeta The estimator :math:`\zeta(r,u,v)` (if rrr is given)
+ sigma_zeta The sqrt of the variance estimate of :math:`\zeta`
+ (if rrr is given)
+ DDD The total weight of DDD triangles in each bin
+ RRR The total weight of RRR triangles in each bin (if rrr is given)
+ DRR The total weight of DRR triangles in each bin (if drr is given)
+ RDD The total weight of RDD triangles in each bin (if rdd is given)
+ ntri The number of triangles contributing to each bin
+ ========== ================================================================
+
+ If ``sep_units`` was given at construction, then the distances will all be in these units.
+ Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or
+ radians (for spherical coordinates).
+
+ Parameters:
+ file_name (str): The name of the file to write to.
+ rrr (NNNCorrelation): The auto-correlation of the random field (RRR) + drr (NNNCorrelation): DRR if desired. (default: None) + rdd (NNNCorrelation): RDD if desired. (default: None) + file_type (str): The type of file to write ('ASCII' or 'FITS'). + (default: determine the type automatically from the extension + of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config + dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing NNN correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + self._write_rrr = rrr + self._write_drr = drr + self._write_rdd = rdd + with make_writer(file_name, precision, file_type, self.logger) as writer: + self._write(writer, name, write_patch_results, zero_tot=True) + self._write_rrr = None + self._write_drr = None + self._write_rdd = None
+ + @property + def _write_col_names(self): + rrr = self._write_rrr + drr = self._write_drr + rdd = self._write_rdd + col_names = [ 'r_nom', 'u_nom', 'v_nom', 'meand1', 'meanlogd1', 'meand2', 'meanlogd2', + 'meand3', 'meanlogd3', 'meanu', 'meanv' ] + if rrr is None: + col_names += [ 'DDD', 'ntri' ] + else: + col_names += [ 'zeta','sigma_zeta','DDD','RRR' ] + if drr is not None: + col_names += ['DRR','RDD'] + col_names += [ 'ntri' ] + return col_names + + @property + def _write_data(self): + data = [ self.rnom, self.u, self.v, + self.meand1, self.meanlogd1, self.meand2, self.meanlogd2, + self.meand3, self.meanlogd3, self.meanu, self.meanv ] + rrr = self._write_rrr + drr = self._write_drr + rdd = self._write_rdd + if rrr is None: + if drr is not None or rdd is not None: + raise TypeError("rrr must be provided if other combinations are not None") + data += [ self.weight, self.ntri ] + else: + # This will check for other invalid combinations of rrr, drr, etc. + zeta, varzeta = self.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) + + data += [ zeta, np.sqrt(varzeta), + self.weight, rrr.weight * (self.tot/rrr.tot) ] + + if drr is not None: + data += [ drr.weight * (self.tot/drr.tot), rdd.weight * (self.tot/rdd.tot) ] + data += [ self.ntri ] + + data = [ col.flatten() for col in data ] + return data + + @property + def _write_params(self): + return { 'tot' : self.tot, 'coords' : self.coords, 'metric' : self.metric, + 'sep_units' : self.sep_units, 'bin_type' : self.bin_type } + +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `NNNCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading NNN correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + self._read(reader)
+ + def _read_from_data(self, data, params): + s = self.logr.shape + if 'R_nom' in data.dtype.names: # pragma: no cover + self._ro.rnom = data['R_nom'].reshape(s) + else: + self._ro.rnom = data['r_nom'].reshape(s) + self.meand1 = data['meand1'].reshape(s) + self.meanlogd1 = data['meanlogd1'].reshape(s) + self.meand2 = data['meand2'].reshape(s) + self.meanlogd2 = data['meanlogd2'].reshape(s) + self.meand3 = data['meand3'].reshape(s) + self.meanlogd3 = data['meanlogd3'].reshape(s) + self.meanu = data['meanu'].reshape(s) + self.meanv = data['meanv'].reshape(s) + self.weight = data['DDD'].reshape(s) + self.ntri = data['ntri'].reshape(s) + if 'zeta' in data.dtype.names: + self.zeta = data['zeta'].reshape(s) + self.varzeta = data['sigma_zeta'].reshape(s)**2 + self.tot = params['tot'] + self.coords = params['coords'].strip() + self.metric = params['metric'].strip() + self._ro.sep_units = params['sep_units'].strip() + self._ro.bin_type = params['bin_type'].strip() + self.npatch1 = params.get('npatch1', 1) + self.npatch2 = params.get('npatch2', 1) + self.npatch3 = params.get('npatch3', 1)
+ + +
[docs]class NNNCrossCorrelation(BinnedCorr3):
+ r"""This class handles the calculation of a 3-point count-count-count cross-correlation
+ function.
+
+ For 3-point cross correlations, it matters which of the two or three fields falls on
+ each corner of the triangle. E.g. is field 1 on the corner opposite d1 (the longest
+ side of the triangle) or is it field 2 (or 3) there? This is in contrast to the 2-point
+ correlation where the symmetry of the situation means that it doesn't matter which point
+ is identified with each field. This makes it significantly more complicated to keep track
+ of all the relevant information for a 3-point cross correlation function.
+
+ The `NNNCorrelation` class holds a single :math:`\zeta` function describing all
+ possible triangles, parameterized according to their relative side lengths ordered as
+ d1 > d2 > d3.
+
+ For a cross-correlation of two fields: N1 - N1 - N2 (i.e. the N1 field is at two of the
+ corners and N2 is at one corner), then we need three of these :math:`\zeta` functions
+ to capture all of the triangles, since the N2 point may be opposite d1 or d2 or d3.
+ For a cross-correlation of three fields: N1 - N2 - N3, we need six sets to account for
+ all of the possible permutations relative to the triangle sides.
+
+ Therefore, this class holds 6 instances of `NNNCorrelation`, which in turn hold the
+ information about triangles in each of the relevant configurations. We name these:
+
+ Attributes:
+ n1n2n3: Triangles where N1 is opposite d1, N2 is opposite d2, N3 is opposite d3.
+ n1n3n2: Triangles where N1 is opposite d1, N3 is opposite d2, N2 is opposite d3.
+ n2n1n3: Triangles where N2 is opposite d1, N1 is opposite d2, N3 is opposite d3.
+ n2n3n1: Triangles where N2 is opposite d1, N3 is opposite d2, N1 is opposite d3.
+ n3n1n2: Triangles where N3 is opposite d1, N1 is opposite d2, N2 is opposite d3.
+ n3n2n1: Triangles where N3 is opposite d1, N2 is opposite d2, N1 is opposite d3.
+
+ If for instance N2 and N3 are the same field, then e.g. n1n2n3 and n1n3n2 will have
+ the same values.
+
+ Objects of this class also hold the following attributes, which are identical in each of
+ the above NNNCorrelation instances.
+
+ Attributes:
+ nbins: The number of bins in logr where r = d2
+ bin_size: The size of the bins in logr
+ min_sep: The minimum separation being considered
+ max_sep: The maximum separation being considered
+ nubins: The number of bins in u where u = d3/d2
+ ubin_size: The size of the bins in u
+ min_u: The minimum u being considered
+ max_u: The maximum u being considered
+ nvbins: The number of bins in v where v = +-(d1-d2)/d3
+ vbin_size: The size of the bins in v
+ min_v: The minimum v being considered
+ max_v: The maximum v being considered
+ logr1d: The nominal centers of the nbins bins in log(r).
+ u1d: The nominal centers of the nubins bins in u.
+ v1d: The nominal centers of the nvbins bins in v.
+
+ If ``sep_units`` are given (either in the config dict or as a named kwarg) then the distances
+ will all be in these units.
+
+ .. note::
+
+ If you separate out the steps of the `process` command and use `process_cross` directly,
+ then the units will not be applied to ``meand1``, ``meanlogd1``, etc. until the `finalize`
+ function is called.
+
+ Parameters:
+ config (dict): A configuration dict that can be used to pass in kwargs if desired.
+ This dict is allowed to have additional entries besides those listed
+ in `BinnedCorr3`, which are ignored here. (default: None)
+ logger: If desired, a logger object for logging.
(default: None, in which case + one will be built according to the config dict's verbose level.) + + Keyword Arguments: + **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword + arguments, which may be passed either directly or in the config dict. + """ +
[docs] @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): + """Initialize `NNNCrossCorrelation`. See class doc for details. + """ + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) + + self._ro._d1 = 1 # NData + self._ro._d2 = 1 # NData + self._ro._d3 = 1 # NData + + self.n1n2n3 = NNNCorrelation(config, logger=logger, **kwargs) + self.n1n3n2 = NNNCorrelation(config, logger=logger, **kwargs) + self.n2n1n3 = NNNCorrelation(config, logger=logger, **kwargs) + self.n2n3n1 = NNNCorrelation(config, logger=logger, **kwargs) + self.n3n1n2 = NNNCorrelation(config, logger=logger, **kwargs) + self.n3n2n1 = NNNCorrelation(config, logger=logger, **kwargs) + self._all = [self.n1n2n3, self.n1n3n2, self.n2n1n3, self.n2n3n1, self.n3n1n2, self.n3n2n1] + + self.tot = 0. + self.logger.debug('Finished building NNNCrossCorr')
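A minimal construction sketch (the binning values here are hypothetical placeholders, not taken from this file), showing that the six component `NNNCorrelation` instances share the same binning:

    >>> import treecorr
    >>> nnn = treecorr.NNNCrossCorrelation(min_sep=1., max_sep=30., nbins=10,
    ...                                     min_u=0., max_u=1., nubins=5,
    ...                                     min_v=0., max_v=1., nvbins=5)
    >>> nnn.n1n2n3.nbins == nnn.n3n2n1.nbins == 10
    True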
+ +
[docs] def __eq__(self, other): + """Return whether two `NNNCrossCorrelation` instances are equal""" + return (isinstance(other, NNNCrossCorrelation) and + self.nbins == other.nbins and + self.bin_size == other.bin_size and + self.min_sep == other.min_sep and + self.max_sep == other.max_sep and + self.sep_units == other.sep_units and + self.min_u == other.min_u and + self.max_u == other.max_u and + self.nubins == other.nubins and + self.ubin_size == other.ubin_size and + self.min_v == other.min_v and + self.max_v == other.max_v and + self.nvbins == other.nvbins and + self.vbin_size == other.vbin_size and + self.coords == other.coords and + self.bin_type == other.bin_type and + self.bin_slop == other.bin_slop and + self.xperiod == other.xperiod and + self.yperiod == other.yperiod and + self.zperiod == other.zperiod and + self.n1n2n3 == other.n1n2n3 and + self.n1n3n2 == other.n1n3n2 and + self.n2n1n3 == other.n2n1n3 and + self.n2n3n1 == other.n2n3n1 and + self.n3n1n2 == other.n3n1n2 and + self.n3n2n1 == other.n3n2n1)
+ +
[docs] def copy(self): + """Make a copy""" + ret = NNNCrossCorrelation.__new__(NNNCrossCorrelation) + for key, item in self.__dict__.items(): + if isinstance(item, NNNCorrelation): + ret.__dict__[key] = item.copy() + else: + ret.__dict__[key] = item + # This needs to be the new list: + ret._all = [ret.n1n2n3, ret.n1n3n2, ret.n2n1n3, ret.n2n3n1, ret.n3n1n2, ret.n3n2n1] + return ret
+ + def _zero_copy(self, tot): + # A minimal "copy" with zero for the weight array, and the given value for tot. + ret = NNNCrossCorrelation.__new__(NNNCrossCorrelation) + ret._ro = self._ro + ret.n1n2n3 = self.n1n2n3._zero_copy(tot) + ret.n1n3n2 = self.n1n3n2._zero_copy(tot) + ret.n2n1n3 = self.n2n1n3._zero_copy(tot) + ret.n2n3n1 = self.n2n3n1._zero_copy(tot) + ret.n3n1n2 = self.n3n1n2._zero_copy(tot) + ret.n3n2n1 = self.n3n2n1._zero_copy(tot) + ret._all = [ret.n1n2n3, ret.n1n3n2, ret.n2n1n3, ret.n2n3n1, ret.n3n1n2, ret.n3n2n1] + ret.tot = tot + setattr(ret, '_nonzero', False) + return ret + +
[docs] def __repr__(self): + return 'NNNCrossCorrelation(config=%r)'%self.config
+ +
[docs] @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): + """Process two catalogs, accumulating the 3pt cross-correlation, where one of the + points in each triangle comes from the first catalog, and two come from the second. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + .. note:: + + This only adds to the attributes n1n2n3, n2n1n3, n2n3n1, not the ones where + 3 comes before 2. When running this via the regular `process` method, it will + combine them at the end to make sure n1n2n3 == n1n3n2, etc. for a complete + calculation of the 1-2 cross-correlation. + + Parameters: + cat1 (Catalog): The first catalog to process. (1 point in each triangle will come + from this catalog.) + cat2 (Catalog): The second catalog to process. (2 points in each triangle will come + from this catalog.) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '': + self.logger.info('Starting process NNN (1-2) cross-correlations') + else: + self.logger.info('Starting process NNN (1-2) cross-correlations for cats %s, %s.', + cat1.name, cat2.name) + + self._set_metric(metric, cat1.coords, cat2.coords) + for nnn in self._all: + nnn._set_metric(self.metric, self.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + # Note: all 3 correlation objects are the same. Thus, all triangles will be placed + # into self.corr, whichever way the three catalogs are permuted for each triangle. + _lib.ProcessCross12(self.n1n2n3.corr, self.n2n1n3.corr, self.n2n3n1.corr, + f1.data, f2.data, self.output_dots, + f1._d, f2._d, self._coords, + self._bintype, self._metric) + tot = cat1.sumw * cat2.sumw**2 / 2. + self.n1n2n3.tot += tot + self.n2n1n3.tot += tot + self.n2n3n1.tot += tot + self.tot += tot
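A hedged usage sketch for this method (the file names and column numbers below are made up):

    >>> import treecorr
    >>> cat1 = treecorr.Catalog('field1.dat', ra_col=1, dec_col=2, ra_units='deg', dec_units='deg')
    >>> cat2 = treecorr.Catalog('field2.dat', ra_col=1, dec_col=2, ra_units='deg', dec_units='deg')
    >>> nnn = treecorr.NNNCrossCorrelation(min_sep=1., max_sep=30., sep_units='arcmin', nbins=10,
    ...                                     min_u=0., max_u=1., nubins=5,
    ...                                     min_v=0., max_v=1., nvbins=5)
    >>> nnn.process_cross12(cat1, cat2)
    >>> nnn.finalize()

Per the note above, only n1n2n3, n2n1n3, and n2n3n1 are filled by this call; the full `process` method symmetrizes the 2 <-> 3 spots at the end.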
+ +
[docs] @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): + """Process a set of three catalogs, accumulating the 3pt cross-correlation. + + This accumulates the cross-correlation for the given catalogs. After + calling this function as often as desired, the `finalize` command will + finish the calculation of meand1, meanlogd1, etc. + + Parameters: + cat1 (Catalog): The first catalog to process + cat2 (Catalog): The second catalog to process + cat3 (Catalog): The third catalog to process + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + """ + if cat1.name == '' and cat2.name == '' and cat3.name == '': + self.logger.info('Starting process NNN cross-correlations') + else: + self.logger.info('Starting process NNN cross-correlations for cats %s, %s, %s.', + cat1.name, cat2.name, cat3.name) + + self._set_metric(metric, cat1.coords, cat2.coords, cat3.coords) + for nnn in self._all: + nnn._set_metric(self.metric, self.coords) + self._set_num_threads(num_threads) + min_size, max_size = self._get_minmax_size() + + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + + self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) + _lib.ProcessCross3(self.n1n2n3.corr, self.n1n3n2.corr, + self.n2n1n3.corr, self.n2n3n1.corr, + self.n3n1n2.corr, self.n3n2n1.corr, + f1.data, f2.data, f3.data, self.output_dots, + f1._d, f2._d, f3._d, self._coords, self._bintype, self._metric) + tot = cat1.sumw * cat2.sumw * cat3.sumw + for nnn in self._all: + nnn.tot += tot + self.tot += tot
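The same sketch for the fully three-way case, reusing the hypothetical cat1 and cat2 from above:

    >>> cat3 = treecorr.Catalog('field3.dat', ra_col=1, dec_col=2, ra_units='deg', dec_units='deg')
    >>> nnn.clear()
    >>> nnn.process_cross(cat1, cat2, cat3)
    >>> nnn.finalize()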
+ + def _finalize(self): + for nnn in self._all: + nnn._finalize() + +
[docs] def finalize(self): + """Finalize the calculation of the correlation function. + + The `process_cross` and `process_cross12` commands accumulate values in each bin, + so they can be called multiple times if appropriate. Afterwards, this command + finishes the calculation by dividing by the total weight. + """ + for nnn in self._all: + nnn.finalize()
+ + @property + def nonzero(self): + """Return if there are any values accumulated yet. (i.e. ntri > 0) + """ + return any(nnn.nonzero for nnn in self._all) + + @lazy_property + def _nonzero(self): + # The lazy version when we can be sure the object isn't going to accumulate any more. + return self.nonzero + + def _clear(self): + """Clear the data vectors + """ + for nnn in self._all: + nnn._clear() + self.tot = 0 + + def _sum(self, others): + # Equivalent to the operation of: + # self._clear() + # for other in others: + # self += other + # but no sanity checks and use numpy.sum for faster calculation. + self.tot = np.sum([c.tot for c in others]) + # Empty ones were only needed for tot. Remove them now. + others = [c for c in others if c._nonzero] + other_all = zip(*[c._all for c in others]) # Transpose list of lists + for nnn,o_nnn in zip(self._all, other_all): + nnn._sum(o_nnn) + + def _add_tot(self, i, j, k, c1, c2, c3): + tot = c1.sumw * c2.sumw * c3.sumw + self.tot += tot + for c in self._all: + c.tot += tot + self.results[(i,j,k)] = self._zero_copy(tot) + +
[docs] def __iadd__(self, other): + """Add a second `NNNCrossCorrelation`'s data to this one. + + .. note:: + + For this to make sense, both `NNNCrossCorrelation` objects should not have had + `finalize` called yet. Then, after adding them together, you should call `finalize` + on the sum. + """ + if not isinstance(other, NNNCrossCorrelation): + raise TypeError("Can only add another NNNCrossCorrelation object") + self.n1n2n3 += other.n1n2n3 + self.n1n3n2 += other.n1n3n2 + self.n2n1n3 += other.n2n1n3 + self.n2n3n1 += other.n2n3n1 + self.n3n1n2 += other.n3n1n2 + self.n3n2n1 += other.n3n2n1 + self.tot += other.tot + return self
+ +
[docs] def getWeight(self): + """The weight array for the current correlation object as a 1-d array. + + For NNNCrossCorrelation, this is always just 1. We don't currently have any ability + to automatically handle a random catalog for NNNCrossCorrelations, so we don't know + what the correct weight would be for a given patch or set of patches. This value + is only used by the sample method of covariance estimation, so this limitation means + that sample covariances may be expected to be less accurate than normal when used with + NNNCrossCorrelations. + """ + return 1.
+ +
[docs] @depr_pos_kwargs + def process(self, cat1, cat2, cat3=None, *, metric=None, num_threads=None, + comm=None, low_mem=False, initialize=True, finalize=True): + """Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3. + + - If 2 arguments are given, then compute a cross-correlation function with the + first catalog taking one corner of the triangles, and the second taking two corners. + - If 3 arguments are given, then compute a three-way cross-correlation function. + + All arguments may be lists, in which case all items in the list are used + for that element of the correlation. + + Parameters: + cat1 (Catalog): A catalog or list of catalogs for the first N field. + cat2 (Catalog): A catalog or list of catalogs for the second N field. + cat3 (Catalog): A catalog or list of catalogs for the third N field. + (default: None) + metric (str): Which metric to use. See `Metrics` for details. + (default: 'Euclidean'; this value can also be given in the + constructor in the config dict.) + num_threads (int): How many OpenMP threads to use during the calculation. + (default: use the number of cpu cores; this value can also be given + in the constructor in the config dict.) + comm (mpi4py.Comm): If running MPI, an mpi4py Comm object to communicate between + processes. If used, the rank=0 process will have the final + computation. This only works if using patches. (default: None) + low_mem (bool): Whether to sacrifice a little speed to try to reduce memory usage. + This only works if using patches. (default: False) + initialize (bool): Whether to begin the calculation with a call to + `BinnedCorr3.clear`. (default: True) + finalize (bool): Whether to complete the calculation with a call to `finalize`. + (default: True) + """ + import math + if initialize: + self.clear() + self._process12 = False + + if not isinstance(cat1,list): cat1 = cat1.get_patches() + if not isinstance(cat2,list): cat2 = cat2.get_patches() + if cat3 is not None and not isinstance(cat3,list): cat3 = cat3.get_patches() + + if cat3 is None: + self._process12 = True + self._process_all_cross12(cat1, cat2, metric, num_threads, comm, low_mem) + else: + self._process_all_cross(cat1, cat2, cat3, metric, num_threads, comm, low_mem) + + if finalize: + if self._process12: + # Then some of the processing involved a cross12 calculation. + # This means that spots 2 and 3 should not be distinguished. + # Combine the relevant arrays. + self.n1n2n3 += self.n1n3n2 + self.n2n1n3 += self.n3n1n2 + self.n2n3n1 += self.n3n2n1 + # Copy back by doing clear and +=. + self.n1n3n2.clear() + self.n3n1n2.clear() + self.n3n2n1.clear() + self.n1n3n2 += self.n1n2n3 + self.n3n1n2 += self.n2n1n3 + self.n3n2n1 += self.n2n3n1 + + self.finalize()
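In practice, most users call this `process` method rather than the lower-level cross methods. A hedged sketch with the hypothetical catalogs from above:

    >>> nnn.process(cat1, cat2)        # 1-2 form: cat1 at one corner, cat2 at the other two
    >>> nnn.process(cat1, cat2, cat3)  # 3-way form (clears the previous results first by default)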
+ +
[docs] @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False): + r"""Write the correlation function to the file, file_name. + + Parameters: + file_name (str): The name of the file to write to. + file_type (str): The type of file to write ('ASCII' or 'FITS'). (default: determine + the type automatically from the extension of file_name.) + precision (int): For ASCII output catalogs, the desired precision. (default: 4; + this value can also be given in the constructor in the config dict.) + write_patch_results (bool): Whether to write the patch-based results as well. + (default: False) + """ + self.logger.info('Writing NNN cross-correlations to %s',file_name) + precision = self.config.get('precision', 4) if precision is None else precision + name = 'main' if write_patch_results else None + with make_writer(file_name, precision, file_type, self.logger) as writer: + names = [ 'n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1' ] + for name, corr in zip(names, self._all): + corr._write(writer, name, write_patch_results, zero_tot=True)
+ +
[docs] @depr_pos_kwargs + def read(self, file_name, *, file_type=None): + """Read in values from a file. + + This should be a file that was written by TreeCorr, preferably a FITS file, so there + is no loss of information. + + .. warning:: + + The `NNNCrossCorrelation` object should be constructed with the same configuration + parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not + checked by the read function. + + Parameters: + file_name (str): The name of the file to read in. + file_type (str): The type of file ('ASCII' or 'FITS'). (default: determine the type + automatically from the extension of file_name.) + """ + self.logger.info('Reading NNN cross-correlations from %s',file_name) + with make_reader(file_name, file_type, self.logger) as reader: + names = [ 'n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1' ] + for name, corr in zip(names, self._all): + corr._read(reader, name)
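A round-trip sketch (the output file name is hypothetical). Per the warning above, the object doing the reading must be constructed with the same configuration:

    >>> nnn.write('nnn_cross.fits')
    >>> nnn2 = treecorr.NNNCrossCorrelation(min_sep=1., max_sep=30., sep_units='arcmin', nbins=10,
    ...                                      min_u=0., max_u=1., nubins=5,
    ...                                      min_v=0., max_v=1., nvbins=5)
    >>> nnn2.read('nnn_cross.fits')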
\ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/reader.html b/docs/_build/html/_modules/treecorr/reader.html new file mode 100644 index 00000000..37034590 --- /dev/null +++ b/docs/_build/html/_modules/treecorr/reader.html @@ -0,0 +1,874 @@ treecorr.reader — TreeCorr 4.3.0 documentation
Source code for treecorr.reader

+# Copyright (c) 2003-2020 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+"""
+This is a set of helper classes that read data for treecorr.Catalog objects.
+They have a bit of a mixed bag of functions, not intended to be complete,
+just to cover the needs of that class.
+
+HDF and FITS files differ in that the former can have different length columns
+in the same data group, unlike FITS HDUs, which have columns of all the same length.
+Where possible we check for that here, and a user would have to be fairly determined to
+trigger this.
+
+The other difference is that fitsio reads a structured array, whereas h5py will
+read a dictionary of arrays. This does not cause problems here, since both are
+indexed by string, but may prevent usage elsewhere. If so we could convert them to
+both provide dicts.
+"""
+import numpy as np
+
+
[docs]class AsciiReader(object): + """Reader interface for ASCII files using numpy. + """ + can_slice = True + default_ext = None + + def __init__(self, file_name, delimiter=None, comment_marker='#', logger=None): + """ + Parameters: + file_name (str): The file name + delimiter (str): What delimiter to use between values. (default: None, + which means any whitespace) + comment_marker (str): What token indicates a comment line. (default: '#') + """ + self.file_name = file_name + self.delimiter = delimiter + self.comment_marker = comment_marker + self.nrows = None + self._file = None + + @property + def file(self): + if self._file is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._file + + def __contains__(self, ext): + """Check if ext is None. + + ASCII files don't have extensions, so the only ext allowed is None. + + Parameters: + ext (str): The extension to check + + Returns: + Whether ext is None + """ + # None is the only valid "extension" for ASCII files + return ext is None + +
[docs] def check_valid_ext(self, ext): + """Check if an extension is valid for reading, and raise ValueError if not. + + None is the only valid extension for ASCII files. + + Parameters: + ext (str): The extension to check + """ + if ext is not None: + raise ValueError("Invalid ext={} for file {}".format(ext,self.file_name))
+ + +
[docs] def read(self, cols, s=slice(None), ext=None): + """Read a slice of a column or list of columns from a specified extension. + + Parameters: + cols (str/list): The name(s) of column(s) to read + s (slice/array): A slice object or selection of integers to read (default: all) + ext (str): The extension (ignored) + + Returns: + The data as a dict or single numpy array as appropriate + """ + self.file # Check that we are in a with context, so other things are set up correctly. + + # Figure out how many rows to skip at the start + if isinstance(s, slice) and s.start is not None: + skiprows = self.comment_rows + s.start + else: + skiprows = self.comment_rows + + # And how many to read (if we know) + # Note: genfromtxt can't handle a step, so defer that to later. + # Also if s is an array, that won't work here either. + if isinstance(s, slice) and s.start is not None and s.stop is not None: + nrows = s.stop - s.start + else: + nrows = None + + # And which columns to read + geti = lambda col: self.col_names.index(col) if col in self.col_names else int(col)-1 + if np.isscalar(cols): + icols = [geti(cols)] + else: + icols = [geti(col) for col in cols] + + # Actually read the data + data = np.genfromtxt(self.file, comments=self.comment_marker, + delimiter=self.delimiter, usecols=icols, + skip_header=skiprows, max_rows=nrows) + self.file.seek(0) + + # If only one column, then the shape comes in as one-d. Reshape it: + if len(icols) == 1: + data = data.reshape(len(data),1) + + # If only one row, then the shape comes in as one-d. Reshape it: + if len(data.shape) == 1: + data = data.reshape(1,len(data)) + + # Select the rows we want if start/end wasn't sufficient. + if isinstance(s, slice): + data = data[::s.step,:] + else: + data = data[s,:] + + # Return is slightly different if we have multiple columns or not. + if np.isscalar(cols): + return data[:,0] + else: + return {col : data[:,i] for i,col in enumerate(cols)}
+ +
[docs] def read_params(self, ext=None): + """Read the params in the given extension, if any. + + Parameters: + ext (str): The extension (ignored -- Ascii always reads the next group) + + Returns: + params + """ + header = next(self.file) + params = {} + if header[1] == '#': + assert header[0] == '#' + tokens = header[2:].split() + if tokens[0][0] != '{': + # Then before the dict, we have group_name + name1 = tokens[0] + if name1 != ext and ext is not None: + raise OSError("Mismatch in group names. Expected %s, found %s"%(ext, name1)) + header = next(self.file) + if header[1] == '#': + assert header[0] == '#' + params = eval(header[2:].strip()) + header = next(self.file) + # In case these changed since the first group. (Which happens for nn.results.) + self.col_names = header[1:].split() + self.ncols = len(self.col_names) + return params
+ +
[docs] def read_data(self, ext=None, max_rows=None): + """Read all data in the file, and the parameters in the header, if any. + + Parameters: + ext (str): The extension (ignored -- Ascii always reads the next group) + max_rows (int): The max number of rows to read. (default: None) + + Returns: + data + """ + data = np.genfromtxt(self.file, names=self.col_names, max_rows=max_rows) + return data
+ +
[docs] def row_count(self, col=None, ext=None): + """Count the number of rows in the file. + + Parameters: + col (str): The column to use (ignored) + ext (str): The extension (ignored) + + Returns: + The number of rows + """ + if self.nrows is not None: + return self.nrows + + # cf. https://stackoverflow.com/a/850962/1332281 + # On my system with python 3.7, bufcount was the fastest among these solutions. + # I also found 256K was the optimal buf size for my system. Probably YMMV, but + # micro-optimizing this is not so important. TreeCorr itself never uses this method; + # outside of the unit tests, row_count is only ever called on the FitsReader. + lines = 0 + buf_size = 256 * 1024 + buf = self.file.read(buf_size) + while buf: + lines += buf.count('\n') + buf = self.file.read(buf_size) + self.file.seek(0) # Go back to the beginning + self.nrows = lines - self.comment_rows + return self.nrows
+ +
[docs] def names(self, ext=None): + """Return a list of the names of all the columns in an extension + + Parameters: + ext (str): The extension (ignored) + + Returns: + A list of string column names + """ + self.file # Check that we are in a with context, so other things are set up correctly. + + # Include both int values as strings and any real names we know about. + return [str(i+1) for i in range(self.ncols)] + list(self.col_names)
+ + def __enter__(self): + # See how many comment rows there are at the start + self.comment_rows = 0 + with open(self.file_name, 'r') as fid: + for line in fid: # pragma: no branch + if line.startswith(self.comment_marker): self.comment_rows += 1 + else: break + + # Do a trivial read of 1 row, just to get basic info about columns + self.ncols = None + if self.comment_rows >= 1: + try: + data = np.genfromtxt(self.file_name, comments=self.comment_marker, + delimiter=self.delimiter, names=True, + skip_header=self.comment_rows-1, max_rows=1) + self.col_names = data.dtype.names + self.ncols = len(self.col_names) + except Exception: + pass + if self.ncols is None: + data = np.genfromtxt(self.file_name, comments=self.comment_marker, + delimiter=self.delimiter, max_rows=1) + self.col_names = [] + if len(data.shape) != 1: # pragma: no cover + raise OSError('Unable to parse the input catalog as a numpy array') + self.ncols = data.shape[0] + + self._file = open(self.file_name, 'r') + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + self._file.close() + self._file = None
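A hedged sketch of the reader protocol (the file name is made up, and the file is assumed to have a header line naming columns ra and dec):

    >>> from treecorr.reader import AsciiReader
    >>> with AsciiReader('cat.dat') as reader:
    ...     ra = reader.read('ra')          # single column -> 1-d numpy array
    ...     d = reader.read(['ra', 'dec'])  # list of columns -> dict of arrays
    ...     nrows = reader.row_count()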
+ + +
[docs]class PandasReader(AsciiReader): + """Reader interface for ASCII files using pandas. + """ + def __init__(self, file_name, delimiter=None, comment_marker='#', logger=None): + """ + Parameters: + file_name (str): The file name + delimiter (str): What delimiter to use between values. (default: None, + which means any whitespace) + comment_marker (str): What token indicates a comment line. (default: '#') + """ + try: + import pandas + except ImportError: + if logger: + logger.error("Unable to import pandas. Cannot read %s"%file_name) + raise + + AsciiReader.__init__(self, file_name, delimiter, comment_marker) + # This is how pandas handles whitespace + self.sep = r'\s+' if self.delimiter is None else self.delimiter + + +
[docs] def read(self, cols, s=slice(None), ext=None): + """Read a slice of a column or list of columns from a specified extension. + + Parameters: + cols (str/list): The name(s) of column(s) to read + s (slice/array): A slice object or selection of integers to read (default: all) + ext (str): The extension (ignored) + + Returns: + The data as a dict or single numpy array as appropriate + """ + import pandas + self.file # Check that we are in a with context, so other things are set up correctly. + + # Figure out how many rows to skip at the start + if isinstance(s, slice) and s.start is not None: + skiprows = self.comment_rows + s.start + else: + skiprows = self.comment_rows + + # And how many to read (if we know) + # Note: this doesn't yet account for a step or an array of indices; + # those are handled below via the skiprows function. + if isinstance(s, slice) and s.start is not None and s.stop is not None: + nrows = s.stop - s.start + else: + nrows = None + + # Pandas has the ability to skip according to a function, so we can accommodate + # arbitrary s (either slice or array of indices): + if isinstance(s, slice) and s.step is not None: + start = skiprows + skiprows = lambda x: x < start or (x-start) % s.step != 0 + if nrows is not None: + nrows = (nrows-1) // s.step + 1 + + if not isinstance(s, slice): + # Then s is a numpy array of indices + start = skiprows + ss = set(s) # for efficiency + skiprows = lambda x: x-start not in ss + + # And which columns to read + geti = lambda col: self.col_names.index(col) if col in self.col_names else int(col)-1 + if np.isscalar(cols): + icols = [geti(cols)] + else: + icols = [geti(col) for col in cols] + + # Actually read the data + df = pandas.read_csv(self.file, comment=self.comment_marker, + sep=self.sep, usecols=icols, header=None, + skiprows=skiprows, nrows=nrows) + self.file.seek(0) + + # Return is slightly different if we have multiple columns or not. + if np.isscalar(cols): + return df.iloc[:,0].to_numpy() + else: + return {col : df.loc[:,icols[i]].to_numpy() for i,col in enumerate(cols)}
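Because pandas can skip rows via a function, this reader handles strided slices and index arrays directly. A sketch with the same hypothetical file as above:

    >>> from treecorr.reader import PandasReader
    >>> with PandasReader('cat.dat') as reader:
    ...     every_other = reader.read('ra', s=slice(0, None, 2))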
+ +
[docs]class ParquetReader(): + """Reader interface for Parquet files using pandas. + """ + can_slice = True + default_ext = None + + def __init__(self, file_name, delimiter=None, comment_marker='#', logger=None): + """ + Parameters: + file_name (str): The file name + delimiter (str): What delimiter to use between values. (default: None, + which means any whitespace) + comment_marker (str): What token indicates a comment line. (default: '#') + """ + try: + import pandas + except ImportError: + if logger: + logger.error("Unable to import pandas. Cannot read %s"%file_name) + raise + + self.file_name = file_name + self._df = None + + @property + def df(self): + if self._df is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._df + + def __contains__(self, ext): + """Check if ext is None. + + Parquet files don't have extensions, so the only ext allowed is None. + + Parameters: + ext (str): The extension to check + + Returns: + Whether ext is None + """ + # None is the only valid "extension" for Parquet files + return ext is None + +
[docs] def check_valid_ext(self, ext): + """Check if an extension is valid for reading, and raise ValueError if not. + + None is the only valid extension for Parquet files. + + Parameters: + ext (str): The extension to check + """ + if ext is not None: + raise ValueError("Invalid ext={} for file {}".format(ext,self.file_name))
+ +
[docs] def read(self, cols, s=slice(None), ext=None): + """Read a slice of a column or list of columns from a specified extension. + + Parameters: + cols (str/list): The name(s) of column(s) to read + s (slice/array): A slice object or selection of integers to read (default: all) + ext (str): The extension (ignored) + + Returns: + The data as a recarray or simple numpy array as appropriate + """ + if np.isscalar(cols): + return self.df[cols][s].to_numpy() + else: + return self.df[cols][s].to_records()
+ +
[docs] def row_count(self, col=None, ext=None): + """Count the number of rows in the file. + + Both col and ext are ignored here; a Parquet file holds a single table in which + all columns have the same length. + + Parameters: + col (str): The column to use (ignored) + ext (str): The extension (ignored) + + Returns: + The number of rows + """ + return len(self.df)
+ +
[docs] def names(self, ext=None): + """Return a list of the names of all the columns in an extension + + Parameters: + ext (str): The extension to search for columns (ignored) + + Returns: + A list of string column names + """ + return self.df.columns
+ + def __enter__(self): + import pandas + self._df = pandas.read_parquet(self.file_name) + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + # free the memory in the dataframe at end of "with" statement + self._df = None
+ + +
[docs]class FitsReader(object): + """Reader interface for FITS files. + Uses fitsio to read columns, etc. + """ + default_ext = 1 + + def __init__(self, file_name, logger=None): + """ + Parameters: + file_name (str): The file name + """ + try: + import fitsio + except ImportError: + if logger: + logger.error("Unable to import fitsio. Cannot read %s"%file_name) + raise + + self._file = None # Only works inside a with block. + + # record file name to know what to open when entering + self.file_name = file_name + + # There is a bug in earlier fitsio versions that prevents slicing + self.can_slice = fitsio.__version__ > '1.0.6' + + @property + def file(self): + if self._file is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._file + + def __contains__(self, ext): + """Check if there is an extension with the given name or index in the file. + + This may be either a name or an integer. + + Parameters: + ext (str/int): The extension to check for (default: 1) + + Returns: + Whether the extension exists + """ + ext = self._update_ext(ext) + return ext in self.file + +
[docs] def check_valid_ext(self, ext): + """Check if an extension is valid for reading, and raise ValueError if not. + + The ext must both exist and be a table (not an image) + + Parameters: + ext (str/int): The extension to check + """ + import fitsio + + ext = self._update_ext(ext) + if ext not in self: + raise ValueError("Invalid ext={} for file {} (does not exist)".format( + ext, self.file_name)) + + if not isinstance(self.file[ext], fitsio.hdu.TableHDU): + raise ValueError("Invalid ext={} for file {} (Not a TableHDU)".format( + ext, self.file_name))
+ + def _update_ext(self, ext): + # FITS extensions can be indexed by number or + # string. Try converting to an integer if the current + # value is not found. If not let the error be caught later. + if ext is None: + ext = 1 + if ext not in self.file: + try: + ext = int(ext) + except ValueError: + pass + return ext + +
[docs] def read(self, cols, s=slice(None), ext=None): + """Read a slice of a column or list of columns from a specified extension + + Parameters: + cols (str/list): The name(s) of column(s) to read + s (slice/array): A slice object or selection of integers to read (default: all) + ext (str/int): The FITS extension to use (default: 1) + + Returns: + The data as a recarray or simple numpy array as appropriate + """ + ext = self._update_ext(ext) + return self.file[ext][cols][s]
+ +
[docs] def read_params(self, ext=None): + """Read the params in the given extension, if any. + + Parameters: + ext (str/int): The FITS extension to use (default: 1) + + Returns: + params + """ + ext = self._update_ext(ext) + params = self.file[ext].read_header() + return params
+ +
[docs] def read_data(self, ext=None, max_rows=None): + """Read all data in the file, and the parameters in the header, if any. + + Parameters: + ext (str/int): The FITS extension to use (default: 1) + max_rows (int): The max number of rows to read. (ignored) + + Returns: + data + """ + ext = self._update_ext(ext) + data = self.file[ext].read() + return data
+ +
[docs] def row_count(self, col=None, ext=None): + """Count the number of rows in the named extension + + For compatibility with the HDF interface, which can have columns + of different lengths, we allow a second argument, col, but it is + ignored here. + + Parameters: + col (str): The column to use (ignored) + ext (str/int): The FITS extension to use (default: 1) + + Returns: + The number of rows + """ + ext = self._update_ext(ext) + return self.file[ext].get_nrows()
+ +
[docs] def names(self, ext=None): + """Return a list of the names of all the columns in an extension + + Parameters: + ext (str/int): The extension to search for columns (default: 1) + + Returns: + A list of string column names + """ + ext = self._update_ext(ext) + return self.file[ext].get_colnames()
+ + def __enter__(self): + import fitsio + self._file = fitsio.FITS(self.file_name, 'r') + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + # Context manager closer - we just close the file at the end, + # regardless of the error + self.file.close() + self._file = None
+ + +
[docs]class HdfReader(object): + """Reader interface for HDF5 files. + Uses h5py to read columns, etc. + """ + # h5py can always accept slices as indices + can_slice = True + default_ext = '/' + + def __init__(self, file_name, logger=None): + """ + Parameters: + file_name (str): The file name + """ + try: + import h5py + except ImportError: + if logger: + logger.error("Unable to import h5py. Cannot read %s"%file_name) + raise + + self._file = None # Only works inside a with block. + self.file_name = file_name + + @property + def file(self): + if self._file is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._file + + def __contains__(self, ext): + """Check if there is an extension with the given name in the file. + + Parameters: + ext (str): The extension to check for + + Returns: + Whether the extension exists + """ + return ext in self.file + + def _group(self, ext): + if ext is None: + ext = '/' + try: + return self.file[ext] + except KeyError: + raise OSError("Group name %s not found in HDF5 file."%(ext)) + +
[docs] def check_valid_ext(self, ext): + """Check if an extension is valid for reading, and raise ValueError if not. + + The ext must exist - there is no other requirement for HDF files. + + Parameters: + ext (str): The extension to check + """ + if ext not in self: + raise ValueError("Invalid ext={} for file {} (does not exist)".format( + ext,self.file_name))
+ +
[docs] def read(self, cols, s=slice(None), ext=None): + """Read a slice of a column or list of columns from a specified extension. + + Slices should always be used when reading HDF files - using a sequence of + integers is painfully slow. + + Parameters: + cols (str/list): The name(s) of column(s) to read + s (slice/array): A slice object or selection of integers to read (default: all) + ext (str): The HDF (sub-)group to use (default: '/') + + Returns: + The data as a dict or single numpy array as appropriate + """ + g = self._group(ext) + if np.isscalar(cols): + return g[cols][s] + else: + return {col : g[col][s] for col in cols}
+ +
[docs] def read_params(self, ext=None): + """Read the params in the given extension, if any. + + Parameters: + ext (str): The HDF (sub-)group to use (default: '/') + + Returns: + params + """ + g = self._group(ext) + params = dict(g.attrs) + + return params
+ +
[docs] def read_data(self, ext=None, max_rows=None): + """Read all data in the file, and the parameters in the attributes, if any. + + Parameters: + ext (str): The HDF (sub-)group to use (default: '/') + max_rows (int): The max number of rows to read. (ignored) + + Returns: + data + """ + g = self._group(ext) + + # This does not actually load the column + col_vals = list(g.values()) + col_names = list(g.keys()) + + ncol = len(col_names) + num_rows = col_vals[0].size + dtype=[(name, col.dtype) for (name, col) in zip(col_names, col_vals)] + data = np.empty(num_rows, dtype=dtype) + + # Now we actually read everything + for (name, col) in zip(col_names, col_vals): + data[name] = col[:] + + return data
+ +
[docs] def row_count(self, col, ext=None): + """Count the number of rows in the named extension and column + + Unlike in FitsReader, col is required. + + Parameters: + col (str): The column to use + ext (str): The HDF group name to use (default: '/') + + Returns: + The number of rows + """ + return self._group(ext)[col].size
+ +
[docs] def names(self, ext=None): + """Return a list of the names of all the columns in an extension + + Parameters: + ext (str): The extension to search for columns (default: '/') + + Returns: + A list of string column names + """ + return list(self._group(ext).keys())
+ + def __enter__(self): + import h5py + self._file = h5py.File(self.file_name, 'r') + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + # closes file at end of "with" statement + self._file.close() + self._file = None
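And a sketch for HDF5 input (hypothetical file; the default group is the root '/'):

    >>> from treecorr.reader import HdfReader
    >>> with HdfReader('cat.hdf5') as reader:
    ...     ra = reader.read('ra')  # slices are fast; lists of integers are painfully slow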
\ No newline at end of file diff --git a/docs/_build/html/_modules/treecorr/writer.html b/docs/_build/html/_modules/treecorr/writer.html new file mode 100644 index 00000000..f5f6805f --- /dev/null +++ b/docs/_build/html/_modules/treecorr/writer.html @@ -0,0 +1,310 @@ treecorr.writer — TreeCorr 4.3.0 documentation

Source code for treecorr.writer

+# Copyright (c) 2003-2020 by Mike Jarvis
+#
+# TreeCorr is free software: redistribution and use in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions, and the disclaimer given in the accompanying LICENSE
+#    file.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions, and the disclaimer given in the documentation
+#    and/or other materials provided with the distribution.
+
+import os
+import numpy as np
+
+def ensure_dir(target):
+    d = os.path.dirname(target)
+    if d != '':
+        if not os.path.exists(d):
+            os.makedirs(d)
+
+
[docs]class AsciiWriter(object): + """Write data to an ASCII (text) file. + """ + def __init__(self, file_name, *, precision=4, logger=None): + """ + Parameters: + file_name: The file name + precision: The number of digits of precision to output. + logger: If desired, a logger object for logging. (default: None) + """ + self.file_name = file_name + self.logger = logger + self.set_precision(precision) + self._file = None + ensure_dir(file_name) + + def set_precision(self, precision): + self.precision = precision + self.width = precision+8 + self.fmt = '%%%d.%de'%(self.width, self.precision) + + @property + def file(self): + if self._file is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._file + +
[docs] def write(self, col_names, columns, *, params=None, ext=None): + """Write some columns to an output ASCII file with the given column names. + + Parameters: + col_names: A list of column names for the given columns. These will be written + in a header comment line at the top of the output file. + columns: A list of numpy arrays with the data to write. + params: A dict of extra parameters to write at the top of the output file. + ext: Optional ext name for these data. (default: None) + """ + ncol = len(col_names) + data = np.empty( (len(columns[0]), ncol) ) + for i,col in enumerate(columns): + data[:,i] = col + + # Note: The first one is 1 shorter to allow space for the initial #. + header = ("#" + "{:^%d}"%(self.width-1) + + " {:^%d}"%(self.width) * (ncol-1) + "\n").format(*col_names) + + if ext is not None: + s = '## %s\n'%ext + self.file.write(s.encode()) + if params is not None: + s = '## %r\n'%(params) + self.file.write(s.encode()) + self.file.write(header.encode()) + np.savetxt(self.file, data, fmt=self.fmt)
+ + def __enter__(self): + self._file = open(self.file_name, 'wb') + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + self._file.close() + self._file = None
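A hedged writing sketch to match the reader side (file name and data are made up):

    >>> import numpy as np
    >>> from treecorr.writer import AsciiWriter
    >>> r = np.logspace(0., 2., 5)
    >>> xi = 1.e-3 / r
    >>> with AsciiWriter('xi.out', precision=6) as writer:
    ...     writer.write(['r', 'xi'], [r, xi], params={'coords': 'spherical'})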
+ + +
[docs]class FitsWriter(object): + """Writer interface for FITS files. + """ + def __init__(self, file_name, *, logger=None): + """ + Parameters: + file_name: The file name + logger: If desired, a logger object for logging. (default: None) + """ + try: + import fitsio # noqa: F401 + except ImportError: + if logger: + logger.error("Unable to import fitsio. Cannot write to %s"%file_name) + raise + self.file_name = file_name + self.logger = logger + self._file = None + ensure_dir(file_name) + + def set_precision(self, precision): + pass + + @property + def file(self): + if self._file is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._file + +
[docs] def write(self, col_names, columns, *, params=None, ext=None): + """Write some columns to an output FITS file with the given column names. + + If ext is not None, then it is used as the name of the extension for these data. + + Parameters: + col_names: A list of column names for the given columns. + columns: A list of numpy arrays with the data to write. + params: A dict of extra parameters to write in the header of the extension. + ext: Optional extension name for these data. (default: None) + """ + data = np.empty(len(columns[0]), dtype=[ (c,'f8') for c in col_names ]) + for (c, col) in zip(col_names, columns): + data[c] = col + self.file.write(data, header=params, extname=ext)
+ + def __enter__(self): + import fitsio + self._file = fitsio.FITS(self.file_name, 'rw', clobber=True) + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + self.file.close() + self._file = None
+ + +
[docs]class HdfWriter(object): + """Writer interface for HDF5 files. + Uses h5py to write columns, etc. + """ + def __init__(self, file_name, *, logger=None): + """ + Parameters: + file_name: The file name + logger: If desired, a logger object for logging. (default: None) + """ + try: + import h5py # noqa: F401 + except ImportError: + if logger: + logger.error("Unable to import h5py. Cannot write to %s"%file_name) + raise + self.file_name = file_name + self.logger = logger + self._file = None + ensure_dir(file_name) + + def set_precision(self, precision): + pass + + @property + def file(self): + if self._file is None: + raise RuntimeError('Illegal operation when not in a "with" context') + return self._file +
[docs] def write(self, col_names, columns, *, params=None, ext=None): + """Write some columns to an output HDF5 file with the given column names. + + If ext is not None, then it is used as the name of the group for these data. + + Parameters: + col_names: A list of column names for the given columns. + columns: A list of numpy arrays with the data to write. + params: A dict of extra parameters to write as attributes of the group. + ext: Optional group name for these data. (default: None) + """ + if ext is not None: + hdf = self.file.create_group(ext) + else: + hdf = self.file + if params is not None: + hdf.attrs.update(params) + for (name, col) in zip(col_names, columns): + hdf.create_dataset(name, data=col)
+ + def __enter__(self): + import h5py + self._file = h5py.File(self.file_name, 'w') + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + # closes file at end of "with" statement + self._file.close() + self._file = None
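The FITS and HDF5 writers share the same write interface; an HDF5 sketch using the hypothetical r, xi arrays from the AsciiWriter example above:

    >>> from treecorr.writer import HdfWriter
    >>> with HdfWriter('xi.hdf5') as writer:
    ...     writer.write(['r', 'xi'], [r, xi], params={'coords': 'spherical'}, ext='main')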
\ No newline at end of file diff --git a/docs/_build/html/_sources/binning.rst.txt b/docs/_build/html/_sources/binning.rst.txt new file mode 100644 index 00000000..0bee2f77 --- /dev/null +++ b/docs/_build/html/_sources/binning.rst.txt @@ -0,0 +1,242 @@ +Binning +======= + +To be useful, the measured correlations need to be binned in some way to +find the average correlation among many pairs of nearly the same separation. +The different ways to bin the results may be specified using the ``bin_type`` +parameter in `BinnedCorr2`. + +"Log" +----- + +The default way to bin the results in TreeCorr is uniformly in log(r), +where r is defined according to the specified metric +(cf. `Metrics`). This corresponds to ``bin_type`` = "Log", although +one normally omits this, as it is the default. + +For most correlation functions, which tend to be approximately power laws, this +binning is the most appropriate, since it naturally handles a large dynamic range +in the separation. + +The exact binning is specified using any 3 of the following 4 parameters: + + - ``nbins`` How many bins to use. + - ``bin_size`` The width of the bins in log(r). + - ``min_sep`` The minimum separation r to include. + - ``max_sep`` The maximum separation r to include. + +For a pair with a metric distance r, the index of the corresponding bin in the +output array is ``int((log(r) - log(min_sep))/bin_size)``. + +.. note:: + + If ``nbins`` is the omitted value, then ``bin_size`` might need to be decreased + slightly to accommodate an integer number of bins with the given ``min_sep`` and ``max_sep``. + +"Linear" +-------- + +For use cases where the scales of interest span only a relatively small range of distances, +it may be more convenient to use linear binning rather than logarithmic. A notable +example of this is BAO investigations, where the interesting region is near the BAO peak. +In these cases, using ``bin_type`` = "Linear" may be preferred. + +As with "Log", the binning may be specified using any 3 of the following 4 parameters: + + - ``nbins`` How many bins to use. + - ``bin_size`` The width of the bins in r. + - ``min_sep`` The minimum separation r to include. + - ``max_sep`` The maximum separation r to include. + +For a pair with a metric distance r, the index of the corresponding bin in the +output array is ``int((r - min_sep)/bin_size)``. + +.. note:: + + If ``nbins`` is the omitted value, then ``bin_size`` might need to be decreased + slightly to accommodate an integer number of bins with the given ``min_sep`` and ``max_sep``. + +"TwoD" +------ + +To bin the correlation in two dimensions, (x,y), you can use ``bin_type`` = "TwoD". +This will keep track of not only the distance between two points, but also the +direction. The results are then binned linearly in both the delta x and delta y values. + +The exact binning is specified using any 2 of the following 3 parameters: + + - ``nbins`` How many bins to use in each direction. + - ``bin_size`` The width of the bins in dx and dy. + - ``max_sep`` The maximum absolute value of dx or dy to include. + +For a pair with a directed separation (dx,dy), the indices of the corresponding bin in the +2-d output array are ``int((dx + max_sep)/bin_size)``, ``int((dy + max_sep)/bin_size)``. + +The binning is symmetric around (0,0), so the minimum separation in either direction is +``-max_sep``, and the maximum is ``+max_sep``. 
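+To make the index arithmetic above concrete, here is a small sketch (pure numpy,
+with made-up numbers) of the "Log" and "TwoD" bin lookups::
+
+    >>> import numpy as np
+    >>> min_sep, max_sep, nbins = 1., 100., 10
+    >>> bin_size = np.log(max_sep/min_sep) / nbins        # "Log" binning
+    >>> int((np.log(25.) - np.log(min_sep)) / bin_size)   # pair at r = 25
+    6
+    >>> bin_size_2d = 2.*max_sep / nbins                  # "TwoD" binning
+    >>> int((-35. + max_sep) / bin_size_2d)               # pair at dx = -35
+    3
+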
+It is also permissible to specify ``min_sep`` to exclude small separations from being +accumulated, but the binning will still include a bin that crosses over (dx,dy) = (0,0) +if ``nbins`` is odd, or four bins that touch (0,0) if ``nbins`` is even. + +Note that this binning is only valid when the input positions are given as x,y (not ra, dec), +and the metric is "Euclidean". If you have a use case for other combinations, please +open an issue with your specific case, and we can try to figure out how it should be implemented. + +Output quantities +----------------- + +For all of the different binning options, the Correlation object will have the following attributes +related to the locations of the bins: + + - ``rnom`` The separation at the nominal centers of the bins. For "Linear" binning, + these will be spaced uniformly. + - ``logr`` The log of the separation at the nominal centers of the bins. For "Log" + binning, these will be spaced uniformly. This is always the (natural) + log of ``rnom``. + - ``left_edges`` The separation at the left edges of the bins. For "Linear" binning, these + are half-way between the ``rnom`` values of successive bins. For "Log" binning, these are + the geometric mean of successive ``rnom`` values, rather than the arithmetic mean. + For "TwoD" binning, these are like "Linear" but for the x separations only. + - ``right_edges`` Analogously, the separation at the right edges of the bins. + - ``meanr`` The mean separation of all the pairs of points that actually ended up + falling in each bin. + - ``meanlogr`` The mean log(separation) of all the pairs of points that actually ended up + falling in each bin. + +The last two quantities are only available after finishing a calculation (e.g. with ``process``). + +In addition to the above, "TwoD" binning also includes the following: + + - ``bottom_edges`` The y separation at the bottom edges of the 2-D bins. Like + ``left_edges``, but for the y values rather than the x values. + - ``top_edges`` The y separation at the top edges of the 2-D bins. Like + ``right_edges``, but for the y values rather than the x values. + +There is some subtlety about which separation to use when comparing measured correlation functions +to theoretical predictions. See Appendix D of +`Singh et al, 2020 `_, +who show that one can find percent level differences among the different options. +(See their Figure D2 in particular.) +The difference is smaller as the bin size decreases, although they point out that it is not always +feasible to make the bin size very small, e.g. because of issues calculating the covariance matrix. + +In most cases, if the true signal is expected to be locally well approximated by a power law, then +using ``meanlogr`` is probably the most appropriate choice. This most closely approximates the +signal-based weighting that they recommend, but if you are concerned about the percent level +effects of this choice, you would be well-advised to investigate the different options with +simulations to see exactly what impact the choice has on your science. + + +Other options for binning +------------------------- + +There are a few other options that affect the binning, which can be set when constructing +any of the `BinnedCorr2` or `BinnedCorr3` classes. + +sep_units +^^^^^^^^^ + +The optional parameter ``sep_units`` lets you specify what units you want for +the binned separations if the separations are angles. + +Valid options are "arcsec", "arcmin", "degrees", "hours", or "radians". 
The default if +not specified is "radians". + +Note that this is only valid when the distance metric is an angle. +E.g. if RA and Dec values are given for the positions, +and no distance values are specified, then the default metric, "Euclidean", +is the angular separation on the sky. "Arc" similarly is always an angle. + +If the distance metric is a physical distance, then this parameter is invalid, +and the output separation will match the physical distance units in the input catalog. +E.g. if the distance from Earth is given as r, then the output units will match the +units of the r values. Or if positions are given as x, y (and maybe z), then the +units will be whatever the units are for these values. + +bin_slop +^^^^^^^^ + +One of the main reasons that TreeCorr is able to compute correlation functions +so quickly is that it allows the bin edges to be a little bit fuzzy. A pairs whose +separation is very close to a dividing line between two bins might be placed +in the next bin over from where an exact calculation would put it. + +This is normally completely fine for any real-world application. +Indeed, by deciding to bin your correlation function with some non-zero bin size, you have +implicitly defined a resolution below which you don't care about the exact separation +values. + +The approximation TreeCorr makes is to allow some *additional* imprecision that is a +fraction of this level. Namely ``bin_slop``. Specifically, ``bin_slop`` specifies the +maximum possible error any pair can have, given as a fraction of the bin size. + +You can think of it as turning all of your rectangular bins into overlapping trapezoids, +where ``bin_slop`` defines the ratio of the angled portion to the flat mean width. +Larger ``bin_slop`` allows for more overlap (and is thus faster), while smaller ``bin_slop`` +gets closer to putting each pair perfectly into the bin it belongs in. + +The default ``bin_slop`` for the "Log" bin type is such that ``bin_slop * bin_size`` +is 0.1. Or if ``bin_size < 0.1``, then we use ``bin_slop`` = 1. This has been +found to give fairly good accuracy across a variety of applications. However, +for high precision measurements, it may be appropriate to use a smaller value than +this. Especially if your bins are fairly large. + +A typical test to perform on your data is to cut ``bin_slop`` in half and see if your +results change significantly. If not, you are probably fine, but if they change by an +appreciable amount (according to whatever you think that means for your science), +then your original ``bin_slop`` was too large. + +To understand the impact of the ``bin_slop`` parameter, it helps to start by thinking +about when it is set to 0. +If ``bin_slop`` = 0, then TreeCorr does essentially a brute-force calculation, +where each pair of points is always placed into the correct bin. + +But if ``bin_slop`` > 0, then any given pair is allowed to be placed in the wrong bin +so long as the true separation is within this fraction of a bin from the edge. +For example, if a bin nominally goes from 10 to 20 arcmin, then with bin_slop = 0.05, +TreeCorr will accumulate pairs with separations ranging from 9.5 to 20.5 arcmin into this +bin. (I.e. the slop is 0.05 of the bin width on each side.) +Note that some of the pairs with separations from 9.5 to 10.5 would possibly fall into the +lower bin instead. Likewise some from 19.5 to 20.5 would fall in the higher bin. +So both edges are a little fuzzy. 
+ +For large numbers of objects, the shifts up and down tend to cancel out, so there is typically +very little bias in the results. Statistically, about as many pairs scatter up as scatter +down, so the resulting counts come out pretty close to correct. Furthermore, the total +number of pairs within the specified range is always correct, since each pair is placed +in some bin. + +brute +^^^^^ + +Sometimes, it can be useful to force the code to do the full brute force calculation, +skipping all of the approximations that are inherent to the tree traversal algorithm. +This is of course much slower, but the option can be especially useful for testing purposes. +For instance, comparisons to brute force results have been invaluable in the +development of TreeCorr's faster algorithms. Some science cases also use comparison to brute +force results to confirm that they are not significantly impacted by using non-zero +``bin_slop``. + +Setting ``brute`` = True is roughly equivalent to setting ``bin_slop`` = 0. However, +there is a distinction between these two cases. +Internally, the former will *always* traverse the tree all the way to the leaves. So +every pair will be calculated individually. This really is the brute force calculation. + +However, ``bin_slop`` = 0 will allow for the traversal to stop early if all possible pairs in a +given pair of cells fall into the same bin. This can be quite a large speedup in some cases. +And especially for NN correlations, there is no disadvantage to doing so. + +For shear correlations, there can be a slight difference between using ``bin_slop`` = 0 and +``brute`` = True because the shear projections won't be precisely equal in the two cases. +Shear correlations require parallel transporting the shear values to the centers of +the cells, and then when accumulating pairs, the shears are projected onto the line joining +the two points. Both of these lead to slight differences in the results of a ``bin_slop`` = 0 +calculation compared to the true brute force calculation. +If the difference is seen to matter for you, this is probably a sign that you should decrease +your bin size. + +Additionally, there is one other way to use the ``brute`` parameter. If you set +``brute`` to 1 or 2, rather than True or False, then the forced traversal to the +leaf cells will only apply to ``cat1`` or ``cat2`` respectively. The cells for the other +catalog will use the normal criterion based on the ``bin_slop`` parameter to decide whether +it is acceptable to use a non-leaf cell or to continue traversing the tree. diff --git a/docs/_build/html/_sources/catalog.rst.txt b/docs/_build/html/_sources/catalog.rst.txt new file mode 100644 index 00000000..ddddf028 --- /dev/null +++ b/docs/_build/html/_sources/catalog.rst.txt @@ -0,0 +1,35 @@ +Input Data +========== + +The Catalog class +----------------- + +.. autoclass:: treecorr.Catalog + :members: + +Other utilities related to catalogs +----------------------------------- + +.. autofunction:: + treecorr.read_catalogs +.. autofunction:: + treecorr.calculateVarG +.. autofunction:: + treecorr.calculateVarK +.. automodule:: treecorr.catalog + :members: + :exclude-members: Catalog, read_catalogs, calculateVarG, calculateVarK + +File Readers +------------ + +.. autoclass:: treecorr.reader.FitsReader + :members: +.. autoclass:: treecorr.reader.HdfReader + :members: +.. autoclass:: treecorr.reader.AsciiReader + :members: +.. autoclass:: treecorr.reader.PandasReader + :members: +.. 
+.. autoclass:: treecorr.reader.ParquetReader
+    :members:
diff --git a/docs/_build/html/_sources/changes.rst.txt b/docs/_build/html/_sources/changes.rst.txt
new file mode 100644
index 00000000..33390384
--- /dev/null
+++ b/docs/_build/html/_sources/changes.rst.txt
@@ -0,0 +1,3 @@
+
+.. include:: ../CHANGELOG.rst
+
diff --git a/docs/_build/html/_sources/correlation2.rst.txt b/docs/_build/html/_sources/correlation2.rst.txt
new file mode 100644
index 00000000..5240e4fa
--- /dev/null
+++ b/docs/_build/html/_sources/correlation2.rst.txt
@@ -0,0 +1,27 @@
+
+Two-point Correlation Functions
+===============================
+
+There are 6 different classes for calculating the different possible two-point correlation
+functions:
+
+.. toctree::
+
+    nn
+    gg
+    ng
+    kk
+    nk
+    kg
+
+Each of the above classes is a sub-class of the base class BinnedCorr2, so they have a number of
+features in common about how they are constructed. The common features are documented here.
+
+.. autoclass:: treecorr.BinnedCorr2
+    :members:
+
+
+.. autofunction:: treecorr.estimate_multi_cov
+
+.. autofunction:: treecorr.build_multi_cov_design_matrix
+
diff --git a/docs/_build/html/_sources/correlation3.rst.txt b/docs/_build/html/_sources/correlation3.rst.txt
new file mode 100644
index 00000000..804d573f
--- /dev/null
+++ b/docs/_build/html/_sources/correlation3.rst.txt
@@ -0,0 +1,32 @@
+
+Three-point Correlation Functions
+=================================
+
+There are currently 3 different classes for calculating the different possible three-point
+auto-correlation functions:
+
+.. toctree::
+
+    nnn
+    ggg
+    kkk
+
+.. note::
+
+    There are classes that can handle cross-correlations of the same type:
+
+    * `treecorr.NNNCrossCorrelation`
+    * `treecorr.GGGCrossCorrelation`
+    * `treecorr.KKKCrossCorrelation`
+
+    However, we do not yet have the ability to compute 3-point cross-correlations across
+    different types (such as NNG or KGG, etc.)
+
+Each of the above classes is a sub-class of the base class BinnedCorr3, so they have a number of
+features in common about how they are constructed. The common features are documented here.
+
+.. autoclass:: treecorr.BinnedCorr3
+    :members:
+
+
+
diff --git a/docs/_build/html/_sources/cov.rst.txt b/docs/_build/html/_sources/cov.rst.txt
new file mode 100644
index 00000000..d1df5108
--- /dev/null
+++ b/docs/_build/html/_sources/cov.rst.txt
@@ -0,0 +1,249 @@
+Covariance Estimates
+====================
+
+In addition to calculating the correlation function, TreeCorr can also
+estimate the variance of the resulting array of values, or even the
+covariance matrix.
+
+The simplest estimate of the variance involves propagating the shot noise
+of the individual measurements into the final results. For shear (G) measurements,
+this includes the so-called "shape noise". For scalar (K) measurements, this
+includes the point variance of the k values. For count (N) measurements,
+it comes from the Poisson statistics of counting. This variance estimate is the
+default if you don't specify something different, and it will be recorded as
+``varxi`` for most types of correlations. For GG, there are two quantities,
+``varxip`` and ``varxim``, which give the variance of ``xip`` and ``xim``
+respectively.
+
+However, this kind of variance estimate does not capture the sample variance.
+This is the fact that the signal has real variation across the field, which
+tends to dominate the total variance at large scales. To estimate this
+component of the total variance from the data, one typically needs to split
+the field into patches and use the variation in the measurement among the
+patches to estimate the overall sample variance.
+
+See `Patches` for information on defining the patches to use for your input `Catalog`.
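+
+As a quick preview (`Patches` has the details), splitting a catalog into patches can be
+as simple as passing ``npatch`` to the `Catalog` constructor. The file and column names
+here are hypothetical::
+
+    >>> cat = treecorr.Catalog('input_cat.fits', ra_col='RA', dec_col='DEC',
+    ...                        g1_col='E1', g2_col='E2',
+    ...                        ra_units='hours', dec_units='degrees', npatch=20)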
+
+Variance Methods
+----------------
+
+To get one of the patch-based variance estimates for the ``varxi`` or similar
+attribute, you can set the ``var_method`` parameter in the constructor. e.g.::
+
+    >>> ng = treecorr.NGCorrelation(nbins=10, min_sep=1, max_sep=100, var_method='jackknife')
+
+This tells TreeCorr to use the jackknife algorithm for computing the covariance matrix.
+Then ``varxi`` is taken as the diagonal of this covariance matrix.
+The full covariance matrix is also recorded in the ``cov`` attribute.
+
+The following variance methods are implemented:
+
+"shot"
+^^^^^^
+
+This is the default shot-noise estimate of the covariance. It includes the Poisson
+counts of points for N statistics, shape noise for G statistics, and the observed
+scatter in the values for K statistics. In this case, the covariance matrix will
+be diagonal, since there is no way to estimate the off-diagonal terms.
+
+"jackknife"
+^^^^^^^^^^^
+
+This is the classic jackknife estimate of the covariance matrix. It computes the
+correlation function that would have been measured with each patch in turn excluded
+from the sample. Then the covariance matrix is estimated as
+
+.. math::
+
+    C = \frac{N_\mathrm{patch} - 1}{N_\mathrm{patch}} \sum_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)
+
+"sample"
+^^^^^^^^
+
+This is the simplest patch-based covariance estimate. It computes the
+correlation function for each patch, where at least one of the two points falls in
+that patch. Then the estimated covariance matrix is simply the sample covariance
+of these vectors, scaled by the relative total weight in each patch.
+
+.. math::
+
+    C = \frac{1}{N_\mathrm{patch} - 1} \sum_i w_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)
+
+For :math:`w_i`, we use the total weight in the correlation measurement for each patch
+divided by the total weight in all patches. This is roughly equal to
+:math:`1/N_\mathrm{patch}`, but it captures to some extent any patch-to-patch variation
+in area that might be present.
+
+"bootstrap"
+^^^^^^^^^^^
+
+This estimate implements a bootstrap resampling of the patches as follows:
+
+1. Select :math:`N_\mathrm{patch}` patch numbers at random from the full list
+   :math:`[0 \dots N_\mathrm{patch}{-}1]` with replacement, so some patch numbers
+   will appear more than once, and some will be missing.
+
+2. Calculate the total correlation function that would have been computed
+   from these patches rather than the original patches.
+
+3. The auto-correlations are included at the selected repetition for the bootstrap
+   samples. So if a patch number is repeated, its auto-correlation is included that
+   many times.
+
+4. Cross-correlations between patches are included only if the two patches
+   aren't actually the same patch (i.e. it's not actually an auto-correlation).
+   This prevents extra auto-correlations (where most of the signal typically occurs)
+   from being included in the sum.
+
+5. Repeat steps 1-4 a total of :math:`N_\mathrm{bootstrap}` times to build up a large
+   set of resampled correlation functions, :math:`\{\xi_i\}`.
+
+6. Then the covariance estimate is the sample variance of these resampled results:
+
+   .. math::
+
+      C = \frac{1}{N_\mathrm{bootstrap}-1} \sum_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)
+
+The default number of bootstrap resamplings is 500, but you can change this in the
+Correlation constructor using the parameter ``num_bootstrap``.
+
+"marked_bootstrap"
+^^^^^^^^^^^^^^^^^^
+
+This estimate is based on a "marked-point" bootstrap resampling of the patches.
+Specifically, we follow the method described in
+*A Valid and Fast Spatial Bootstrap for Correlation Functions*
+by Ji Meng Loh, 2008. cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/.
+
+This method starts out the same as the "sample" method. It computes the correlation
+function for each patch where at least one of the two points falls in that patch.
+However, it keeps track of the numerator and denominator separately.
+These are the "marks" in Loh, 2008.
+
+Then these marks are resampled in the normal bootstrap manner (random with replacement)
+to produce mock results. The correlation function for each bootstrap resampling is
+the sum of the numerator marks divided by the sum of the denominator marks.
+
+Then the covariance estimate is the sample variance of these resampled results:
+
+.. math::
+
+    C = \frac{1}{N_\mathrm{bootstrap}-1} \sum_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)
+
+The default number of bootstrap resamplings is 500, but you can change this in the
+Correlation constructor using the parameter ``num_bootstrap``.
+
+Covariance Matrix
+-----------------
+
+As mentioned above, the covariance matrix corresponding to the specified ``var_method``
+will be saved as the ``cov`` attribute of the correlation instance after processing
+is complete.
+
+However, if the processing was done using patches, then you can also compute the
+covariance matrix for any of the above methods without redoing the processing
+using `BinnedCorr2.estimate_cov` or `BinnedCorr3.estimate_cov`. E.g.::
+
+    >>> ng = treecorr.NGCorrelation(nbins=10, min_sep=1, max_sep=100)
+    >>> ng.process(lens_cat, source_cat)  # At least one of these needs to have patches set.
+    >>> cov_jk = ng.estimate_cov('jackknife')
+    >>> cov_boot = ng.estimate_cov('bootstrap')
+
+Additionally, you can compute the joint covariance matrix for a number of statistics
+that were processed using the same patches with `treecorr.estimate_multi_cov`. E.g.::
+
+    >>> ng = treecorr.NGCorrelation(nbins=10, min_sep=1, max_sep=100)
+    >>> ng.process(lens_cat, source_cat)
+    >>> gg = treecorr.GGCorrelation(nbins=10, min_sep=1, max_sep=100)
+    >>> gg.process(source_cat)
+    >>> cov = treecorr.estimate_multi_cov([ng,gg], 'jackknife')
+
+This will calculate an estimate of the covariance matrix for the full data vector
+with ``ng.xi`` followed by ``gg.xip`` and then ``gg.xim``.
+
+Covariance of Derived Quantities
+--------------------------------
+
+Sometimes your data vector of interest might not be just the raw correlation function,
+or even a list of several correlation functions. Rather, it might be some derived
+quantity. E.g.
+
+* The ratio or difference of two correlation functions such as ``nk1.xi / nk2.xi``.
+* The aperture mass variance computed by `GGCorrelation.calculateMapSq`.
+* One of the other ancillary products such as ``ng.xi_im``.
+* A reordering of the data vector, such as putting several ``gg.xip`` first for multiple
+  tomographic bins and then the ``gg.xim`` for each after that.
+
+These are just examples of what kind of thing you might want.
+In fact, we enable
+any kind of post-processing you want to do on either a single correlation object
+(using `BinnedCorr2.estimate_cov` or `BinnedCorr3.estimate_cov`) or a list of
+correlation objects (using `treecorr.estimate_multi_cov`).
+
+These functions take an optional ``func`` parameter, which can be any user-defined
+function that calculates the desired data vector from the given correlation(s).
+For instance, in the first case, where the desired data vector is the ratio of
+two NK correlations, you could find the corresponding covariance matrix as follows::
+
+    >>> func = lambda corrs: corrs[0].xi / corrs[1].xi
+    >>> nk1 = treecorr.NKCorrelation(nbins=10, min_sep=1, max_sep=100)
+    >>> nk2 = treecorr.NKCorrelation(nbins=10, min_sep=1, max_sep=100)
+    >>> nk1.process(cat1a, cat1b)  # Ideally, all of these use the same patches.
+    >>> nk2.process(cat2a, cat2b)
+    >>> corrs = [nk1, nk2]
+    >>> ratio = func(corrs)  # = nk1.xi / nk2.xi
+    >>> cov = treecorr.estimate_multi_cov(corrs, 'jackknife', func)
+
+The resulting covariance matrix, ``cov``, will be the jackknife estimate for the derived
+data vector, ``ratio``.
+
+Random Catalogs
+---------------
+
+There are a few adjustments to the above prescription when using random
+catalogs, which of course are required when doing an NN correlation.
+
+1. It is not necessarily required to use patches for the random catalog.
+   The random is supposed to be dense enough that it doesn't materially contribute
+   to the noise in the correlation measurement. In particular, it doesn't have
+   any sample variance itself, and the shot noise component should be small
+   compared to the shot noise in the data.
+2. If you do use patches for the random catalog, then you need to make sure
+   that you use the same patch definitions for both the data and the randoms.
+   Using patches for the randoms probably leads to slightly better covariance
+   estimates in most cases, but the difference in the two results is usually small.
+   (Note: This seems to be less true for 3pt NNN correlations than 2pt NN.
+   Using patches for the randoms gives significantly better covariance estimates
+   in that case than not doing so.)
+3. The covariance calculation cannot happen until you call `calculateXi`
+   to let TreeCorr know what the RR and DR (if using that) results are.
+4. After calling `dd.calculateXi`, ``dd`` will have ``varxi`` and ``cov``
+   attributes calculated according to whatever ``var_method`` you specified.
+5. It also allows you to call `dd.estimate_cov` with any different method you want.
+   And you can include ``dd`` in a list of correlation
+   objects passed to `treecorr.estimate_multi_cov`.
+
+Here is a worked example::
+
+    >>> data = treecorr.Catalog(config, npatch=N)
+    >>> rand = treecorr.Catalog(rand_config, patch_centers=data.patch_centers)
+    >>> dd = treecorr.NNCorrelation(nn_config, var_method='jackknife')
+    >>> dr = treecorr.NNCorrelation(nn_config)
+    >>> rr = treecorr.NNCorrelation(nn_config)
+    >>> dd.process(data)
+    >>> dr.process(data, rand)
+    >>> rr.process(rand)
+    >>> dd.calculateXi(rr=rr, dr=dr)
+    >>> dd_cov = dd.cov  # Can access covariance now.
+    >>> dd_cov_bs = dd.estimate_cov(method='bootstrap')  # Or calculate a different one.
+    >>> txcov = treecorr.estimate_multi_cov([ng,gg,dd], 'bootstrap')  # Or include in multi_cov
+
+As mentioned above, using ``patch_centers`` is optional for ``rand``, but probably recommended.
+In the last line, it would be required that ``ng`` and ``gg`` were also made using catalogs
+with the same patch centers that ``dd`` used.
+
+The use pattern for `NNNCorrelation` is analogous, where `NNNCorrelation.calculateZeta`
+needs to be run to get the covariance estimate, after which it may be used in a list
+passed to `treecorr.estimate_multi_cov`.
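+
+To make the NNN pattern explicit, here is a minimal sketch. The config dicts are
+hypothetical, and it uses the simple estimator, zeta = (DDD-RRR)/RRR; check the
+`NNNCorrelation.calculateZeta` docs for the argument names if you also want the
+DRR and RDD cross terms::
+
+    >>> data = treecorr.Catalog(config, npatch=N)
+    >>> rand = treecorr.Catalog(rand_config, patch_centers=data.patch_centers)
+    >>> ddd = treecorr.NNNCorrelation(nnn_config, var_method='jackknife')
+    >>> rrr = treecorr.NNNCorrelation(nnn_config)
+    >>> ddd.process(data)
+    >>> rrr.process(rand)
+    >>> ddd.calculateZeta(rrr)
+    >>> ddd_cov = ddd.cov  # Jackknife estimate, as for dd above.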
diff --git a/docs/_build/html/_sources/field.rst.txt b/docs/_build/html/_sources/field.rst.txt
new file mode 100644
index 00000000..13a8532e
--- /dev/null
+++ b/docs/_build/html/_sources/field.rst.txt
@@ -0,0 +1,53 @@
+Fields
+======
+
+The `Field` class and its subclasses repackage the information from a `Catalog`
+into a ball tree data structure, allowing for fast calculation of the correlation
+functions.
+
+There are several kinds of `Field` classes.
+
+- `Field` itself is an abstract base class of the other kinds of fields, and has a
+  few methods that are available for all `Field` types.
+
+- `NField` holds counts of objects and is used for correlations with an N in the name,
+  including `NNCorrelation`, `NGCorrelation`, `NKCorrelation`, and `NNNCorrelation`.
+- `GField` holds both counts of objects and the mean shear of those objects.
+  It is used for correlations with a G in the name, including
+  `GGCorrelation`, `NGCorrelation`, `KGCorrelation`, and `GGGCorrelation`.
+- `KField` holds both counts of objects and the mean "kappa" of those objects.
+  It is used for correlations with a K in the name, including
+  `KKCorrelation`, `NKCorrelation`, `KGCorrelation`, and `KKKCorrelation`.
+- `SimpleField` is a different base class, which packages the information in a list
+  rather than a tree. Its subclasses, `NSimpleField`, `GSimpleField`, and
+  `KSimpleField`, are used instead of the regular `Field` types when doing
+  ``pairwise`` correlations.
+
+Typically, one would not create any of these objects directly, but would instead
+use Catalog methods `getNField`, `getGField`, `getKField`. Or indeed, usually, one
+does not even do that, and just lets the relevant ``process`` command do so for you.
+
+.. autoclass:: treecorr.Field
+    :members:
+
+.. autoclass:: treecorr.NField
+    :members:
+
+.. autoclass:: treecorr.GField
+    :members:
+
+.. autoclass:: treecorr.KField
+    :members:
+
+.. autoclass:: treecorr.SimpleField
+    :members:
+
+.. autoclass:: treecorr.NSimpleField
+    :members:
+
+.. autoclass:: treecorr.GSimpleField
+    :members:
+
+.. autoclass:: treecorr.KSimpleField
+    :members:
+
diff --git a/docs/_build/html/_sources/gg.rst.txt b/docs/_build/html/_sources/gg.rst.txt
new file mode 100644
index 00000000..ecdfe92e
--- /dev/null
+++ b/docs/_build/html/_sources/gg.rst.txt
@@ -0,0 +1,10 @@
+
+GGCorrelation: Shear-shear correlations
+---------------------------------------
+
+.. autoclass:: treecorr.GGCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+
diff --git a/docs/_build/html/_sources/ggg.rst.txt b/docs/_build/html/_sources/ggg.rst.txt
new file mode 100644
index 00000000..0f665b30
--- /dev/null
+++ b/docs/_build/html/_sources/ggg.rst.txt
@@ -0,0 +1,14 @@
+
+GGGCorrelation: Shear-shear-shear correlations
+----------------------------------------------
+
+.. autoclass:: treecorr.GGGCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+.. autoclass:: treecorr.GGGCrossCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
diff --git a/docs/_build/html/_sources/guide.rst.txt b/docs/_build/html/_sources/guide.rst.txt
new file mode 100644
index 00000000..505a5f0c
--- /dev/null
+++ b/docs/_build/html/_sources/guide.rst.txt
@@ -0,0 +1,273 @@
+Getting Started Guide
+---------------------
+
+Jupyter Tutorial
+^^^^^^^^^^^^^^^^
+
+This page covers many of the same points as the
+`Jupyter notebook tutorial `_
+available in the TreeCorr repo.
+You may find it useful to work through that as well as, or instead of, reading this guide.
+
+Shear-shear auto-correlation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let's start with how to calculate a shear-shear two-point auto-correlation.
+It's not necessarily the simplest choice of correlation, but this specific
+calculation was the original reason I wrote TreeCorr, so it's close to my heart.
+The basic pattern is as follows::
+
+    cat = treecorr.Catalog(file_name, config)
+    gg = treecorr.GGCorrelation(config)
+    gg.process(cat)
+    gg.write(out_file_name)
+
+Here ``file_name`` is the name of some input file, which has the shear and position
+data of your galaxies. ``config`` is a dictionary with all the configuration
+parameters about how to load the data and define the binning. We'll expand that
+out shortly. Finally, ``out_file_name`` is some output file to write the results.
+
+You can do a cross-correlation between two sets of galaxies very similarly::
+
+    cat1 = treecorr.Catalog(file_name1, config1)
+    cat2 = treecorr.Catalog(file_name2, config2)
+    gg.process(cat1, cat2)
+
+If you would rather not write the results to an output file, but maybe plot them up or do some
+further calculation with them, you can access the resulting fields directly as numpy arrays::
+
+    xip = gg.xip            # The real part of xi+
+    xim = gg.xim            # The real part of xi-
+    logr = gg.logr          # The nominal center of each bin
+    meanlogr = gg.meanlogr  # The mean log(r) within the bins
+    varxi = gg.varxi        # The variance of each xi+ or xi- value
+                            # taking into account shape noise only
+
+See the doc string for `GGCorrelation` for other available attributes.
+
+Other Two-point Correlation Classes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The other kinds of correlations each have their own class:
+
+- `NNCorrelation` = count-count (normal LSS correlation)
+- `GGCorrelation` = shear-shear (e.g. cosmic shear)
+- `KKCorrelation` = kappa-kappa (or any other scalar field)
+- `NGCorrelation` = count-shear (i.e. :math:`\langle \gamma_t \rangle(R)`)
+- `NKCorrelation` = count-kappa (i.e. :math:`\langle \kappa \rangle(R)`)
+- `KGCorrelation` = kappa-shear
+
+You should see their doc strings for details, but they all work similarly.
+For the last three, there is no auto-correlation option, of course, just the cross-correlation.
+
+The other main difference between these correlation classes and GG is that there is only a
+single correlation function, so it is called ``xi`` rather than ``xip`` and ``xim``.
+
+Also, NN does not have any kind of ``xi`` attribute. You need to perform an additional
+calculation involving random catalogs for that.
+See `Using random catalogs` below for more details.
+
+
+Loading a Catalog
+^^^^^^^^^^^^^^^^^
+
+OK, now let's get into some of the details about how to load data into a `Catalog`.
+
+To specify the names of the columns in the input file, as well as other details about
+how to interpret the columns, you can either use a ``config`` dict, as we did above,
+or specify keyword arguments. Either way is fine, although to be honest, the keywords
+are probably more typical, so we'll use that from here on.
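+
+To make the equivalence concrete, here is the same hypothetical catalog loaded both ways
+(the file and column names are placeholders)::
+
+    # Using a config dict:
+    config = {'ra_col': 'RA', 'dec_col': 'DEC', 'g1_col': 'E1', 'g2_col': 'E2',
+              'ra_units': 'hours', 'dec_units': 'degrees'}
+    cat = treecorr.Catalog('input_cat.fits', config)
+
+    # Using keyword arguments, as we will below:
+    cat = treecorr.Catalog('input_cat.fits', ra_col='RA', dec_col='DEC',
+                           g1_col='E1', g2_col='E2',
+                           ra_units='hours', dec_units='degrees')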
+For a shear catalog, you need to specify the position of each galaxy and the
+shear values, g1 and g2. You do this by stating which column in the input catalog
+corresponds to each value you need. For example::
+
+    cat = treecorr.Catalog(file_name='input_cat.fits',
+                           x_col='X_IMAGE', y_col='Y_IMAGE', g1_col='E1', g2_col='E2')
+
+For FITS files, you specify the columns by name, which correspond to the column name
+in the FITS table. For ASCII input files, you specify the column number instead::
+
+    cat = treecorr.Catalog(file_name='input_cat.dat',
+                           x_col=2, y_col=3, g1_col=5, g2_col=6)
+
+where the first column is numbered 1, not 0.
+
+When the positions are given as right ascension and declination on the celestial
+sphere, rather than x and y on a flat projection (like an image), you also need
+to specify what units the angles use::
+
+    cat = treecorr.Catalog(file_name='input_cat.fits',
+                           ra_col='RA', dec_col='DEC', g1_col='E1', g2_col='E2',
+                           ra_units='hours', dec_units='degrees')
+
+For the catalog of the N part of a calculation, you can skip the ``g1_col`` and ``g2_col``.
+Such catalogs only need positions. For a K correlation, you should specify ``k_col`` instead::
+
+    cat = treecorr.Catalog(file_name='input_cat.fits',
+                           ra_col='RA', dec_col='DEC', k_col='KAPPA',
+                           ra_units='hours', dec_units='degrees')
+
+See the documentation for `Catalog` for more options, such as how to flip the sign of
+g1 or g2 (unfortunately not everyone follows the same conventions), use weights,
+skip objects with specific flags, and more.
+
+
+Building a Catalog from numpy arrays
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the provided tools for reading in the data from an input file are insufficient, or if
+the data you want to use are being generated in Python natively, so there is no file
+to read, then you can instead build the `Catalog` directly from numpy arrays::
+
+    x = numpy.array(x_values)    # These might be the output of
+    y = numpy.array(y_values)    # some calculation...
+    g1 = numpy.array(g1_values)
+    g2 = numpy.array(g2_values)
+
+    cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
+
+You always need to include either ``x`` and ``y`` or ``ra`` and ``dec``.
+Which other columns you need depends on what kind of correlation function you want to calculate
+from the data. For GG, you need ``g1`` and ``g2``, but for K correlations, you would use
+``k`` instead.
+
+You can optionally provide a weight column as well with ``w`` if desired.
+This will then perform a weighted correlation using those weights.
+
+Again, see the doc string for `Catalog` for more information.
+
+
+Defining the binning
+^^^^^^^^^^^^^^^^^^^^
+
+For the default `bin_type`, ``"Log"``, the correlation function is binned
+in equally spaced bins in :math:`\log(r)`, where :math:`r` represents the separation
+between two points being correlated.
+
+Typically you would specify the minimum and
+maximum separation you want accumulated as ``min_sep`` and ``max_sep`` respectively,
+along with ``nbins`` to specify how many bins to use::
+
+    gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=10)
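+
+Only three of ``min_sep``, ``max_sep``, ``nbins``, and ``bin_size`` are independent;
+whichever one you omit is computed from the other three. For "Log" binning the relation
+is ``bin_size = log(max_sep/min_sep) / nbins`` (natural log), which you can check on the
+instance, assuming (as here) that the computed value is stored in the ``bin_size``
+attribute::
+
+    gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=10)
+    assert numpy.isclose(gg.bin_size, numpy.log(100./1.)/10)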
+When the positions are given as (ra, dec), then the separations are also angles,
+so you need to specify what units to use. These do not have to be the same units
+as you used for either ra or dec::
+
+    gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=10, sep_units='arcmin')
+
+Most correlation functions of interest in astronomy are roughly power laws, so log
+binning puts similar signal-to-noise in each bin, often making it a good choice.
+However, for some use cases, linear binning is more appropriate. This is possible
+using the ``bin_type`` parameter::
+
+    gg = treecorr.GGCorrelation(min_sep=10., max_sep=15., nbins=5, bin_type='Linear')
+
+See `Binning` for more details about this option and the ``"TwoD"`` binning,
+as well as some other options related to binning.
+
+Finally, the default way of calculating separations is a normal Euclidean metric.
+However, TreeCorr implements a number of other metrics as well, which are useful
+in various situations. See `Metrics` for details.
+
+Three-point Correlation Classes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TreeCorr can also do three-point correlations, to measure how the product of three fields
+depends on the size and shape of the triangle connecting three points.
+So far, we have only implemented the auto-correlation three-point functions:
+
+- `NNNCorrelation`  # count-count-count
+- `GGGCorrelation`  # shear-shear-shear
+- `KKKCorrelation`  # kappa-kappa-kappa
+
+These classes are significantly more complicated than the two-point ones,
+since they have to deal with the geometry of the triangles being binned.
+See their doc strings for more details.
+
+
+Using random catalogs
+^^^^^^^^^^^^^^^^^^^^^
+
+For the NN and NNN correlations, the raw calculation is not sufficient to produce the real
+correlation function. You also need to account for the survey geometry (edges, mask, etc.)
+by running the same calculation with a random catalog (or several) that has a uniform density,
+but the same geometry::
+
+    data = treecorr.Catalog(data_file, config)
+    rand = treecorr.Catalog(rand_file, config)
+    dd = treecorr.NNCorrelation(config)
+    dr = treecorr.NNCorrelation(config)
+    rr = treecorr.NNCorrelation(config)
+    dd.process(data)
+    dr.process(data,rand)
+    rr.process(rand)
+    xi, varxi = dd.calculateXi(rr,dr)
+
+This calculates xi = (DD-2DR+RR)/RR for each bin. This is the Landy-Szalay estimator,
+which is the most widely used estimator for count-count correlation functions. However,
+if you want to use a simpler estimator xi = (DD-RR)/RR, then you can omit the dr parameter.
+The simpler estimator is slightly biased though, so this is not recommended.
+
+After calling `calculateXi`, the ``dd`` object above will have ``xi``
+and ``varxi`` attributes, which store the results of this calculation.
+
+The NG and NK classes also have a `calculateXi` method to allow
+for the use of compensated estimators in those cases as well.
+Calling this function updates the ``xi`` attribute from the uncompensated value to the
+compensated value.
+These correlations do not suffer as much from masking effects,
+so the compensation is not as necessary. However, it does produce a slightly better estimate
+of the correlation function if you are able to use a random catalog.
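+
+As a sketch of the compensated NG case, assuming hypothetical catalogs ``lens_cat``,
+``rand_cat``, and ``source_cat`` (check the `NGCorrelation.calculateXi` docs for the
+exact argument name)::
+
+    ng = treecorr.NGCorrelation(min_sep=1., max_sep=100., nbins=10, sep_units='arcmin')
+    rg = treecorr.NGCorrelation(min_sep=1., max_sep=100., nbins=10, sep_units='arcmin')
+    ng.process(lens_cat, source_cat)  # Tangential shear around the lenses.
+    rg.process(rand_cat, source_cat)  # Tangential shear around random points.
+    ng.calculateXi(rg=rg)             # Updates ng.xi to the compensated estimate.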
+Furthermore, the `process` functions can take lists of Catalogs if desired,
+in which case they will do all the possible combinations.
+This is especially relevant for doing randoms,
+since the statistics get better if you generate several randoms and do all the
+correlations to beat down the noise::
+
+    rand_list = [ treecorr.Catalog(f,config) for f in rand_files ]
+    dr.process(data, rand_list)
+    rr.process(rand_list)
+
+The corresponding three-point NNN calculation is even more complicated, since there are 8 total
+combinations that need to be computed: zeta = (DDD-DDR-DRD-RDD+DRR+RDR+RRD-RRR)/RRR.
+Because of the triangle geometry, we don't have DDR = DRD = RDD, so all 8 need to be computed.
+See the docstring for `calculateZeta` for more details.
+
+Manually accumulating the correlation function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For even more control over the calculation, you can break up the steps in the
+`process` functions. There are typically three steps:
+
+1. Calculate the shear variance or kappa variance as needed (i.e. for anything but NN correlations).
+2. Accumulate the correlations into the bins for each auto-correlation and cross-correlation desired.
+3. Finalize the calculation.
+
+If you have several pairs of catalogs that you want to accumulate into a single correlation
+function, you could write the following::
+
+    lens_cats = [ treecorr.Catalog(f,config) for f in lens_files ]
+    source_cats = [ treecorr.Catalog(f,config) for f in source_files ]
+    ng = treecorr.NGCorrelation(config)
+    varg = treecorr.calculateVarG(source_cats)
+    for c1, c2 in zip(lens_cats, source_cats):
+        ng.process_cross(c1,c2)
+    ng.finalize(varg)
+
+In addition to `process_cross`,
+classes that allow auto-correlations have a
+`process_auto` method for manually processing
+auto-correlations. See the doc strings for these methods for more information.
+
+Breaking up the calculation manually like this is probably not often necessary anymore.
+It used to be useful for dividing a calculation among several machines, which would
+each save their results to disk. These results could then be reassembled and
+finalized after all the jobs were finished.
+
+However, this work mode is now incorporated directly into TreeCorr via the use of
+"patches". See `Patches` for details about how to automatically
+divide up your input catalog into patches and to farm the calculation out to
+multiple machines using MPI.
diff --git a/docs/_build/html/_sources/history.rst.txt b/docs/_build/html/_sources/history.rst.txt
new file mode 100644
index 00000000..3578d318
--- /dev/null
+++ b/docs/_build/html/_sources/history.rst.txt
@@ -0,0 +1,24 @@
+
+Previous History
+================
+
+`Changes from version 4.1 to 4.2
+`_
+
+`Changes from version 4.0 to 4.1
+`_
+
+`Changes from version 3.3 to 4.0
+`_
+
+`Changes from version 3.2 to 3.3
+`_
+
+`Changes from version 3.1 to 3.2
+`_
+
+`Changes from version 3.0 to 3.1
+`_
+
+`Changes from version 2.6 to 3.0
+`_
diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt
new file mode 100644
index 00000000..db0cc87b
--- /dev/null
+++ b/docs/_build/html/_sources/index.rst.txt
@@ -0,0 +1,26 @@
+.. treecorr documentation main file
+
+TreeCorr Documentation
+======================
+
+.. toctree::
+    :maxdepth: 4
+
+    overview
+    catalog
+    correlation2
+    correlation3
+    metric
+    binning
+    patches
+    cov
+    field
+    scripts
+    guide
+    changes
+    history
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/_build/html/_sources/kg.rst.txt b/docs/_build/html/_sources/kg.rst.txt
new file mode 100644
index 00000000..c80fea75
--- /dev/null
+++ b/docs/_build/html/_sources/kg.rst.txt
@@ -0,0 +1,10 @@
+
+KGCorrelation: Kappa-shear correlations
+---------------------------------------
+
+.. autoclass:: treecorr.KGCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+
diff --git a/docs/_build/html/_sources/kk.rst.txt b/docs/_build/html/_sources/kk.rst.txt
new file mode 100644
index 00000000..ad0225ef
--- /dev/null
+++ b/docs/_build/html/_sources/kk.rst.txt
@@ -0,0 +1,10 @@
+
+KKCorrelation: Kappa-kappa correlations
+---------------------------------------
+
+.. autoclass:: treecorr.KKCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+
diff --git a/docs/_build/html/_sources/kkk.rst.txt b/docs/_build/html/_sources/kkk.rst.txt
new file mode 100644
index 00000000..2c502fd3
--- /dev/null
+++ b/docs/_build/html/_sources/kkk.rst.txt
@@ -0,0 +1,14 @@
+
+KKKCorrelation: Kappa-kappa-kappa correlations
+----------------------------------------------
+
+.. autoclass:: treecorr.KKKCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+.. autoclass:: treecorr.KKKCrossCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
diff --git a/docs/_build/html/_sources/metric.rst.txt b/docs/_build/html/_sources/metric.rst.txt
new file mode 100644
index 00000000..88b731cd
--- /dev/null
+++ b/docs/_build/html/_sources/metric.rst.txt
@@ -0,0 +1,217 @@
+
+Metrics
+=======
+
+The correlation functions need to know how to calculate distances between the points,
+that is, the metric defining the space.
+
+In most cases, you will probably want to use the default Metric, called "Euclidean",
+which just uses the normal Euclidean distance between two points. However, there are a few
+other options, which are useful for various applications.
+
+Both `BinnedCorr2` and `BinnedCorr3` take an optional
+``metric`` parameter, which should be one of the following string values:
+
+
+"Euclidean"
+-----------
+
+This is the default metric, and is the only current option for 2-dimensional flat correlations,
+i.e. when the coordinates are given by (x,y), rather than either (x,y,z), (ra,dec), or (ra,dec,r).
+
+For 2-dimensional coordinate systems, the distance is defined as
+
+:math:`d_{\rm Euclidean} = \sqrt{(x_2-x_1)^2 + (y_2-y_1)^2}`
+
+For 3-dimensional coordinate systems, the distance is defined as
+
+:math:`d_{\rm Euclidean} = \sqrt{(x_2-x_1)^2 + (y_2-y_1)^2 + (z_2-z_1)^2}`
+
+For spherical coordinates with distances, (ra,dec,r), the coordinates are first
+converted to Cartesian coordinates and the above formula is used.
+
+For spherical coordinates without distances, (ra, dec), the coordinates are placed on the
+unit sphere and the above formula is used. This means that all distances are really chord
+distances across the sphere, not great circle distances. For small angles, this is a small
+correction, but as the angles get large, the difference between the great circle distance and
+the chord distance becomes significant.
+The conversion formula is
+
+:math:`d_{\rm GC} = 2 \arcsin(d_{\rm Euclidean} / 2)`
+
+TreeCorr applies this formula at the end as part of the `finalize`
+function, so the ``meanr`` and ``meanlogr`` attributes
+will be in terms of great circle distances. However, they will not necessarily be spaced
+precisely uniformly in log(r), since the original bin spacing will have been set up in terms
+of the chord distances.
+
+"Arc"
+-----
+
+This metric is only valid for spherical coordinates (ra,dec).
+
+The distance is defined as
+
+:math:`d_{\rm Arc} = 2 \arcsin(d_{\rm Euclidean} / 2)`
+
+where :math:`d_{\rm Euclidean}` is the above "Euclidean" chord distance.
+
+This metric is significantly slower than the "Euclidean" metric, since it requires trigonometric
+functions for every pair calculation along the way, rather than just at the end.
+In most cases, this extra care is unnecessary, but it provides a means to check if the
+chord calculations are in any way problematic for your particular use case.
+
+Also, unlike the "Euclidean" version, the bin spacing will be uniform in log(r) using the
+actual great circle distances, rather than being based on the chord distances.
+
+
+.. _Rperp:
+
+"Rperp" or "FisherRperp"
+------------------------
+
+This metric is only valid for 3-dimensional coordinates (ra,dec,r) or (x,y,z).
+
+The distance in this metric is defined as
+
+:math:`d_{\rm Rperp} = \sqrt{d_{\rm Euclidean}^2 - r_\parallel^2}`
+
+where :math:`r_\parallel` follows the definition in Fisher et al, 1994 (MNRAS, 267, 927).
+Namely, if :math:`p_1` and :math:`p_2` are the vector positions from Earth for the
+two points, and
+
+:math:`L \equiv \frac{p_1 + p_2}{2}`
+
+then
+
+:math:`r_\parallel = \frac{(p_2 - p_1) \cdot L}{|L|}`
+
+That is, it breaks up the full 3-d distance into perpendicular and parallel components:
+:math:`d_{\rm 3d}^2 = r_\bot^2 + r_\parallel^2`,
+and it identifies the metric separation as just the perpendicular component, :math:`r_\bot`.
+
+Note that this decomposition is really only valid for objects with a relatively small angular
+separation, :math:`\theta`, on the sky, so the two radial vectors are nearly parallel.
+In this limit, the formula for :math:`d` reduces to
+
+:math:`d_{\rm Rperp} \approx \left(\frac{2 r_1 r_2}{r_1+r_2}\right) \theta`
+
+.. warning::
+
+    Prior to version 4.0, the "Rperp" name meant what is now called "OldRperp".
+    The difference can be significant for some use cases, so if consistency across
+    versions is important to you, you should either switch to using "OldRperp"
+    or investigate whether the change to "FisherRperp" is important for your
+    particular science case.
+
+
+"OldRperp"
+----------
+
+This metric is only valid for 3-dimensional coordinates (ra,dec,r) or (x,y,z).
+
+This is the version of the Rperp metric that TreeCorr used in versions 3.x.
+In version 4.0, we switched the definition of :math:`r_\parallel` to the one
+used by Fisher et al, 1994 (MNRAS, 267, 927). The difference turns out to be
+non-trivial in some realistic use cases, so we preserve the ability to use the
+old version with this metric.
+
+Specifically, if :math:`r_1` and :math:`r_2` are the two distances from Earth,
+then this metric uses :math:`r_\parallel \equiv r_2-r_1`.
+
+The distance is then defined as
+
+:math:`d_{\rm OldRperp} = \sqrt{d_{\rm Euclidean}^2 - r_\parallel^2}`
+
+That is, it breaks up the full 3-d distance into perpendicular and parallel components:
+:math:`d_{\rm 3d}^2 = r_\bot^2 + r_\parallel^2`,
+and it identifies the metric separation as just the perpendicular component, :math:`r_\bot`.
+
+Note that this decomposition is really only valid for objects with a relatively small angular
+separation, :math:`\theta`, on the sky, so the two radial vectors are nearly parallel.
+In this limit, the formula for :math:`d` reduces to
+
+:math:`d_{\rm OldRperp} \approx \left(\sqrt{r_1 r_2}\right) \theta`
+
+
+"Rlens"
+-------
+
+This metric is only valid when the first catalog uses 3-dimensional coordinates
+(ra,dec,r) or (x,y,z). The second catalog may take either 3-d coordinates or spherical
+coordinates (ra,dec).
+
+The distance is defined as
+
+:math:`d_{\rm Rlens} = r_1 \sin(\theta)`
+
+where :math:`\theta` is the opening angle between the two objects and :math:`r_1` is the
+radial distance to the object in the first catalog.
+In other words, this is the distance from the first object (nominally the "lens") to the
+line of sight to the second object (nominally the "source"). This is commonly referred to
+as the impact parameter of the light path from the source as it passes the lens.
+
+Since the basic metric does not use the radial distance to the source galaxies (:math:`r_2`),
+they are not required. You may just provide (ra,dec) coordinates for the sources.
+However, if you want to use the ``min_rpar`` or ``max_rpar`` options
+(see `Restrictions on the Line of Sight Separation` below),
+then the source coordinates need to include r.
+
+"Periodic"
+----------
+
+This metric is equivalent to the Euclidean metric for either 2-d or 3-d coordinate systems,
+except that the space is given periodic boundaries, and the distance between two
+points is taken to be the *smallest* distance in the periodically repeating space.
+It is invalid for Spherical coordinates.
+
+When constructing the correlation object, you need to set ``period`` if the period is the
+same in each direction. Or if you want different periods in each direction, you can
+set ``xperiod``, ``yperiod``, and (if 3-d) ``zperiod`` individually.
+We call these periods :math:`L_x`, :math:`L_y`, and :math:`L_z` below.
+
+The distance is defined as
+
+.. math::
+
+    dx &= \min \left(|x_2 - x_1|, L_x - |x_2-x_1| \right) \\
+    dy &= \min \left(|y_2 - y_1|, L_y - |y_2-y_1| \right) \\
+    dz &= \min \left(|z_2 - z_1|, L_z - |z_2-z_1| \right)
+
+.. math::
+
+    d_{\rm Periodic} = \sqrt{dx^2 + dy^2 + dz^2}
+
+Of course, for 2-dimensional coordinate systems, :math:`dz = 0`.
+
+This metric is particularly relevant for data generated from N-body simulations, which
+often use periodic boundary conditions.
+
+
+Restrictions on the Line of Sight Separation
+--------------------------------------------
+
+There are two additional parameters that are tightly connected to the metric space:
+``min_rpar`` and ``max_rpar``.
+These set the minimum and maximum values of :math:`r_\parallel` for pairs to be included in the
+correlations.
+
+This is most typically relevant for the Rperp or Rlens metrics, but we now (as of version 4.2)
+allow these parameters for any metric.
+
+The two different Rperp conventions (FisherRperp and OldRperp) have different definitions of
+:math:`r_\parallel` as described above, which are used in the definition of the metric distances.
+These are the same :math:`r_\parallel` definitions that are used for the min and max values
+if ``min_rpar`` and/or ``max_rpar`` are given.
+For all other metrics, we use the FisherRperp definition for :math:`r_\parallel` if needed
+for this purpose.
+
+The sign of :math:`r_\parallel` is defined such that positive values mean
+the object from the second catalog is farther away. Thus, if the first catalog represents
+lenses and the second catalog represents lensed source galaxies, then setting
+``min_rpar`` = 0 will restrict the sources to being in the background of each lens.
+Contrariwise, setting ``max_rpar`` = 0 will restrict to pairs where the object in the first
+catalog is behind the object in the second catalog.
+
+Another common use case is to restrict to pairs that are near each other in line of sight distance.
+Setting ``min_rpar`` = -50, ``max_rpar`` = 50 will restrict the pairs to only those that are
+separated by no more than 50 Mpc (say, assuming the catalog distances are given in Mpc) along
+the radial direction.
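+
+As a sketch of that last case, with hypothetical catalogs ``cat1`` and ``cat2`` that both
+have (ra, dec, r) positions, with r in Mpc::
+
+    nn = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=10,
+                                metric='Rperp', min_rpar=-50, max_rpar=50)
+    nn.process(cat1, cat2)  # Only accumulates pairs within 50 Mpc along the line of sight.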
diff --git a/docs/_build/html/_sources/ng.rst.txt b/docs/_build/html/_sources/ng.rst.txt
new file mode 100644
index 00000000..1523c4a7
--- /dev/null
+++ b/docs/_build/html/_sources/ng.rst.txt
@@ -0,0 +1,10 @@
+
+NGCorrelation: Count-shear correlations
+---------------------------------------
+
+.. autoclass:: treecorr.NGCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+
diff --git a/docs/_build/html/_sources/nk.rst.txt b/docs/_build/html/_sources/nk.rst.txt
new file mode 100644
index 00000000..7535a528
--- /dev/null
+++ b/docs/_build/html/_sources/nk.rst.txt
@@ -0,0 +1,10 @@
+
+NKCorrelation: Count-kappa correlations
+---------------------------------------
+
+.. autoclass:: treecorr.NKCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+
diff --git a/docs/_build/html/_sources/nn.rst.txt b/docs/_build/html/_sources/nn.rst.txt
new file mode 100644
index 00000000..84131c7d
--- /dev/null
+++ b/docs/_build/html/_sources/nn.rst.txt
@@ -0,0 +1,10 @@
+
+NNCorrelation: Count-count correlations
+---------------------------------------
+
+.. autoclass:: treecorr.NNCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+
diff --git a/docs/_build/html/_sources/nnn.rst.txt b/docs/_build/html/_sources/nnn.rst.txt
new file mode 100644
index 00000000..63aa2b51
--- /dev/null
+++ b/docs/_build/html/_sources/nnn.rst.txt
@@ -0,0 +1,14 @@
+
+NNNCorrelation: Count-count-count correlations
+----------------------------------------------
+
+.. autoclass:: treecorr.NNNCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
+.. autoclass:: treecorr.NNNCrossCorrelation
+    :members:
+    :special-members:
+    :show-inheritance:
+
diff --git a/docs/_build/html/_sources/overview.rst.txt b/docs/_build/html/_sources/overview.rst.txt
new file mode 100644
index 00000000..049aa80b
--- /dev/null
+++ b/docs/_build/html/_sources/overview.rst.txt
@@ -0,0 +1,6 @@
+
+Overview
+========
+
+.. include:: ../README.rst
+
diff --git a/docs/_build/html/_sources/params.rst.txt b/docs/_build/html/_sources/params.rst.txt
new file mode 100644
index 00000000..7e13f61f
--- /dev/null
+++ b/docs/_build/html/_sources/params.rst.txt
@@ -0,0 +1,706 @@
+
+Configuration Parameters
+========================
+
+This section describes the various configuration parameters for controlling
+what the `corr2` and `corr3` scripts (or functions) do:
+
+Parameters about the input file(s)
+----------------------------------
+
+:file_name: (str or list)
+    The file(s) with the data to be correlated.
+
+    For an auto-correlation, like cosmic shear, this will be the only file
+    name you need to specify. This parameter is always required, and
+    depending on what kind of correlation you are doing, you may need to
+    specify others below.
+
+    Normally, there would only be a single file name here, but sometimes
+    the galaxy data comes in multiple files. To treat them all as though
+    they were a single large catalog, you may specify a list of file names
+    here::
+
+        file_name : [ file1.dat, file2.dat, file3.dat ]
+
+    If you are specifying this on the command line, you'll need to put
+    quotes around the names, or it won't be parsed correctly::
+
+        file_name="[file1.dat,file2.dat,file3.dat]"
+
+:file_name2: (str or list)
+    The file(s) to use for the second field for a cross-correlation.
+
+    If you want to cross-correlate one file (or set of files) with another, then
+    ``file_name2`` is used to specify the second thing being correlated. e.g.
+    for galaxy-galaxy lensing, ``file_name`` should be the catalog of lenses, and
+    ``file_name2`` should be the catalog of source shear values.
+
+:file_name3: (str or list)
+    The file(s) to use for the third field for a three-point cross-correlation.
+
+:rand_file_name: (str or list)
+    If necessary, a list of random files with the same masking as the ``file_name`` catalog.
+:rand_file_name2: (str or list)
+    If necessary, a list of random files with the same masking as the ``file_name2`` catalog.
+:rand_file_name3: (str or list)
+    If necessary, a list of random files with the same masking as the ``file_name3`` catalog.
+
+    When doing NN and NNN correlations, you need to account for masks and variable
+    depth by providing a file or list of files that correspond to a uniform-density
+    field as observed with the same masking and other observational
+    details. For cross-correlations, you need to provide both of the above
+    values to separately calibrate the first and second fields.
+
+    ``rand_file_name`` may also be used for NG and NK correlations, but it is not
+    required in those cases.
+
+:file_list: (str) A text file with file names in lieu of ``file_name``.
+:file_list2: (str) A text file with file names in lieu of ``file_name2``.
+:file_list3: (str) A text file with file names in lieu of ``file_name3``.
+:rand_file_list: (str) A text file with file names in lieu of ``rand_file_name``.
+:rand_file_list2: (str) A text file with file names in lieu of ``rand_file_name2``.
+:rand_file_list3: (str) A text file with file names in lieu of ``rand_file_name3``.
+
+    If you have a list of file names, it may be cumbersome to list them all
+    in the ``file_name`` (etc) parameter. It may be easier to do something like
+    ``ls *.cat > catlist`` and then use ``file_list=catlist`` as the list of
+    file names to use. Of course, it is an error to specify both ``file_list``
+    and ``file_name`` (or any of the other corresponding pairs).
+
+:file_type: (ASCII, FITS, HDF5, or Parquet) The file type of the input files.
+:delimiter: (str, default = '\0') The delimiter between input values in an ASCII catalog.
+:comment_marker: (str, default = '#') The first (non-whitespace) character of comment lines in an input ASCII catalog.
+
+    The default file type is normally ASCII. However, if the file name
+    includes ".fit" in it, then a fits binary table is assumed.
+    You can override this behavior using ``file_type``.
+
+    Furthermore, you may specify a delimiter for ASCII catalogs if desired.
+    e.g. delimiter=',' for a comma-separated value file. Similarly,
+    comment lines usually begin with '#', but you may specify something
+    different if necessary.
+
+:ext: (int/str, default=1 for FITS or root for HDF5) The extension (fits) or group (hdf) to read from
+
+    Normally if you are using a fits file, the binary fits table is
+    taken from the first extension, HDU 1. If you want to read from a
+    different HDU, you can specify which one to use here. For HDF files,
+    the default is to read from the root of the file, but you can also
+    specify group names like "/data/cat1"
+
+:first_row: (int, default=1)
+:last_row: (int, default=-1)
+:every_nth: (int, default=1)
+
+    You can optionally not use all the rows in the input file.
+    You may specify ``first_row``, ``last_row``, or both to limit the rows being used.
+    The rows are numbered starting with 1. If ``last_row`` is not positive, it
+    means to read through to the end of the file. If ``every_nth`` is set, it will skip
+    rows, selecting only 1 out of every n rows.
+
+:npatch: (int, default=1)
+
+    How many patches to split the catalog into (using kmeans if no other
+    patch information is provided) for the purpose of jackknife variance
+    or other options that involve running via patches. (default: 1)
+
+    .. note::
+
+        If the catalog has ra,dec,r positions, the patches will
+        be made using just ra,dec.
+
+:kmeans_init: (str, default='tree')
+:kmeans_alt: (bool, default=False)
+
+    If using kmeans to make patches, these two parameters specify which init method
+    to use and whether to use the alternate kmeans algorithm.
+    cf. `Field.run_kmeans`
+
+:patch_centers: (str)
+
+    As an alternative to setting the patches by hand or using kmeans, you
+    may instead give ``patch_centers``, either as a file name or an array,
+    from which the patches will be determined.
+
+:x_col: (int/str) Which column to use for x.
+:y_col: (int/str) Which column to use for y.
+:ra_col: (int/str) Which column to use for ra.
+:dec_col: (int/str) Which column to use for dec.
+
+    For the positions of the objects, you can specify either x,y values, which
+    imply a flat-sky approximation has already been performed (or ignored),
+    or ra,dec values, which are of course positions on the curved sky.
+
+    For ASCII files, the columns are specified by number, starting with 1 being
+    the first column (not 0!).
+    For FITS files, the columns are specified by name, not number.
+
+:x_units: (str, default=None) The units of x values.
+:y_units: (str, default=None) The units of y values.
+:ra_units: (str) The units of ra values.
+:dec_units: (str) The units of dec values.
+
+    All distances on the sky include a "units" parameter to specify in what
+    units the values are given. Options for units are radians, hours,
+    degrees, arcmin, arcsec. For ra, dec the units field is required.
+    But for x,y, you can ignore all the unit issues, in which case the
+    output distances will be in the same units as the input positions.
+
+:r_col: (int/str) Which column to use for r.
+
+    When using spherical coordinates, ra,dec, you can optionally provide a
+    distance to the object. In this case, the calculation will be done in
+    three dimensional distances rather than angular distances. The distances
+    between objects will be the 3-D Euclidean distance, so you should define
+    your r values appropriately, given whatever cosmology you are assuming.
+
+    ``r_col`` is invalid in conjunction with ``x_col``, ``y_col``.
+
+:z_col: (int/str) Which column to use for z.
+
+    Rather than specifying 3-D coordinates as (ra, dec, r), you may instead
+    specify them as (x, y, z).
+
+    ``z_col`` is invalid in conjunction with ``ra_col``, ``dec_col``.
+
+:g1_col: (int/str) Which column to use for g1.
+:g2_col: (int/str) Which column to use for g2.
+
+    If you are doing one of the shear correlation functions (i.e. NG, KG, GG),
+    then you need to specify the shear estimates of the corresponding galaxies.
+    The g1,g2 values are taken to be reduced shear values. They should be
+    unbiased estimators of g1,g2, so they are allowed to exceed :math:`|g| = 1`.
+    (This is required for some methods to produce unbiased estimates.)
+
+:k_col: (int/str) Which column to use for kappa.
+
+    If you are doing one of the kappa correlation functions (i.e. NK, KG, KK),
+    then you need to specify the column to use for kappa. While kappa is
+    nominally the lensing convergence, it could really be any scalar quantity,
+    like temperature, size, etc.
+
+:patch_col: (int/str) Which column to use for patch.
+
+    Use precalculated patch numbers to split the catalog into patches.
+
+:w_col: (int/str) Which column to use for the weight (if any).
+:wpos_col: (int/str) Which column to use for the position weight (if any).
+
+    The weight column is optional. If omitted, all weights are taken to be 1.
+
+:flag_col: (int/str) Which column to use for the flag (if any).
+:ignore_flag: (int) What flag(s) should be ignored.
+:ok_flag: (int) What flag(s) are ok to use.
+
+    The code can be set to ignore objects with a particular flag value if desired.
+    Some codes output a flag along with the shear value. Typically any flag != 0
+    should be ignored, but you can optionally have the code ignore only particular
+    flags, treating the flag value as a bit mask. If ``ignore_flag`` is set to
+    something, then objects with ``(flag & ignore_flag != 0)`` will be ignored.
+    If ``ok_flag`` is set, then objects with ``(flag & ~ok_flag != 0)`` will be ignored.
+    The default is equivalent to ``ok_flag = 0``, which ignores any flag != 0.
+
+:x_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``x_col``.
+:y_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``y_col``.
+:z_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``z_col``.
+:ra_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``ra_col``.
+:dec_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``dec_col``.
+:r_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``r_col``.
+:g1_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``g1_col``.
+:g2_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``g2_col``.
+:k_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``k_col``.
+:patch_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``patch_col``.
+:w_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``w_col``.
+:wpos_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``wpos_col``.
+:flag_ext: (int/str) Which HDU (fits) or group (HDF) to use for the ``flag_col``.
+
+    If you want to use an extension other than the first one, normally you would
+    specify which fits extension or HDF5 group to use with the ``ext`` parameter.
+    However, if different columns need to come from different HDUs, then you can
+    override the default (given by ``ext``, or '1' (fits), or '/' (HDF) if there
+    is no ``ext`` parameter) for each column separately.
+
+:allow_xyz: (bool, default=False)
+
+    Whether to allow x,y,z columns in conjunction with ra, dec.
+
+:flip_g1: (bool, default=False) Whether to flip the sign of g1.
+:flip_g2: (bool, default=False) Whether to flip the sign of g2.
+
+    Sometimes there are issues with the sign conventions of gamma. If you
+    need to flip the sign of g1 or g2, you may do that with ``flip_g1`` or ``flip_g2``
+    (or both).
+
+:keep_zero_weight: (bool, default=False)
+
+    Whether to keep objects with wpos=0 in the catalog (including
+    any objects that indirectly get wpos=0 due to NaN or flags), so they
+    would be included in ntot and also in npairs calculations that use
+    this Catalog, although of course they do not contribute to the accumulated
+    weight of pairs.
+
+.. note::
+
+    - If you are cross-correlating two files with different formats, you may
+      set any of the above items from ``file_type`` to ``flip_g2`` as a two element
+      list (i.e. two values separated by a space). In this case, the first
+      item refers to the file(s) in ``file_name``, and the second item refers
+      to the file(s) in ``file_name2``.
+
+    - You may not mix (x,y) columns with (ra,dec) columns, since the meaning
+      would be ambiguous.
+
+    - If you don't need a particular column for one of the files, you may
+      use 0 to indicate not to read that column. This is true for
+      any format of input catalog.
+
+    - Also, if the given column only applies to one of the two input files
+      (e.g. k_col for an n-kappa cross-correlation) then you may specify just
+      the column name or number for the file to which it does apply.
+
+
+Parameters about the binned correlation function to be calculated
+------------------------------------------------------------------
+
+
+:bin_type: (str, default='Log') Which type of binning should be used.
+
+    See `Binning` for details.
+
+:min_sep: (float) The minimum separation to include in the output.
+:max_sep: (float) The maximum separation to include in the output.
+:nbins: (int) The number of output bins to use.
+:bin_size: (float) The size of the output bins in log(sep).
+
+    The bins for the histogram may be defined by setting any 3 of the above 4
+    parameters. The fourth one is automatically calculated from the values
+    of the other three.
+
+    See `Binning` for details about how these parameters are used for the
+    different choice of ``bin_type``.
+
+:sep_units: (str, default=None) The units to use for ``min_sep`` and ``max_sep``.
+
+    ``sep_units`` is also the units of R in the output file. For ra, dec values,
+    you should always specify ``sep_units`` explicitly to indicate what angular
+    units you want to use for the separations. But if your catalogs use x,y,
+    or if you specify 3-d correlations with r, then the output separations are
+    in the same units as the input positions.
+
+    See `sep_units` for more discussion about this parameter.
+
+:bin_slop: (float, default=1) The fraction of a bin width by which it is ok to let the pairs miss the correct bin.
+
+    The code normally stops traversing the tree when all of the pair distances
+    for two given nodes have a spread that is less than the bin size, i.e.
+:sep_units: (str, default=None) The units to use for ``min_sep`` and ``max_sep``.
+
+    ``sep_units`` is also the units of R in the output file. For ra, dec values,
+    you should always specify ``sep_units`` explicitly to indicate what angular
+    units you want to use for the separations. But if your catalogs use x,y,
+    or if you specify 3-d correlations with r, then the output separations are
+    in the same units as the input positions.
+
+    See `sep_units` for more discussion about this parameter.
+
+:bin_slop: (float, default=1) The fraction of a bin width by which it is ok to let the pairs miss the correct bin.
+
+    The code normally stops traversing the tree when all of the distance
+    pairs for the two nodes have a spread in distance that is less than the
+    bin size, i.e. when the error from the tree traversal is less than the uncertainty
+    induced by just binning the results into a histogram. This factor can be changed
+    by the parameter ``bin_slop``. It is probably best to keep it at 1, but if you want to
+    make the code more conservative, you can decrease it, in which case the error
+    from using the tree nodes will be less than the error in the histogram binning.
+    (In practice, if you are going to do this, you are probably better off just
+    decreasing the ``bin_size`` instead and leaving ``bin_slop=1``.)
+
+    See `bin_slop` for more discussion about this parameter.
+
+:brute: (bool/int, default=False) Whether to do the "brute force" algorithm, where the
+    tree traversal always goes to the leaf cells.
+
+    In addition to True or False, whose meanings are obvious, you may also set
+    ``brute`` to 1 or 2, which means to go to the leaves for cat1 or cat2, respectively,
+    but stop traversing the other catalog according to the normal ``bin_slop`` criterion.
+
+    See `brute` for more discussion about this parameter.
+
+:min_u: (float) The minimum u=d3/d2 to include for three-point functions.
+:max_u: (float) The maximum u=d3/d2 to include for three-point functions.
+:nubins: (int) The number of output bins to use for u.
+:ubin_size: (float) The size of the output bins for u.
+
+:min_v: (float) The minimum positive v=(d1-d2)/d3 to include for three-point functions.
+:max_v: (float) The maximum positive v=(d1-d2)/d3 to include for three-point functions.
+:nvbins: (int) The number of output bins to use for positive v.
+    The total number of bins in the v direction will be twice this number.
+:vbin_size: (float) The size of the output bins for v.
+
+:metric: (str, default='Euclidean') Which metric to use for distance measurements.
+
+    See `Metrics` for details.
+
+:min_rpar: (float) If the metric supports it, the minimum Rparallel to allow for pairs
+    to be included in the correlation function.
+:max_rpar: (float) If the metric supports it, the maximum Rparallel to allow for pairs
+    to be included in the correlation function.
+
+:period: (float) For the 'Periodic' metric, the period to use in all directions.
+:xperiod: (float) For the 'Periodic' metric, the period to use in the x direction.
+:yperiod: (float) For the 'Periodic' metric, the period to use in the y direction.
+:zperiod: (float) For the 'Periodic' metric, the period to use in the z direction.
+
+
+Parameters about the output file(s)
+-----------------------------------
+
+The kind of correlation function that the code will calculate is based on
+which output file(s) you specify. It will do the calculation(s) relevant for
+each output file you set. For each output file, the first line of the output
+says what the columns are. See the descriptions below for more information
+about the output columns.
+
+:nn_file_name: (str) The output filename for the count-count correlation function.
+
+    This is the normal density two-point correlation function.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin
+    - ``meanR`` = The mean separation of the points that went into the bin.
+    - ``meanlogR`` = The mean log(R) of the points that went into the bin.
+    - ``xi`` = The correlation function.
+    - ``sigma_xi`` = The 1-sigma error bar for xi.
+    - ``DD``, ``RR`` = The raw numbers of pairs for the data and randoms
+    - ``DR`` (if ``nn_statistic=compensated``) = The cross terms between data and random.
+    - ``RD`` (if ``nn_statistic=compensated`` for a cross-correlation) = The cross term between random and data, which for a cross-correlation is not equivalent to ``DR``.
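+For reference, the compensated (Landy-Szalay) estimator described under
+``nn_statistic`` below can be reproduced directly from these columns. A minimal
+sketch, assuming an ASCII output file named ``nn.out`` (a hypothetical name)::
+
+    >>> import numpy as np
+    >>> data = np.genfromtxt('nn.out', names=True)
+    >>> dd, dr, rr = data['DD'], data['DR'], data['RR']
+    >>> xi = (dd - 2.*dr + rr) / rr   # should match the xi column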
+:nn_statistic: (str, default='compensated') Which statistic to use for xi as the estimator of the NN correlation function.
+
+    Options are (D = data catalog, R = random catalog):
+
+    - 'compensated' is the now-normal Landy-Szalay statistic: xi = (DD-2DR+RR)/RR, or for cross-correlations, xi = (DD-DR-RD+RR)/RR
+    - 'simple' is the older version: xi = (DD/RR - 1)
+
+:ng_file_name: (str) The output filename for the count-shear correlation function.
+
+    This is the count-shear correlation function, often called galaxy-galaxy
+    lensing.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin
+    - ``meanR`` = The mean separation of the points that went into the bin.
+    - ``meanlogR`` = The mean log(R) of the points that went into the bin.
+    - ``gamT`` = The mean tangential shear with respect to the point in question.
+    - ``gamX`` = The shear component 45 degrees from the tangential direction.
+    - ``sigma`` = The 1-sigma error bar for ``gamT`` and ``gamX``.
+    - ``weight`` = The total weight of the pairs in each bin.
+    - ``npairs`` = The total number of pairs in each bin.
+
+:ng_statistic: (str, default='compensated' if ``rand_files`` is given, otherwise 'simple') Which statistic to use for the mean shear as the estimator of the NG correlation function.
+
+    Options are:
+
+    - 'compensated' is similar to the Landy-Szalay statistic.
+      Define:
+
+      - NG = Sum(gamma around data points)
+      - RG = Sum(gamma around random points), scaled to be equivalent in effective number as the number of pairs in NG.
+      - npairs = number of pairs in NG.
+
+      Then this statistic is gamT = (NG-RG)/npairs.
+    - 'simple' is the normal version: gamT = NG/npairs
+
+:gg_file_name: (str) The output filename for the shear-shear correlation function.
+
+    This is the shear-shear correlation function, used for cosmic shear.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin
+    - ``meanR`` = The mean separation of the points that went into the bin.
+    - ``meanlogR`` = The mean log(R) of the points that went into the bin.
+    - ``xip`` = <g1 g1 + g2 g2>(R), where g1 and g2 are measured with respect to the line joining the two galaxies.
+    - ``xim`` = <g1 g1 - g2 g2>(R), where g1 and g2 are measured with respect to the line joining the two galaxies.
+    - ``xip_im`` = <g2 g1 - g1 g2>(R).
+
+      In the formulation of xi+ using complex numbers, this is the imaginary component.
+      It should normally be consistent with zero, especially for an
+      auto-correlation, because if every pair were counted twice to
+      get each galaxy in both positions, then this would come out
+      exactly zero.
+
+    - ``xim_im`` = <g2 g1 + g1 g2>(R).
+
+      In the formulation of xi- using complex
+      numbers, this is the imaginary component.
+      It should be consistent with zero for parity invariant shear
+      fields.
+
+    - ``sigma_xi`` = The 1-sigma error bar for xi+ and xi-.
+    - ``weight`` = The total weight of the pairs in each bin.
+    - ``npairs`` = The total number of pairs in each bin.
+
+:nk_file_name: (str) The output filename for the count-kappa correlation function.
+
+    This is nominally the kappa version of the ng calculation. However, k is
+    really any scalar quantity, so it can be used for temperature, size, etc.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin
+    - ``meanR`` = The mean separation of the points that went into the bin.
+    - ``meanlogR`` = The mean log(R) of the points that went into the bin.
+    - ``kappa`` = The mean kappa this distance from the foreground points.
+    - ``sigma`` = The 1-sigma error bar for ``kappa``.
+    - ``weight`` = The total weight of the pairs in each bin.
+    - ``npairs`` = The total number of pairs in each bin.
+:nk_statistic: (str, default='compensated' if ``rand_files`` is given, otherwise 'simple') Which statistic to use for the mean kappa as the estimator of the NK correlation function.
+
+    Options are:
+
+    - 'compensated' is similar to the Landy-Szalay statistic.
+      Define:
+
+      - NK = Sum(kappa around data points)
+      - RK = Sum(kappa around random points), scaled to be equivalent in effective number as the number of pairs in NK.
+      - npairs = number of pairs in NK.
+
+      Then this statistic is <kappa> = (NK-RK)/npairs.
+    - 'simple' is the normal version: <kappa> = NK/npairs
+
+:kk_file_name: (str) The output filename for the kappa-kappa correlation function.
+
+    This is the kappa-kappa correlation function. However, k is really any
+    scalar quantity, so it can be used for temperature, size, etc.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin
+    - ``meanR`` = The mean separation of the points that went into the bin.
+    - ``meanlogR`` = The mean log(R) of the points that went into the bin.
+    - ``xi`` = The correlation function.
+    - ``sigma_xi`` = The 1-sigma error bar for xi.
+    - ``weight`` = The total weight of the pairs in each bin.
+    - ``npairs`` = The total number of pairs in each bin.
+
+:kg_file_name: (str) The output filename for the kappa-shear correlation function.
+
+    This is the kappa-shear correlation function. Essentially, this is just
+    galaxy-galaxy lensing, weighting the tangential shears by the foreground
+    kappa values.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin
+    - ``meanR`` = The mean separation of the points that went into the bin.
+    - ``meanlogR`` = The mean log(R) of the points that went into the bin.
+    - ``kgamT`` = The kappa-weighted mean tangential shear.
+    - ``kgamX`` = The kappa-weighted shear component 45 degrees from the tangential direction.
+    - ``sigma`` = The 1-sigma error bar for ``kgamT`` and ``kgamX``.
+    - ``weight`` = The total weight of the pairs in each bin.
+    - ``npairs`` = The total number of pairs in each bin.
+
+:nnn_file_name: (str) The output filename for the count-count-count correlation function.
+
+    This is the three-point correlation function of number counts.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin in R = d2, where d1 > d2 > d3
+    - ``u_nom`` = The center of the bin in u = d3/d2
+    - ``v_nom`` = The center of the bin in v = +-(d1-d2)/d3
+    - ``meand1`` = The mean value of d1 for the triangles in each bin
+    - ``meanlogd1`` = The mean value of log(d1) for the triangles in each bin
+    - ``meand2`` = The mean value of d2 for the triangles in each bin
+    - ``meanlogd2`` = The mean value of log(d2) for the triangles in each bin
+    - ``meand3`` = The mean value of d3 for the triangles in each bin
+    - ``meanlogd3`` = The mean value of log(d3) for the triangles in each bin
+    - ``zeta`` = The correlation function.
+    - ``sigma_zeta`` = The 1-sigma error bar for zeta.
+    - ``DDD``, ``RRR`` = The raw numbers of triangles for the data and randoms
+    - ``DDR``, ``DRD``, ``RDD``, ``DRR``, ``RDR``, ``RRD`` (if ``nnn_statistic=compensated``) = The cross terms between data and random.
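+To make the (R, u, v) parametrization used by these columns concrete, here is a
+minimal sketch (a hypothetical helper, not part of TreeCorr) computing the bin
+variables from three sorted side lengths::
+
+    >>> def triangle_ruv(d1, d2, d3):
+    ...     """Compute (R, u, v) for side lengths d1 >= d2 >= d3.
+    ...     The sign of v encodes the triangle orientation; positive is assumed here.
+    ...     """
+    ...     assert d1 >= d2 >= d3
+    ...     return d2, d3/d2, (d1-d2)/d3
+    >>> triangle_ruv(5., 4., 2.)
+    (4.0, 0.5, 0.5)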
+:nnn_statistic: (str, default='compensated') Which statistic to use for zeta as the estimator of the NNN correlation function.
+
+    Options are:
+
+    - 'compensated' is the Szapudi & Szalay (1998) estimator:
+      zeta = (DDD-DDR-DRD-RDD+DRR+RDR+RRD-RRR)/RRR
+    - 'simple' is the older version: zeta = (DDD/RRR - 1), although this is not actually
+      an estimator of zeta. Rather, it estimates zeta(d1,d2,d3) + xi(d1) + xi(d2) + xi(d3).
+
+:ggg_file_name: (str) The output filename for the shear-shear-shear correlation function.
+
+    This is the shear three-point correlation function. We use the "natural components"
+    suggested by Schneider & Lombardi (2003): Gamma_0, Gamma_1, Gamma_2, Gamma_3.
+    All are complex-valued functions of (d1,d2,d3). They offer several options for the
+    projection direction. We choose to use the triangle centroid as the reference point.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin in R = d2, where d1 > d2 > d3
+    - ``u_nom`` = The center of the bin in u = d3/d2
+    - ``v_nom`` = The center of the bin in v = +-(d1-d2)/d3
+    - ``meand1`` = The mean value of d1 for the triangles in each bin
+    - ``meanlogd1`` = The mean value of log(d1) for the triangles in each bin
+    - ``meand2`` = The mean value of d2 for the triangles in each bin
+    - ``meanlogd2`` = The mean value of log(d2) for the triangles in each bin
+    - ``meand3`` = The mean value of d3 for the triangles in each bin
+    - ``meanlogd3`` = The mean value of log(d3) for the triangles in each bin
+    - ``gam0r`` = The real part of Gamma_0.
+    - ``gam0i`` = The imag part of Gamma_0.
+    - ``gam1r`` = The real part of Gamma_1.
+    - ``gam1i`` = The imag part of Gamma_1.
+    - ``gam2r`` = The real part of Gamma_2.
+    - ``gam2i`` = The imag part of Gamma_2.
+    - ``gam3r`` = The real part of Gamma_3.
+    - ``gam3i`` = The imag part of Gamma_3.
+    - ``sigma_gam`` = The 1-sigma error bar for the Gamma values.
+    - ``weight`` = The total weight of the triangles in each bin.
+    - ``ntri`` = The total number of triangles in each bin.
+
+:kkk_file_name: (str) The output filename for the kappa-kappa-kappa correlation function.
+
+    This is the three-point correlation function of a scalar field.
+
+    The output columns are:
+
+    - ``R_nom`` = The center of the bin in R = d2, where d1 > d2 > d3
+    - ``u_nom`` = The center of the bin in u = d3/d2
+    - ``v_nom`` = The center of the bin in v = +-(d1-d2)/d3
+    - ``meand1`` = The mean value of d1 for the triangles in each bin
+    - ``meanlogd1`` = The mean value of log(d1) for the triangles in each bin
+    - ``meand2`` = The mean value of d2 for the triangles in each bin
+    - ``meanlogd2`` = The mean value of log(d2) for the triangles in each bin
+    - ``meand3`` = The mean value of d3 for the triangles in each bin
+    - ``meanlogd3`` = The mean value of log(d3) for the triangles in each bin
+    - ``zeta`` = The correlation function.
+    - ``sigma_zeta`` = The 1-sigma error bar for zeta.
+    - ``weight`` = The total weight of the triangles in each bin.
+    - ``ntri`` = The total number of triangles in each bin.
+
+:precision: (int) The number of digits after the decimal in the output.
+
+    All output quantities are printed using scientific notation, so this sets
+    the number of digits output for all values. The default precision is 4,
+    so if you want more (or less) precise values, you can set this to something
+    else.
+
+
+Derived output quantities
+-------------------------
+
+The rest of these output files are calculated based on one or more correlation
+functions.
+:m2_file_name: (str) The output filename for the aperture mass statistics.
+
+    This file outputs the aperture mass variance and related quantities,
+    derived from the shear-shear correlation function.
+
+    The output columns are:
+
+    - ``R`` = The radius of the aperture. (Spaced the same way as ``R_nom`` is in the correlation function output files.)
+    - ``Mapsq`` = The E-mode aperture mass variance for each radius R.
+    - ``Mxsq`` = The B-mode aperture mass variance.
+    - ``MMxa``, ``MMxb`` = Two semi-independent estimates for the E-B cross term. (Both should be consistent with zero for parity invariant shear fields.)
+    - ``sig_map`` = The 1-sigma error bar for these values.
+    - ``Gamsq`` = The variance of the top-hat weighted mean shear in apertures of the given radius R.
+    - ``sig_gam`` = The 1-sigma error bar for ``Gamsq``.
+
+:m2_uform: (str, default='Crittenden') The functional form of the aperture.
+
+    The form of the aperture mass statistic popularized by Schneider is
+
+    U = 9/Pi (1-r^2) (1/3-r^2)
+
+    Q = 6/Pi r^2 (1-r^2)
+
+    However, in many ways the form used by Crittenden:
+
+    U = 1/2Pi (1-r^2) exp(-r^2/2)
+
+    Q = 1/4Pi r^2 exp(-r^2/2)
+
+    is easier to use. For example, the skewness of the aperture mass
+    has a closed-form solution in terms of the 3-point function for the
+    Crittenden form, but no such formula is known for the Schneider form.
+
+    The ``m2_uform`` parameter allows you to switch between the two forms,
+    at least for 2-point applications. (You will get an error if you
+    try to use 'Schneider' with the m3 output.) A sketch of both filter
+    functions is given at the end of this section.
+
+:nm_file_name: (str) The output filename for <NMap> and related values.
+
+    This file outputs the correlation of the aperture mass with the
+    aperture-smoothed density field, derived from the count-shear correlation
+    function.
+
+    The output columns are:
+
+    - ``R`` = The radius of the aperture. (Spaced the same way as ``R_nom`` is in the correlation function output files.)
+    - ``NMap`` = The E-mode aperture mass correlated with the density smoothed with the same aperture profile as the aperture mass statistic uses.
+    - ``NMx`` = The corresponding B-mode statistic.
+    - ``sig_nmap`` = The 1-sigma error bar for these values.
+
+:norm_file_name: (str) The output filename for <NMap>^2/<Napsq><Mapsq> and related values.
+
+    This file outputs the <NMap> values normalized by <Napsq><Mapsq>. This
+    provides an estimate of the correlation coefficient, r.
+
+    The output columns are:
+
+    - ``R`` = The radius of the aperture. (Spaced the same way as ``R_nom`` is in the correlation function output files.)
+    - ``NMap`` = The E-mode aperture mass correlated with the density smoothed with the same aperture profile as the aperture mass statistic uses.
+    - ``NMx`` = The corresponding B-mode statistic.
+    - ``sig_nmap`` = The 1-sigma error bar for these values.
+    - ``Napsq`` = The variance of the aperture-weighted galaxy density.
+    - ``sig_napsq`` = The 1-sigma error bar for ``Napsq``.
+    - ``Mapsq`` = The aperture mass variance.
+    - ``sig_mapsq`` = The 1-sigma error bar for ``Mapsq``.
+    - ``NMap_norm`` = <NMap>^2 / (<Napsq> <Mapsq>)
+    - ``sig_norm`` = The 1-sigma error bar for this value.
+    - ``Nsq_Mapsq`` = <Napsq> / <Mapsq>
+    - ``sig_nn_mm`` = The 1-sigma error bar for this value.
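+As a reference for the ``m2_uform`` choices above, here is a minimal sketch of
+the two filter pairs written out as plain Python functions (for illustration
+only; TreeCorr applies these internally)::
+
+    >>> import math
+    >>> def u_schneider(r):
+    ...     return 9./math.pi * (1. - r**2) * (1./3. - r**2)
+    >>> def q_schneider(r):
+    ...     return 6./math.pi * r**2 * (1. - r**2)
+    >>> def u_crittenden(r):
+    ...     return 1./(2.*math.pi) * (1. - r**2) * math.exp(-r**2/2.)
+    >>> def q_crittenden(r):
+    ...     return 1./(4.*math.pi) * r**2 * math.exp(-r**2/2.)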
+Miscellaneous parameters
+------------------------
+
+:verbose: (int, default=1) How verbose the code should be during processing.
+
+    - 0 = no output unless there is an error
+    - 1 = output warnings
+    - 2 = output progress information
+    - 3 = output extra debugging lines
+
+    This is overridden by the ``-v`` command line argument for the `corr2` executable.
+
+:log_file: (str, default=None) Where to write the logging information.
+
+    The default is to write lines to the screen, but this option allows you to
+    write them to a file instead. With the `corr2` executable, this can also be
+    specified with the ``-l`` command line argument.
+
+:output_dots: (bool, default=(``verbose``>=2)) Whether to output progress dots during the
+    calculation of the correlation function.
+
+:split_method: (str, default='mean') Which method to use for splitting cells.
+
+    When building the tree, there are three obvious choices for how to split a set
+    of points into two child cells. The direction is always taken to be the
+    coordinate direction with the largest extent. Then, in that direction,
+    you can split at the mean value, the median value, or the "middle" =
+    (xmin+xmax)/2. To select among these, ``split_method`` may be given as
+    "mean", "median", or "middle" respectively.
+
+    In addition, sometimes it may be useful to inject some randomness into the
+    tree construction to study how much the results depend on the specific splitting
+    used. For that purpose, there is also the option to set ``split_method`` = 'random',
+    which will choose a random point in the middle two quartiles of the range.
+
+:min_top: (int, default=3) The minimum number of top layers to use when setting up the field.
+
+    The OpenMP parallelization happens over the top-level cells, so setting this > 0
+    ensures that there will be multiple jobs to be run in parallel. For systems with
+    very many cores, it may be helpful to set this larger than the default value of 3.
+
+:max_top: (int, default=10) The maximum number of top layers to use when setting up the field.
+
+    The top-level cells are the cells where each calculation job starts. There will
+    typically be of order 2^max_top top-level cells.
+
+:num_threads: (int, default=0) How many (OpenMP) threads should be used.
+
+    The default is to try to determine the number of cpu cores your system has
+    and use that many threads.
+
diff --git a/docs/_build/html/_sources/patches.rst.txt b/docs/_build/html/_sources/patches.rst.txt new file mode 100644 index 00000000..cb34629a --- /dev/null +++ b/docs/_build/html/_sources/patches.rst.txt @@ -0,0 +1,391 @@
+Patches
+=======
+
+Normally, TreeCorr is used to compute the auto-correlation function
+of data in a single input `Catalog` or the cross-correlation of data
+from two `Catalogs `.
+However, there are a number of reasons that it might make sense to
+divide up a region into several smaller patches for computing the
+correlation function:
+
+1. To compute a more accurate covariance matrix.
+   There are a number of ways to compute more accurate covariance estimates
+   from the data than the default method. All of them require dividing
+   up the data into patches and doing different things with the
+   patch-to-patch correlations. See `Covariance Estimates` for details.
+2. To save memory.
+   The entire data set might be too large to fit in memory, so you might
+   want to divide it up so less data is required to be in memory at a time.
+   See `Reducing Memory Use` below.
+3. To split the job among multiple machines.
+   TreeCorr does a good job of utilizing many cores on a single machine
+   using OpenMP. However, for very large jobs, you may want to also
+   split the work among more than one node on a cluster. The most
+   effective way to do this is to split the data into patches.
+   See `Using MPI` below.
+4. To run k-means on some data set for non-correlation reasons.
+   TreeCorr happens to have an extremely efficient implementation of the
+   k-means algorithm.
So if you want to perform k-means clustering on
+   some data that can be represented in a TreeCorr `Catalog` (i.e.
+   only 2 or 3 spatial dimensions), then using TreeCorr may be a
+   particularly efficient way to do the clustering.
+   See `Running K-Means` below.
+
+Below we describe how to split up an input `Catalog` into patches and
+a few things you can do with it once you have done so.
+
+Defining Patches on Input
+-------------------------
+
+The most straightforward way to define which object goes in which patch
+is to just tell TreeCorr the patch number for each object explicitly.
+
+If passing in numpy arrays for everything, then just pass in a ``patch``
+parameter with integer values indicating the patch number.
+
+If reading in data from a file, then set a ``patch_col`` to use, which
+should have these values. (A short sketch of these options follows at the
+end of this section.)
+
+The next simplest way to define the patches is to tell TreeCorr how many
+patches you want using ``npatch``.
+TreeCorr will then run the K-Means algorithm to split up the full area
+into this many patches.
+See `Running K-Means` below for more details.
+
+Finally, to make sure multiple catalogs are using the same definition for
+where patches are on the sky, you would probably want to have a single
+set of patch centers and have all of your catalogs use that via
+the ``patch_centers`` option. See `Using Patch Centers` below for details.
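+For example (a minimal sketch; the array and file names are hypothetical)::
+
+    >>> import treecorr
+    >>> # Explicit patch numbers via a numpy array:
+    >>> cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg',
+    ...                        patch=patch_num)
+    >>> # Or from a column in a file:
+    >>> cat = treecorr.Catalog('objects.fits', ra_col='RA', dec_col='DEC',
+    ...                        ra_units='deg', dec_units='deg', patch_col='PATCH')
+    >>> # Or let TreeCorr run k-means to make 20 patches:
+    >>> cat = treecorr.Catalog('objects.fits', ra_col='RA', dec_col='DEC',
+    ...                        ra_units='deg', dec_units='deg', npatch=20)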
+Running K-Means
+---------------
+
+One standard way to split up a set of objects into roughly equal area
+patches is an algorithm called
+`k-means clustering `_.
+
+The basic idea of the algorithm is to divide the points :math:`\vec x_j` into
+:math:`k` patches, :math:`S_i`, such that the total "inertia" is minimized.
+Inertia :math:`I_i` of each patch is defined as follows:
+
+.. math::
+
+    I_i = \sum_{j \in S_i} \left| \vec x_j - \vec \mu_i \right|^2,
+
+where :math:`\vec \mu_i` is the center of each patch:
+
+.. math::
+
+    \vec \mu_i \equiv \frac{\sum_{j \in S_i} \vec x_j}{N_i},
+
+and :math:`N_i` is the number of points assigned to patch :math:`S_i`.
+The k-means algorithm finds a solution that is a local minimum in the total inertia,
+:math:`\sum_i I_i`, or equivalently the mean inertia :math:`\langle I_i \rangle`
+of all the patches.
+
+This definition of inertia is a relatively good proxy for area on the
+sky that has objects, so this algorithm is a good choice for dividing up a
+catalog of astronomical objects into fairly uniform patches.
+
+To use the TreeCorr implementation of k-means, simply
+set the ``npatch`` parameter in the `Catalog` constructor to specify
+how many patches you want TreeCorr to split the data into.
+
+.. note::
+
+    If the input catalog has (ra, dec, r) positions, then the patches will
+    be made using only the ra,dec location on the sky, not the full 3-D
+    positions. This is usually what you want for making patches over an
+    astronomical survey area. If you really want to make patches according
+    to 3-D clustering of points, then you should input x,y,z values instead.
+
+There are also two additional options which can affect how the k-means
+algorithm runs:
+
+* ``kmeans_init`` specifies what procedure to use for the initialization
+  of the patches. Options are:
+
+  * 'random' = Choose initial centers randomly from among the input points.
+    This is the traditional k-means initialization algorithm.
+  * 'kmeans++' = Use `k-means++ `_,
+    an improved algorithm by Arthur and Vassilvitskii
+    with a provable upper bound for how close the final result will
+    be to the global minimum possible total inertia.
+  * 'tree' = Use the upper layers of the TreeCorr ball tree to define
+    the initial centers. This is the default, and in practice,
+    it will almost always yield the best final patches.
+    (See :ref:`Comparison with other implementations ` below.)
+
+* ``kmeans_alt`` specifies whether to use an alternate iteration algorithm
+  similar to k-means, which often produces somewhat more uniform patches.
+
+  This alternate algorithm specifically targets minimizing the standard deviation
+  of the inertia rather than the mean inertia, so it tends to lead to patches that
+  have a smaller final size variation than the regular k-means algorithm.
+
+  This is not the default algorithm because it is not provably (at least by
+  me) stable. It is possible that the iteration can get into a failure mode
+  where one patch will end up with zero objects. The regular k-means
+  provably cannot fail in this way.
+
+  So if you care especially about having very uniform patch sizes, you might
+  want to try this option, but be careful to inspect the results to make sure
+  they don't look crazy.
+
+See also `Field.run_kmeans`, which has more information about these options,
+where these parameters are called simply ``init`` and ``alt`` respectively.
+
+.. _Comparison:
+.. admonition:: Comparison with other implementations
+
+    Before implementing k-means in TreeCorr, I investigated what other options
+    there were in the Python landscape. I found the following implementations:
+
+    * `scipy.cluster.vq.kmeans `_
+    * `scipy.cluster.vq.kmeans2 `_
+    * `kmeans_radec `_
+    * `pyclustering.cluster.kmeans `_
+    * `sklearn.cluster.KMeans `_
+    * `sklearn.cluster.MiniBatchKMeans `_
+
+    I made a `notebook `_
+    comparing the different algorithms using a random million galaxies from the DES SV
+    (Dark Energy Survey, Science Verification) footprint, chosen because it is a
+    real-life use case that has some ratty edges to deal with, so it seemed like it would
+    provide a reasonable challenge without being crazy.
+
+    The ideal patches would be essentially uniform in size according to some measure of the
+    effective area of the patch. To make things simple, I just used the inertia as my
+    proxy for area, since that's the thing that k-means algorithms natively work with.
+
+    However, we don't really care about the total inertia being minimized. For most purposes
+    here, we really want the patches to be all close to the *same* size. So rather than
+    the total inertia, my metric for quality was the rms variation of the inertia
+    (aka the standard deviation).
+
+    Fortunately, the process of minimizing the total inertia does tend to select patches with
+    small rms variation as well, but it is worth noting that this is not directly targeted by the
+    normal k-means algorithm. And furthermore, the k-means algorithm almost never finds the true
+    global minimum inertia. The quality of the local minimum depends pretty strongly on the
+    choice of initial centers to seed the iterative part of the algorithm.
+
+    Comparing the results of the various k-means implementations, I found that they all tend
+    to be either fairly slow, taking a minute or more for just 1 million objects, or they have
+    very high rms variation in the inertia.
+    I reran each code multiple times using a different random million objects selected from the
+    original catalog (of around 16 million objects). Here is a scatter plot of the time vs rms
+    variation in the inertia for the various codes.
+
+    .. image:: https://user-images.githubusercontent.com/623887/57647337-ac6bd800-7590-11e9-80bc-900bda3bf66b.png
+
+    Since there was no existing implementation I was particularly happy with,
+    I implemented it myself in TreeCorr. It turns out (not surprisingly) that the ball tree
+    data structure that TreeCorr uses for efficient calculation of correlation functions
+    also enables a very efficient implementation of the k-means iteration step.
+    Furthermore, the quality of the k-means result is pretty dependent
+    on the choice of the initial centers, and using the ball tree for the initialization turns
+    out to produce reliably better results than the initialization methods used by other packages.
+
+    The big red dots in the lower left corner are the TreeCorr implementation of the standard
+    k-means clustering algorithm. It typically takes about 1 or 2 seconds to classify these
+    1 million points into 40 patches, and the rms variation is usually less than any other
+    implementation.
+
+    The `notebook `_ also
+    includes plots of total inertia, variation in size according to the mean d^2 rather than
+    sum, and variation in the counts. The TreeCorr algorithm tends to be the best k-means
+    implementation according to any of these metrics.
+
+    In addition, you can see some slightly smaller orange dots, which have even lower rms
+    variation but take very slightly longer to run. These are the alternate algorithm I mentioned
+    above. This alternate algorithm is similar to k-means, but it penalizes patches with a
+    larger-than-average inertia, so they give up some of their outer points to patches with
+    smaller inertia. In other words, it explicitly targets making the rms variation as small as
+    possible. But in practice, it is not much worse in terms of total inertia either.
+
+    The alternate algorithm is available using ``alt=True`` in `Field.run_kmeans`.
+    I left this as a non-default option for two reasons. First, it's not actually the real
+    k-means, so I didn't want to confuse people who just want to use this for regular k-means
+    clustering. But second, I'm not completely sure that it is always stable. There is a free
+    parameter in the penalty function I chose, which I set to 3. Setting it to 4 gave even better
+    results (slightly), but at 5 the algorithm broke down with neighboring patches trading
+    escalating numbers of points between each other until one of them had no points left.
+
+    I couldn't convince myself that 4 was actually a magic number and not just the particular
+    value for this data set. So 3 might be safe, or there might be data sets where that also
+    leads to this runaway trading failure mode. I know the regular k-means algorithm can't get
+    into this mode, so it's always safe. Therefore, I think it's better to force the user to
+    intentionally select the alternate algorithm if they really care about having a low rms
+    size variation, with the normal algorithm being the backup if the alternate one fails for them.
+
+
+Using Patch Centers
+-------------------
+
+If you are doing a cross-correlation, and you want to use patches for computing
+a jackknife covariance for instance, you cannot
+just set ``npatch`` in both and expect it to work properly. The two catalogs
+would end up with patches arranged very differently on the sky. Patch 2
+for one catalog would not be in the same place as patch 2 in the other one.
+Thus, the jackknife calculation would be messed up.
+
+Instead, you should define the patches using one of the two (or more)
+catalogs you want to work with,
+and then use its ``patch_centers`` attribute as the ``patch_centers``
+parameter when building the other catalog(s)::
+
+    >>> cat1 = treecorr.Catalog(cat_file1, config1, npatch=N)
+    >>> cat2 = treecorr.Catalog(cat_file2, config2, patch_centers=cat1.patch_centers)
+
+You can also save the patches to a file using `Catalog.write_patch_centers`
+and use that file name as the ``patch_centers`` parameter::
+
+    >>> cat1 = treecorr.Catalog(cat_file1, config1, npatch=N)
+    >>> cat1.write_patch_centers(cen_file)
+    >>> cat2 = treecorr.Catalog(cat_file2, config2, patch_centers=cen_file)
+
+With either method, cat2 will have patches assigned according to which patch
+center each object is closest to.
+
+
+Reducing Memory Use
+-------------------
+
+One reason you might want to use patches is if the full `Catalog` doesn't fit
+in memory. (Or possibly by itself it fits, but when performing the correlation function,
+the additional memory from building the tree overflows the memory.)
+Then you can potentially perform the calculation over patches
+with less data loaded into memory at any given time.
+The overall procedure for doing this is as follows:
+
+1. First define your patch centers using some smaller `Catalog`, which
+   does fit in memory. This could be a catalog over the same survey
+   geometry, which is intrinsically sparser (say a catalog of red sequence
+   galaxies or clusters or even stars). Or it could be the large catalog
+   you want to use, but sampled using the ``every_nth`` option to read
+   in only a fraction of the rows. Run k-means on the smaller catalog
+   and write the patch_centers to a file, as described `above `.
+2. Set up a directory somewhere that TreeCorr can use as temporary
+   space for writing the individual patch files.
+3. Define the full `Catalog`, specifying to use the above centers file for the
+   ``patch_centers`` and the temp directory as ``save_patch_dir``.
+4. Make sure not to do anything that requires the catalog to be loaded from disk.
+   TreeCorr will delay doing the actual load until it needs to do so.
+   Here, we want to make sure it never loads the full data.
+5. Run the `process ` function (for whichever correlation
+   type you need) using the ``low_mem=True`` option.
+
+Here are some worked examples. First, an auto-correlation of a
+single large shear catalog::
+
+    >>> small_cat = treecorr.Catalog(cat_file, config, every_nth=100, npatch=N)
+    >>> small_cat.write_patch_centers(cen_file)
+    >>> del small_cat
+    >>> full_cat = treecorr.Catalog(cat_file, config, patch_centers=cen_file,
+    ...                             save_patch_dir=tmp_dir)
+    >>> gg = treecorr.GGCorrelation(ggconfig)
+    >>> gg.process(full_cat, low_mem=True)
+
+Second, a cross-correlation, where the lens catalog is small enough not to
+be a problem, but the source catalog is too large to hold in memory::
+
+    >>> lens_cat = treecorr.Catalog(lens_file, lens_config, npatch=N)
+    >>> source_cat = treecorr.Catalog(source_file, source_config,
+    ...                               patch_centers=lens_cat.patch_centers,
+    ...                               save_patch_dir=tmp_dir)
+    >>> ng = treecorr.NGCorrelation(ngconfig)
+    >>> ng.process(lens_cat, source_cat, low_mem=True)
+
+In both cases, the result should be equivalent to what you would get if you could
+hold the catalogs fully in memory, but the peak memory will be much lower.
+The downside is that this usage will generally take somewhat longer --
+probably something like a factor of 2 for typical scenarios, but this of course
+depends heavily on the nature of your calculation, how fast your disk I/O is
+compared to your CPUs, and how many cores you are using.
+
+.. note::
+
+    Technically, the ``save_patch_dir`` parameter is not required, but it is
+    recommended. The first time a given patch is loaded, it will find the right
+    rows in the full catalog and load the ones you need. If you give it a
+    directory, then it will write these data to disk, which will make subsequent
+    reads of that patch much faster.
+
+.. warning::
+
+    One caveat with respect to the ``save_patch_dir`` parameter is that if there
+    are already files present in the directory with the right names, then it
+    will go ahead and use them, rather than make new patch files. This is usually
+    an efficiency gain, since repeated runs with the same data will already have
+    the right patch files present. However, if you use the same file name and
+    save directory for a different data set, or if you make new patches for the
+    same input file, then TreeCorr won't notice.
+
+    To get TreeCorr to make new patch files, you can either manually delete
+    everything in the save directory before starting, or (easier) call::
+
+        >>> cat.write_patch_files()
+
+    which will overwrite any existing files that may be there with the same names.
+
+Using MPI
+---------
+
+Another use case that is enabled by using patches is
+to divide up the work of calculating a correlation function
+over multiple machines with MPI using `mpi4py `_.
+
+For this usage, the `process ` functions take an optional ``comm``
+parameter. When running in an MPI job, you can pass in ``comm=MPI.COMM_WORLD``,
+and TreeCorr will divide up the work among however many nodes you are using.
+The results will be sent back to the rank 0 node and combined to produce the
+complete answer:
+
+.. code-block:: python
+    :linenos:
+
+    # File name: run_with_mpi.py
+    import treecorr
+    from mpi4py import MPI
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+
+    # Define stuff
+    fname = ...
+    centers_file = ...
+    config = ...
+    ggconfig = ...
+
+    # All machines read the catalog
+    cat = treecorr.Catalog(fname, config, patch_centers=centers_file)
+
+    # All machines define the same correlation object
+    gg = treecorr.GGCorrelation(ggconfig)
+
+    # Pass the comm object to the process function
+    gg.process(cat, comm=comm)
+
+    # rank 0 has the completed result.
+    if rank == 0:
+        # Probably do something more interesting with this now...
+        print('xip = ',gg.xip)
+
+You would then run this script using (e.g. with 4 processes)::
+
+    $ mpiexec -n 4 python run_with_mpi.py
+
+The file defining the patch centers should already be written to make sure
+that each machine is using the same patch definitions. There is some level of
+randomness in the k-means calculation, so if you use ``npatch=N``, then each
+machine may end up with different patch definitions, which would definitely
+mess things up.
+
+If you wanted to have it all run in a single script, you should have only
+the rank 0 process define the patches. Then send ``cat.patch_centers`` to the
+other ranks, who can build their catalogs using this (see the sketch at the
+end of this page).
+But it's probably easier to just precompute the centers and save them to a file
+before starting the MPI run.
+
+A more complete worked example is
+`available `_
+in the TreeCorr devel directory.
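+For reference, here is a minimal sketch of that single-script approach (an
+illustration, not the devel directory example), broadcasting the centers with
+mpi4py so that every rank uses identical patch definitions:
+
+.. code-block:: python
+
+    import treecorr
+    from mpi4py import MPI
+    comm = MPI.COMM_WORLD
+
+    # Only rank 0 runs k-means to define the patches.
+    if comm.Get_rank() == 0:
+        cat0 = treecorr.Catalog(fname, config, npatch=N)
+        centers = cat0.patch_centers
+    else:
+        centers = None
+
+    # Send the same centers to every rank (a small, picklable array).
+    centers = comm.bcast(centers, root=0)
+
+    # Now all ranks can build their catalog with the shared patch definitions.
+    cat = treecorr.Catalog(fname, config, patch_centers=centers)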
diff --git a/docs/_build/html/_sources/scripts.rst.txt b/docs/_build/html/_sources/scripts.rst.txt new file mode 100644 index 00000000..87fe6eb9 --- /dev/null +++ b/docs/_build/html/_sources/scripts.rst.txt @@ -0,0 +1,100 @@
+Using configuration files
+=========================
+
+Most of the TreeCorr classes can take a ``config`` parameter in lieu
+of a set of keyword arguments. This is not especially
+useful when driving the code from Python; however, it enables running
+the code from some executable scripts, described below.
+
+Specifically, the parameters defined in the configuration file are
+loaded into a Python dict, which is passed to each of the classes
+as needed. The advantage of this is that TreeCorr will only use the
+parameters it actually needs when initializing each object.
+Any additional parameters (e.g. those
+that are relevant to a different class) are ignored.
+
+The corr2 and corr3 executables
+-------------------------------
+
+Along with the installed Python library, TreeCorr also includes
+two executable scripts, called ``corr2`` and ``corr3``.
+Each script takes one required command-line argument, which
+is the name of a configuration file::
+
+    corr2 config.yaml
+    corr3 config.yaml
+
+A sample configuration file is provided, called sample_config.yaml.
+
+For the complete documentation about the allowed parameters, see:
+
+.. toctree::
+
+    params
+
+YAML is the recommended format for the configuration file, but we
+also allow JSON files if you prefer, or a legacy format, which is
+like an .ini file, but without the section headings, consisting of
+key = value lines. The three formats are normally distinguished
+by their extensions (.yaml, .json, or .params respectively), but
+you can also give the file type explicitly with the -f option. E.g.::
+
+    corr2 my_config_file.txt -f params
+
+would specify that the configuration file ``my_config_file.txt`` uses
+the legacy "params" format.
+
+You can also specify parameters on the command line after the name of
+the configuration file. e.g.::
+
+    corr2 config.yaml file_name=file1.dat gg_file_name=file1.out
+    corr2 config.yaml file_name=file2.dat gg_file_name=file2.out
+    ...
+
+This can be useful when running the program from a script for lots of input
+files.
+
+The corr2 function from python
+------------------------------
+
+The same functionality that you have from the ``corr2`` executable is available in python via the
+`corr2` function::
+
+    import treecorr
+    config = treecorr.read_config(config_file)
+    config['file_name'] = 'catalog.dat'
+    config['gg_file_name'] = 'gg.out'
+    treecorr.corr2(config)
+
+.. autofunction:: treecorr.corr2
+
+The corr3 function from python
+------------------------------
+
+.. autofunction:: treecorr.corr3
+
+
+Other utilities related to corr2 and corr3
+------------------------------------------
+
+.. autofunction:: treecorr.corr2.print_corr2_params
+
+.. autofunction:: treecorr.corr3.print_corr3_params
+
+
+Utilities related to the configuration dict
+-------------------------------------------
+
+.. automodule:: treecorr.config
+    :members:
+
+
+File Writers
+------------
+
+.. autoclass:: treecorr.writer.FitsWriter
+    :members:
+.. autoclass:: treecorr.writer.HdfWriter
+    :members:
+.. 
autoclass:: treecorr.writer.AsciiWriter + :members: diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css new file mode 100644 index 00000000..b3bdc004 --- /dev/null +++ b/docs/_build/html/_static/basic.css @@ -0,0 +1,861 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + 
border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + 
+table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +dl.footnote > dt, +dl.citation > dt { + float: left; + margin-right: 0.5em; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: 
#fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0.5em; + content: ":"; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_build/html/_static/css/badge_only.css 
b/docs/_build/html/_static/css/badge_only.css new file mode 100644 index 00000000..e380325b --- /dev/null +++ b/docs/_build/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff 
new file mode 100644
index 00000000..6cb60000
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff differ
diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2
new file mode 100644
index 00000000..7059e231
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 differ
diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff
new file mode 100644
index 00000000..f815f63f
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff differ
diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2
new file mode 100644
index 00000000..f2c76e5b
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 differ
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot
new file mode 100644
index 00000000..e9f60ca9
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot differ
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg
new file mode 100644
index 00000000..855c845e
--- /dev/null
+++ b/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg
@@ -0,0 +1,2671 @@
+Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016
+Copyright Dave Gandy 2016. All rights reserved.
[2,671 lines of SVG glyph markup; only these header comments survived extraction]
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf
new file mode 100644
index 00000000..35acda2f
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf differ
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff
new file mode 100644
index 00000000..400014a4
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff differ
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2
new file mode 100644
index 00000000..4d13fc60
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff
new file mode 100644
index 00000000..88ad05b9
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2
new file mode 100644
index 00000000..c4e3d804
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff b/docs/_build/html/_static/css/fonts/lato-bold.woff
new file mode 100644
index 00000000..c6dff51f
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold.woff differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff2 b/docs/_build/html/_static/css/fonts/lato-bold.woff2
new file mode 100644
index 00000000..bb195043
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-bold.woff2 differ
diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff
new file mode 100644
index 00000000..76114bc0
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff differ
diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2
new file mode 100644
index 00000000..3404f37e
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 differ
diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff b/docs/_build/html/_static/css/fonts/lato-normal.woff
new file mode 100644
index 00000000..ae1307ff
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal.woff differ
diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff2 b/docs/_build/html/_static/css/fonts/lato-normal.woff2
new file mode 100644
index 00000000..3bf98433
Binary files /dev/null and b/docs/_build/html/_static/css/fonts/lato-normal.woff2 differ
diff --git a/docs/_build/html/_static/css/theme.css b/docs/_build/html/_static/css/theme.css
new file mode 100644
index 00000000..0d9ae7e1
--- /dev/null
+++ b/docs/_build/html/_static/css/theme.css
@@ -0,0 +1,4 @@
+html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on 
a,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li 
button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning 
.wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown 
.caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-elli
psis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li 
button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.f
a-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-trip
advisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:b
efore,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a 
.admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content 
tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content 
tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) 
.optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) 
format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js new file mode 100644 index 00000000..61ac9d26 --- /dev/null +++ b/docs/_build/html/_static/doctools.js @@ -0,0 +1,321 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
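+ *
+ * Illustrative usage (an editor's sketch, not part of the original file):
+ *
+ *   $('div.body').highlightText('correlation', 'highlighted');
+ *
+ * wraps each case-insensitive occurrence of the (lowercase) search term in a
+ * <span class="highlighted">, or in an SVG <tspan> with a backing <rect>
+ * when the matched text lives inside an <svg> element.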
+ */
+jQuery.fn.highlightText = function(text, className) {
+  function highlight(node, addItems) {
+    if (node.nodeType === 3) {
+      var val = node.nodeValue;
+      var pos = val.toLowerCase().indexOf(text);
+      if (pos >= 0 &&
+          !jQuery(node.parentNode).hasClass(className) &&
+          !jQuery(node.parentNode).hasClass("nohighlight")) {
+        var span;
+        var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
+        if (isInSVG) {
+          span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+        } else {
+          span = document.createElement("span");
+          span.className = className;
+        }
+        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
+          document.createTextNode(val.substr(pos + text.length)),
+          node.nextSibling));
+        node.nodeValue = val.substr(0, pos);
+        if (isInSVG) {
+          var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
+          var bbox = node.parentElement.getBBox();
+          rect.x.baseVal.value = bbox.x;
+          rect.y.baseVal.value = bbox.y;
+          rect.width.baseVal.value = bbox.width;
+          rect.height.baseVal.value = bbox.height;
+          rect.setAttribute('class', className);
+          addItems.push({
+            "parent": node.parentNode,
+            "target": rect});
+        }
+      }
+    }
+    else if (!jQuery(node).is("button, select, textarea")) {
+      jQuery.each(node.childNodes, function() {
+        highlight(this, addItems);
+      });
+    }
+  }
+  var addItems = [];
+  var result = this.each(function() {
+    highlight(this, addItems);
+  });
+  for (var i = 0; i < addItems.length; ++i) {
+    jQuery(addItems[i].parent).before(addItems[i].target);
+  }
+  return result;
+};
+
+/*
+ * backward compatibility for jQuery.browser
+ * This will be supported until the corresponding firefox bug is fixed.
+ */
+if (!jQuery.browser) {
+  jQuery.uaMatch = function(ua) {
+    ua = ua.toLowerCase();
+
+    var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
+      /(webkit)[ \/]([\w.]+)/.exec(ua) ||
+      /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
+      /(msie) ([\w.]+)/.exec(ua) ||
+      ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
+      [];
+
+    return {
+      browser: match[ 1 ] || "",
+      version: match[ 2 ] || "0"
+    };
+  };
+  jQuery.browser = {};
+  jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
+}
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+var Documentation = {
+
+  init : function() {
+    this.fixFirefoxAnchorBug();
+    this.highlightSearchWords();
+    this.initIndexTable();
+    if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
+      this.initOnKeyListeners();
+    }
+  },
+
+  /**
+   * i18n support
+   */
+  TRANSLATIONS : {},
+  PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
+  LOCALE : 'unknown',
+
+  // gettext and ngettext don't access this so that the functions
+  // can safely be bound to a different name (_ = Documentation.gettext)
+  gettext : function(string) {
+    var translated = Documentation.TRANSLATIONS[string];
+    if (typeof translated === 'undefined')
+      return string;
+    return (typeof translated === 'string') ? translated : translated[0];
+  },
+
+  ngettext : function(singular, plural, n) {
+    var translated = Documentation.TRANSLATIONS[singular];
+    if (typeof translated === 'undefined')
+      return (n == 1) ? singular : plural;
+    // select the plural form for n via the catalog's plural expression
+    return translated[Documentation.PLURAL_EXPR(n)];
+  },
+
+  addTranslations : function(catalog) {
+    for (var key in catalog.messages)
+      this.TRANSLATIONS[key] = catalog.messages[key];
+    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
+    this.LOCALE = catalog.locale;
+  },
+
+  /**
+   * add context elements like header anchor links
+   */
+  addContextElements : function() {
+    $('div[id] > :header:first').each(function() {
+      $('<a class="headerlink">\u00B6</a>').
+      attr('href', '#' + this.id).
+      attr('title', _('Permalink to this headline')).
+      appendTo(this);
+    });
+    $('dt[id]').each(function() {
+      $('<a class="headerlink">\u00B6</a>').
+      attr('href', '#' + this.id).
+      attr('title', _('Permalink to this definition')).
+      appendTo(this);
+    });
+  },
+
+  /**
+   * workaround a firefox stupidity
+   * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
+   */
+  fixFirefoxAnchorBug : function() {
+    if (document.location.hash && $.browser.mozilla)
+      window.setTimeout(function() {
+        document.location.href += '';
+      }, 10);
+  },
+
+  /**
+   * highlight the search words provided in the url in the text
+   */
+  highlightSearchWords : function() {
+    var params = $.getQueryParameters();
+    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
+    if (terms.length) {
+      var body = $('div.body');
+      if (!body.length) {
+        body = $('body');
+      }
+      window.setTimeout(function() {
+        $.each(terms, function() {
+          body.highlightText(this.toLowerCase(), 'highlighted');
+        });
+      }, 10);
+      $('<p class="highlight-link"><a href="javascript:Documentation.' +
+        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
+          .appendTo($('#searchbox'));
+    }
+  },
+
+  /**
+   * init the domain index toggle buttons
+   */
+  initIndexTable : function() {
+    var togglers = $('img.toggler').click(function() {
+      var src = $(this).attr('src');
+      var idnum = $(this).attr('id').substr(7);
+      $('tr.cg-' + idnum).toggle();
+      if (src.substr(-9) === 'minus.png')
+        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
+      else
+        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
+    }).css('display', '');
+    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
+      togglers.click();
+    }
+  },
+
+  /**
+   * helper function to hide the search marks again
+   */
+  hideSearchWords : function() {
+    $('#searchbox .highlight-link').fadeOut(300);
+    $('span.highlighted').removeClass('highlighted');
+  },
+
+  /**
+   * make the url absolute
+   */
+  makeURL : function(relativeURL) {
+    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
+  },
+
+  /**
+   * get the current relative url
+   */
+  getCurrentURL : function() {
+    var path = document.location.pathname;
+    var parts = path.split(/\//);
+    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
+      if (this === '..')
+        parts.pop();
+    });
+    var url = parts.join('/');
+    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+  },
+
+  initOnKeyListeners: function() {
+    $(document).keydown(function(event) {
+      var activeElementType = document.activeElement.tagName;
+      // don't navigate when in search box, textarea, dropdown or button
+      if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
+          && activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey
+          && !event.shiftKey) {
+        switch (event.keyCode) {
+          case 37: // left
+            var prevHref = $('link[rel="prev"]').prop('href');
+            if (prevHref) {
+              window.location.href = prevHref;
+              return false;
+            }
+            break;  // no previous page; don't fall through to "right"
+          case 39: // right
+            var nextHref = $('link[rel="next"]').prop('href');
+            if (nextHref) {
+              window.location.href = nextHref;
+              return false;
+            }
+            break;
+        }
+      }
+    });
+  }
+};
+
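+// Illustrative only -- an editor's sketch of how a translation catalog is
+// registered and consumed; the catalog contents below are hypothetical and
+// are not part of this generated file:
+//
+//   Documentation.addTranslations({
+//     messages: {'Hide Search Matches': ['Masquer les résultats']},
+//     plural_expr: '(n > 1)',
+//     locale: 'fr'
+//   });
+//   Documentation.gettext('Hide Search Matches');   // 'Masquer les résultats'
+//   Documentation.ngettext('result', 'results', 2); // 'results' (no catalog entry)
+
+// quick alias for translations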
+_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js new file mode 100644 index 00000000..3a4f8f9b --- /dev/null +++ b/docs/_build/html/_static/documentation_options.js @@ -0,0 +1,12 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '4.3.0', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false +}; \ No newline at end of file diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/docs/_build/html/_static/file.png differ diff --git a/docs/_build/html/_static/jquery-3.5.1.js b/docs/_build/html/_static/jquery-3.5.1.js new file mode 100644 index 00000000..50937333 --- /dev/null +++ b/docs/_build/html/_static/jquery-3.5.1.js @@ -0,0 +1,10872 @@ +/*! + * jQuery JavaScript Library v3.5.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2020-05-04T22:49Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. 
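+	// For instance (editor's illustration, not from the original source):
+	//   isFunction( function() {} )                       // true
+	//   isFunction( document.createElement( "object" ) )  // false: it has a
+	//                                                      // numeric nodeType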
+ return typeof obj === "function" && typeof obj.nodeType !== "number"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.5.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
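+	// The callback receives ( index, element ) with `this` bound to the
+	// element, and returning false from it stops the iteration early --
+	// standard jQuery.each semantics (editor's note; see jQuery.each below).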
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && 
proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.5 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2020-03-14 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? 
+ String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? 
context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
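+		// Editor's illustration (not from the original source): in
+		//   <fieldset disabled><legend><input></legend><input></fieldset>
+		// only the second <input> matches :disabled; controls inside the first
+		// <legend> of a disabled fieldset are exempt per the HTML spec, which is
+		// the exemption the surrounding code and inDisabledFieldset implement.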
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem.namespaceURI, + docElem = ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
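+	// Editor's gloss on the check below: `el` is the <fieldset> assert()
+	// creates, so with a real :scope, ":scope fieldset div" rooted at `el`
+	// matches nothing; a :root alias would instead match the freshly appended
+	// <div>, making the assertion return false and leaving support.scope unset.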
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" + + "<select id='" + expando + "-\r\\' msallowcapture=''>" + + "<option selected=''></option></select>"; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue.
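+ // Illustrative note (not from the original source): each pattern pushed onto
+ // rbuggyQSA in these probes is OR-joined further down into one regular
+ // expression via new RegExp( rbuggyQSA.join( "|" ) ); any selector matching
+ // it bypasses querySelectorAll and is routed through Sizzle's own matcher.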
+ input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. + el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "<a href='' disabled='disabled'></a>" + + "<select disabled='disabled'><option/></select>"; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ?
+ adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? + + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
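+ // Illustrative walk-through (not from the original source): comparing an
+ // <em> and a <span> that share the same <p> parent, the ap/bp ancestor lists
+ // built above first differ at the <em>/<span> entries, so the order is
+ // decided by siblingCheck( ap[ i ], bp[ i ] ); the preferredDoc comparisons
+ // below only break ties between disconnected trees.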
+ /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
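+ // Worked example (not from the original source): ":nth-child(2n+1)" captures
+ // match[ 5 ] = "", match[ 6 ] = "2", match[ 7 ] = "+", match[ 8 ] = "1", so
+ // the expressions below give match[ 4 ] = +"2" = 2 and match[ 5 ] = +"+1" = 1;
+ // ":nth-child(odd)" has no xn-component, giving match[ 4 ] = 2 * true = 2 and
+ // match[ 5 ] = +( NaN || true ) = 1, while "even" ends with match[ 4 ] = 2
+ // and match[ 5 ] = 0.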
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
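+ // Illustrative note (not from the original source): tokenize( "div > p" )
+ // yields one group [ { type: "TAG" }, { type: ">" }, { type: "TAG" } ], and
+ // Expr.relative supplies { dir: "parentNode", first: true } for ">". The
+ // first branch below therefore stops at the nearest element in the given
+ // direction, while non-"first" combinators like " " keep walking.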
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
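+ // Illustrative note (not from the original source): for
+ // select( "#main p", document ), the ID branch above reduces the context to
+ // document.getElementById( "main" ) and trims the selector to " p";
+ // positional selectors such as "p:first" test true against
+ // matchExpr[ "needsContext" ], so the ternary below starts i at 0 and no
+ // seed set is prefetched for them.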
0 : tokens.length; + while ( i-- ) { + token = tokens[ i ]; + + // Abort if we hit a combinator + if ( Expr.relative[ ( type = token.type ) ] ) { + break; + } + if ( ( find = Expr.find[ type ] ) ) { + + // Search, expanding context for leading sibling combinators + if ( ( seed = find( + token.matches[ 0 ].replace( runescape, funescape ), + rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || + context + ) ) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert( function( el ) { + + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; +} ); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert( function( el ) { + el.innerHTML = "<a href='#'></a>"; + return el.firstChild.getAttribute( "href" ) === "#"; +} ) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + } ); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert( function( el ) { + el.innerHTML = "<input/>"; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +} ) ) { + addHandle( "value", function( elem, _name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + } ); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert( function( el ) { + return el.getAttribute( "disabled" ) == null; +} ) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + ( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr)) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ?
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null;
+	},
+	parents: function( elem ) {
+		return dir( elem, "parentNode" );
+	},
+	parentsUntil: function( elem, _i, until ) {
+		return dir( elem, "parentNode", until );
+	},
+	next: function( elem ) {
+		return sibling( elem, "nextSibling" );
+	},
+	prev: function( elem ) {
+		return sibling( elem, "previousSibling" );
+	},
+	nextAll: function( elem ) {
+		return dir( elem, "nextSibling" );
+	},
+	prevAll: function( elem ) {
+		return dir( elem, "previousSibling" );
+	},
+	nextUntil: function( elem, _i, until ) {
+		return dir( elem, "nextSibling", until );
+	},
+	prevUntil: function( elem, _i, until ) {
+		return dir( elem, "previousSibling", until );
+	},
+	siblings: function( elem ) {
+		return siblings( ( elem.parentNode || {} ).firstChild, elem );
+	},
+	children: function( elem ) {
+		return siblings( elem.firstChild );
+	},
+	contents: function( elem ) {
+		if ( elem.contentDocument != null &&
+
+			// Support: IE 11+
+			// <object> elements with no `data` attribute has an object
+			// `contentDocument` with a `null` prototype.
+			getProto( elem.contentDocument ) ) {
+
+			return elem.contentDocument;
+		}
+
+		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
+		// Treat the template element as a regular one in browsers that
+		// don't support it.
+		if ( nodeName( elem, "template" ) ) {
+			elem = elem.content || elem;
+		}
+
+		return jQuery.merge( [], elem.childNodes );
+	}
+}, function( name, fn ) {
+	jQuery.fn[ name ] = function( until, selector ) {
+		var matched = jQuery.map( this, fn, until );
+
+		if ( name.slice( -5 ) !== "Until" ) {
+			selector = until;
+		}
+
+		if ( selector && typeof selector === "string" ) {
+			matched = jQuery.filter( selector, matched );
+		}
+
+		if ( this.length > 1 ) {
+
+			// Remove duplicates
+			if ( !guaranteedUnique[ name ] ) {
+				jQuery.uniqueSort( matched );
+			}
+
+			// Reverse order for parents* and prev-derivatives
+			if ( rparentsprev.test( name ) ) {
+				matched.reverse();
+			}
+		}
+
+		return this.pushStack( matched );
+	};
+} );
+var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
+
+
+
+// Convert String-formatted options into Object-formatted ones
+function createOptions( options ) {
+	var object = {};
+	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
+		object[ flag ] = true;
+	} );
+	return object;
+}
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ *	options: an optional list of space-separated options that will change how
+ *			the callback list behaves or a more traditional option object
+ *
+ * By default a callback list will act like an event callback list and can be
+ * "fired" multiple times.
+ *
+ * Possible options:
+ *
+ *	once:			will ensure the callback list can only be fired once (like a Deferred)
+ *
+ *	memory:			will keep track of previous values and will call any callback added
+ *					after the list has been fired right away with the latest "memorized"
+ *					values (like a Deferred)
+ *
+ *	unique:			will ensure a callback can only be added once (no duplicate in the list)
+ *
+ *	stopOnFalse:	interrupt callings when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+	// Convert options from String-formatted to Object-formatted if needed
+	// (we check in cache first)
+	options = typeof options === "string" ?
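+	// [Illustrative sketch, not part of the jQuery source] The option flags
+	// documented above compose; with "once memory":
+	//
+	//     var cb = jQuery.Callbacks( "once memory" );
+	//     cb.add( function( v ) { console.log( "first:", v ); } );
+	//     cb.fire( 1 );                    // logs "first: 1", then locks (once)
+	//     cb.add( function( v ) { console.log( "late:", v ); } );
+	//                                      // logs "late: 1" immediately (memory)
+	//     cb.fire( 2 );                    // ignored, the list already fired
+	//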
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
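+				// [Illustrative sketch, not part of the jQuery source] fireWith()
+				// sets the `this` seen by every callback:
+				//
+				//     var cb = jQuery.Callbacks();
+				//     cb.add( function( v ) { console.log( this.name, v ); } );
+				//     cb.fireWith( { name: "ctx" }, [ 1 ] );   // logs "ctx 1"
+				//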
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
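+								// [Illustrative sketch, not part of the jQuery
+								// source] This wiring is what makes chaining work:
+								//
+								//     jQuery.Deferred().resolve( 2 )
+								//         .then( function( n ) { return n * 2; } )
+								//         .done( function( n ) { console.log( n ); } ); // 4
+								//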
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
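+// [Illustrative sketch, not part of the jQuery source] jQuery.when() above
+// adopts a single value like Promise.resolve and aggregates several arguments
+// like Promise.all:
+//
+//     var a = jQuery.Deferred(), b = jQuery.Deferred();
+//     jQuery.when( a, b ).done( function( ra, rb ) {
+//         console.log( ra, rb );   // "A" "B", once both resolve
+//     } );
+//     a.resolve( "A" );
+//     b.resolve( "B" );
+//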
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
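+					// [Illustrative sketch, not part of the jQuery source] access()
+					// backs most of the get/set methods; through .attr(), say:
+					//
+					//     el.attr( "title" );                      // get: no value given
+					//     el.attr( "title", "hi" );                // set: raw value
+					//     el.attr( "title", function( i, old ) {   // set: function value,
+					//         return old + "!";                    // evaluated right here
+					//     } );
+					//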
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
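+		// [Illustrative sketch, not part of the jQuery source] The read/write
+		// split above surfaces through jQuery.data():
+		//
+		//     jQuery.data( el );                 // read: the whole cache object
+		//     jQuery.data( el, "fooBar" );       // read: one camelCased key
+		//     jQuery.data( el, "fooBar", 42 );   // write: returns 42
+		//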
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
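+	// [Illustrative sketch, not part of the jQuery source] getData()/dataAttr()
+	// above give .data() its HTML5 coercion; for an element carrying
+	// data-age="42" and data-opts='{"a":1}':
+	//
+	//     jQuery( el ).data( "age" );    // 42, a number rather than "42"
+	//     jQuery( el ).data( "opts" );   // { a: 1 }, parsed as JSON
+	//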
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
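+			// [Illustrative sketch, not part of the jQuery source] The
+			// "inprogress" sentinel above is what serializes the default
+			// "fx" queue:
+			//
+			//     jQuery( el ).queue( function( next ) {
+			//         console.log( "step 1" );
+			//         next();                        // hand off to the next entry
+			//     } ).queue( function( next ) {
+			//         console.log( "step 2" );
+			//         next();
+			//     } );
+			//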
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
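+		// [Illustrative sketch, not part of the jQuery source] adjustCSS() is
+		// what lets relative, non-px values animate, e.g.:
+		//
+		//     jQuery( el ).animate( { width: "+=2em" } );
+		//
+		// The loop below converts the current px width into em by iterative
+		// approximation before the "+=" offset is applied.
+		//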
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+	// Support: IE <=9 only
+	// IE <=9 replaces <option> elements with option text
+	div.innerHTML = "<option></option>";
+	support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// Support: IE <=9 only
+if ( !support.option ) {
+	wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
+}
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, attached, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( toType( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		attached = isAttached( elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( attached ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+
+var
+	rkeyEvent = /^key/,
+	rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
+	rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 - 11+
+// focus() and blur() are
asynchronous, except when they are no-op. +// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
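+				// [Illustrative sketch, not part of the jQuery source]
+				// jQuery.event.add() is reached through the public .on() forms:
+				//
+				//     jQuery( list ).on( "click", handler );              // direct
+				//     jQuery( list ).on( "click", "li", handler );        // delegated
+				//     jQuery( list ).on( "click.menu", data, handler );   // namespaced, with data
+				//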
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
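+			// [Illustrative sketch, not part of the jQuery source] The namespace
+			// handling here supports selective unbinding:
+			//
+			//     jQuery( el ).off( "click.menu" );   // only "menu"-namespaced clicks
+			//     jQuery( el ).off( ".menu" );        // everything in the namespace
+			//     jQuery( el ).off( "click", "**" );  // delegated click handlers only
+			//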
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
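+					// [Illustrative sketch, not part of the jQuery source]
+					// Namespace filtering at dispatch time:
+					//
+					//     jQuery( el ).on( "click.a", f ).on( "click.b", g );
+					//     jQuery( el ).trigger( "click.a" );   // runs f only
+					//     jQuery( el ).trigger( "click" );     // runs f and g
+					//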
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. 
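+					// [Illustrative sketch, not part of the jQuery source] The
+					// observable effect of leverageNative(): a synthetic trigger
+					// still performs the real default action exactly once, e.g.
+					//
+					//     jQuery( "input[type=checkbox]" ).trigger( "click" );
+					//     // handlers run AND the checked state flips, no double fire
+					//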
+ saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + return result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. 
+// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /<script|<style|<link/i, + + // checked="checked" or checked + rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, + + rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc.
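// --- Editor's sketch (hypothetical, never invoked) --------------------------
// What cloneCopyEvent provides at the API level: .clone( true ) re-adds the
// source's handlers on the copy via the private-data walk below.
function exampleCloneWithEvents() {
	var src = jQuery( "<button>ok</button>" ).on( "click", function() {
		console.log( "clicked" );
	} );
	var copy = src.clone( true ); // handlers copied through cloneCopyEvent
	copy.trigger( "click" );      // logs "clicked"
}
// --- end of editor's sketch; upstream cloneCopyEvent continues below --------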
+ if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.get( src ); + events = pdataOld.events; + + if ( events ) { + dataPriv.remove( dest, "handle events" ); + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
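// --- Editor's sketch (hypothetical, never invoked) --------------------------
// The loop below clones the built fragment for every target but the last
// (the original fragment is reserved for the final element, see #8070), so
// each matched row gets its own copy of the new cell:
function exampleMultiTargetAppend() {
	jQuery( "#scores tr" ).append( "<td>new</td>" ); // hypothetical selector
}
// --- end of editor's sketch; upstream domManip loop continues below ---------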
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove 
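// --- Editor's sketch (hypothetical, never invoked) --------------------------
// cleanData is the reason .remove() forgets handlers while .detach() keeps
// them for later re-insertion:
function exampleRemoveVsDetach() {
	var widget = jQuery( "#widget" ).on( "click", function() {} ); // hypothetical id
	widget.detach(); // private data survives; re-appending keeps the handler
	widget.remove(); // cleanData runs: handlers and data are discarded
}
// --- end of editor's sketch; the assignment below implements the
// Data#remove note above ------------------------------------------------------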
+ elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
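// --- Editor's sketch (hypothetical, never invoked) --------------------------
// The support tests below follow a lazy singleton pattern: the first
// support.*() call triggers one throwaway layout in computeStyleTests(), and
// every later call returns the cached measurement. The same idiom in
// isolation:
function exampleLazySupportTest() {
	var cachedResult;
	return function() {
		if ( cachedResult === undefined ) {

			// Hypothetical probe; runs once, result cached forever.
			cachedResult = document.createElement( "div" ).style.cssText === "";
		}
		return cachedResult;
	};
}
// --- end of editor's sketch; upstream support tests continue below ----------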
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! 
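// --- Editor's note (not part of the upstream file) ---------------------------
// On affected IE/Edge versions a row styled `tr { height: 1px }` that holds a
// 9px-tall child reports the 1px CSS value through getComputedStyle while
// tr.offsetHeight reports the real, grown box; the probe below detects that
// mismatch by checking whether the computed height tracks the content.
// -----------------------------------------------------------------------------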
+ reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px"; + tr.style.height = "1px"; + trChild.style.height = "9px"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. 
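// --- Editor's sketch (hypothetical, never invoked) --------------------------
// The same self-redefining accessor idiom in isolation: the first read pays
// for the computation, later reads hit a plain cached property.
function exampleSelfRedefiningGetter( computeOnce ) { // computeOnce is hypothetical
	return {
		get value() {
			delete this.value;          // drop this accessor...
			this.value = computeOnce(); // ...and cache the real answer
			return this.value;
		}
	};
}
// --- end of editor's sketch; upstream hook redefinition is below ------------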
+ return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
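// --- Editor's sketch (hypothetical, never invoked) --------------------------
// CSS custom properties skip the camelCase/vendor mapping below and are read
// and written verbatim via style.setProperty()/getPropertyValue():
function exampleCustomProperty() {
	jQuery( ":root" ).css( "--accent-color", "#c33" ); // hypothetical property
	return jQuery( ":root" ).css( "--accent-color" );  // "#c33"
}
// --- end of editor's sketch; upstream name normalization continues below ----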
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
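// --- Editor's sketch (hypothetical, never invoked) --------------------------
// Box animations such as slideDown()/slideUp() pass through this branch;
// pinning overflow to "hidden" below is why content does not spill out of an
// element while its height is mid-tween.
function exampleBoxAnimation() {
	jQuery( "#panel" ).slideDown( 200 ); // hypothetical id
}
// --- end of editor's sketch; upstream overflow bookkeeping is below ---------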
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( + dataPriv.get( cur, "events" ) || Object.create( null ) + )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } 
+ } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If 
prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script + if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + +
Binning


To be useful, the measured correlations need to be binned in some way to find the average correlation among many pairs of nearly the same separation. The different ways to bin the results may be specified using the bin_type parameter in BinnedCorr2.
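
As a minimal sketch of how this looks in practice (using the treecorr Python API, with illustrative parameter values):

    import treecorr

    # Choose the binning scheme via bin_type when building a
    # correlation object; 'Log' is the default and could be omitted.
    nn = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20,
                                bin_type='Log')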

+
+

“Log”

+

The default way to bin the results in TreeCorr is uniformly in log(r), +where r is defined according to the specified metric +(cf. Metrics). This corresponds to bin_type = “Log”, although +one normally omits this, as it is the default.

+

For most correlation functions, which tend to be approximately power laws, this +binning is the most appropriate, since it naturally handles a large dynamic range +in the separation.

+

The exact binning is specified using any 3 of the following 4 parameters:

  • nbins – How many bins to use.
  • bin_size – The width of the bins in log(r).
  • min_sep – The minimum separation r to include.
  • max_sep – The maximum separation r to include.

For a pair with a metric distance r, the index of the corresponding bin in the output array is int((log(r) - log(min_sep))/bin_size).

Note

If nbins is the omitted value, then bin_size might need to be decreased slightly to accommodate an integer number of bins with the given min_sep and max_sep.
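For concreteness, here is a minimal sketch of specifying “Log” binning by giving three of the four parameters (the values here are arbitrary); the omitted bin_size is then derived automatically:

>>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
...                             sep_units='arcmin')
>>> # bin_size is derived as log(max_sep/min_sep)/nbins, here about 0.23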
“Linear”

For use cases where the scales of interest span only a relatively small range of distances, it may be more convenient to use linear binning rather than logarithmic. A notable example of this is BAO investigations, where the interesting region is near the BAO peak. In these cases, using bin_type = “Linear” may be preferred.

As with “Log”, the binning may be specified using any 3 of the following 4 parameters:

  • nbins – How many bins to use.
  • bin_size – The width of the bins in r.
  • min_sep – The minimum separation r to include.
  • max_sep – The maximum separation r to include.

For a pair with a metric distance r, the index of the corresponding bin in the output array is int((r - min_sep)/bin_size).

Note

If nbins is the omitted value, then bin_size might need to be decreased slightly to accommodate an integer number of bins with the given min_sep and max_sep.
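A corresponding sketch for “Linear” binning (arbitrary values, chosen to bracket a feature of interest such as the BAO peak):

>>> nn = treecorr.NNCorrelation(bin_type='Linear', min_sep=80., max_sep=120.,
...                             nbins=40)
>>> # bin_size is derived as (max_sep - min_sep)/nbins = 1.0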
“TwoD”

To bin the correlation in two dimensions, (x,y), you can use bin_type = “TwoD”. This will keep track of not only the distance between two points, but also the direction. The results are then binned linearly in both the delta x and delta y values.

The exact binning is specified using any 2 of the following 3 parameters:

  • nbins – How many bins to use in each direction.
  • bin_size – The width of the bins in dx and dy.
  • max_sep – The maximum absolute value of dx or dy to include.

For a pair with a directed separation (dx,dy), the indices of the corresponding bin in the 2-d output array are int((dx + max_sep)/bin_size), int((dy + max_sep)/bin_size).

The binning is symmetric around (0,0), so the minimum separation in either direction is -max_sep, and the maximum is +max_sep. It is also permissible to specify min_sep to exclude small separations from being accumulated, but the binning will still include a bin that crosses over (dx,dy) = (0,0) if nbins is odd, or four bins that touch (0,0) if nbins is even.

Note that this binning is only valid when the input positions are given as x,y (not ra, dec), and the metric is “Euclidean”. If you have a use case for other combinations, please open an issue with your specific case, and we can try to figure out how it should be implemented.
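A minimal sketch for “TwoD” binning (arbitrary values); any two of the three parameters determine the third:

>>> nn2d = treecorr.NNCorrelation(bin_type='TwoD', max_sep=50., nbins=25)
>>> # bin_size is derived as 2*max_sep/nbins = 4.0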

Output quantities

For all of the different binning options, the Correlation object will have the following attributes related to the locations of the bins:

  • rnom – The separation at the nominal centers of the bins. For “Linear” binning, these will be spaced uniformly.
  • logr – The log of the separation at the nominal centers of the bins. For “Log” binning, these will be spaced uniformly. This is always the (natural) log of rnom.
  • left_edges – The separation at the left edges of the bins. For “Linear” binning, these are half-way between the rnom values of successive bins. For “Log” binning, these are the geometric mean of successive rnom values, rather than the arithmetic mean. For “TwoD” binning, these are like “Linear”, but for the x separations only.
  • right_edges – Analogously, the separation at the right edges of the bins.
  • meanr – The mean separation of all the pairs of points that actually ended up falling in each bin.
  • meanlogr – The mean log(separation) of all the pairs of points that actually ended up falling in each bin.

The last two quantities are only available after finishing a calculation (e.g. with process).

In addition to the above, “TwoD” binning also includes the following:

  • bottom_edges – The y separation at the bottom edges of the 2-D bins. Like left_edges, but for the y values rather than the x values.
  • top_edges – The y separation at the top edges of the 2-D bins. Like right_edges, but for the y values rather than the x values.

There is some subtlety about which separation to use when comparing measured correlation functions to theoretical predictions. See Appendix D of Singh et al, 2020, who show that one can find percent level differences among the different options. (See their Figure D2 in particular.) The difference is smaller as the bin size decreases, although they point out that it is not always feasible to make the bin size very small, e.g. because of issues calculating the covariance matrix.

In most cases, if the true signal is expected to be locally well approximated by a power law, then using meanlogr is probably the most appropriate choice. This most closely approximates the signal-based weighting that they recommend, but if you are concerned about the percent level effects of this choice, you would be well-advised to investigate the different options with simulations to see exactly what impact the choice has on your science.
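For example, here is a sketch (assuming gg is a shear-shear correlation object that has already been processed) of adopting the exponentiated meanlogr as the representative separation for each bin:

>>> import numpy
>>> r = numpy.exp(gg.meanlogr)   # mean log(separation) per bin, exponentiated
>>> # Compare the measured gg.xip at these r values to the theory prediction.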

Other options for binning

There are a few other options that affect the binning, which can be set when constructing any of the BinnedCorr2 or BinnedCorr3 classes.

sep_units

The optional parameter sep_units lets you specify what units you want for the binned separations if the separations are angles.

Valid options are “arcsec”, “arcmin”, “degrees”, “hours”, or “radians”. The default if not specified is “radians”.

Note that this is only valid when the distance metric is an angle. E.g. if RA and Dec values are given for the positions, and no distance values are specified, then the default metric, “Euclidean”, is the angular separation on the sky. “Arc” similarly is always an angle.

If the distance metric is a physical distance, then this parameter is invalid, and the output separation will match the physical distance units in the input catalog. E.g. if the distance from Earth is given as r, then the output units will match the units of the r values. Or if positions are given as x, y (and maybe z), then the units will be whatever the units are for these values.
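E.g., a sketch (arbitrary values) requesting the binned separations in arcminutes for a catalog with (ra,dec) positions:

>>> kk = treecorr.KKCorrelation(min_sep=0.5, max_sep=50., nbins=15,
...                             sep_units='arcmin')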

bin_slop

One of the main reasons that TreeCorr is able to compute correlation functions so quickly is that it allows the bin edges to be a little bit fuzzy. A pair whose separation is very close to a dividing line between two bins might be placed in the next bin over from where an exact calculation would put it.

This is normally completely fine for any real-world application. Indeed, by deciding to bin your correlation function with some non-zero bin size, you have implicitly defined a resolution below which you don’t care about the exact separation values.

The approximation TreeCorr makes is to allow some additional imprecision that is a fraction of this level, namely bin_slop. Specifically, bin_slop specifies the maximum possible error any pair can have, given as a fraction of the bin size.

You can think of it as turning all of your rectangular bins into overlapping trapezoids, where bin_slop defines the ratio of the angled portion to the flat mean width. Larger bin_slop allows for more overlap (and is thus faster), while smaller bin_slop gets closer to putting each pair perfectly into the bin it belongs in.

The default bin_slop for the “Log” bin type is such that bin_slop * bin_size is 0.1. Or if bin_size < 0.1, then we use bin_slop = 1. This has been found to give fairly good accuracy across a variety of applications. However, for high precision measurements, it may be appropriate to use a smaller value than this, especially if your bins are fairly large.

A typical test to perform on your data is to cut bin_slop in half and see if your results change significantly. If not, you are probably fine, but if they change by an appreciable amount (according to whatever you think that means for your science), then your original bin_slop was too large.

To understand the impact of the bin_slop parameter, it helps to start by thinking about when it is set to 0. If bin_slop = 0, then TreeCorr does essentially a brute-force calculation, where each pair of points is always placed into the correct bin.

But if bin_slop > 0, then any given pair is allowed to be placed in the wrong bin so long as the true separation is within this fraction of a bin from the edge. For example, if a bin nominally goes from 10 to 20 arcmin, then with bin_slop = 0.05, TreeCorr will accumulate pairs with separations ranging from 9.5 to 20.5 arcmin into this bin. (I.e. the slop is 0.05 of the bin width on each side.) Note that some of the pairs with separations from 9.5 to 10.5 would possibly fall into the lower bin instead. Likewise some from 19.5 to 20.5 would fall in the higher bin. So both edges are a little fuzzy.

For large numbers of objects, the shifts up and down tend to cancel out, so there is typically very little bias in the results. Statistically, about as many pairs scatter up as scatter down, so the resulting counts come out pretty close to correct. Furthermore, the total number of pairs within the specified range is always correct, since each pair is placed in some bin.
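Here is a sketch of the halving test described above (cat is assumed to be an existing Catalog, and the other values are arbitrary):

>>> kwargs = dict(min_sep=1., max_sep=100., nbins=20, sep_units='arcmin')
>>> gg1 = treecorr.GGCorrelation(bin_slop=0.5, **kwargs)
>>> gg1.process(cat)
>>> gg2 = treecorr.GGCorrelation(bin_slop=0.25, **kwargs)
>>> gg2.process(cat)
>>> # If gg2.xip differs from gg1.xip by more than you can tolerate,
>>> # the original bin_slop was too large.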

brute

Sometimes, it can be useful to force the code to do the full brute force calculation, skipping all of the approximations that are inherent to the tree traversal algorithm. This of course is much slower, but this option can be useful for testing purposes especially. For instance, comparisons to brute force results have been invaluable in the development of TreeCorr’s faster algorithms. Some science cases also use comparison to brute force results to confirm that they are not significantly impacted by using non-zero bin_slop.

Setting brute = True is roughly equivalent to setting bin_slop = 0. However, there is a distinction between these two cases. Internally, the former will always traverse the tree all the way to the leaves, so every pair will be calculated individually. This really is the brute force calculation.

However, bin_slop = 0 will allow the traversal to stop early if all possible pairs in a given pair of cells fall into the same bin. This can be quite a large speedup in some cases, and especially for NN correlations, there is no disadvantage to doing so.

For shear correlations, there can be a slight difference between using bin_slop = 0 and brute = True because the shear projections won’t be precisely equal in the two cases. Shear correlations require parallel transporting the shear values to the centers of the cells, and then when accumulating pairs, the shears are projected onto the line joining the two points. Both of these lead to slight differences in the results of a bin_slop = 0 calculation compared to the true brute force calculation. If the difference is seen to matter for you, this is probably a sign that you should decrease your bin size.

Additionally, there is one other way to use the brute parameter. If you set brute to 1 or 2, rather than True or False, then the forced traversal to the leaf cells will only apply to cat1 or cat2 respectively. The cells for the other catalog will use the normal criterion based on the bin_slop parameter to decide whether it is acceptable to use a non-leaf cell or to continue traversing the tree.
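For instance, a sketch (lens_cat and source_cat are assumed to be existing Catalogs) forcing leaf-level traversal for only the first catalog in a cross-correlation:

>>> ng = treecorr.NGCorrelation(min_sep=1., max_sep=100., nbins=20,
...                             sep_units='arcmin', brute=1)
>>> ng.process(lens_cat, source_cat)
>>> # Cells in lens_cat are traversed to the leaves; source_cat uses the
>>> # normal bin_slop criterion.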

Input Data

The Catalog class

class treecorr.Catalog(file_name=None, config=None, *, num=0, logger=None, is_rand=False, x=None, y=None, z=None, ra=None, dec=None, r=None, w=None, wpos=None, flag=None, g1=None, g2=None, k=None, patch=None, patch_centers=None, rng=None, **kwargs)[source]
A set of input data (positions and other quantities) to be correlated.

A Catalog object keeps track of the relevant information for a number of objects to be correlated. The objects each have some kind of position (for instance (x,y), (ra,dec), (x,y,z), etc.), and possibly some extra information such as weights (w), shear values (g1,g2), or kappa values (k).

The simplest way to build a Catalog is to simply pass in numpy arrays for each piece of information you want included. For instance:

>>> cat = treecorr.Catalog(x=x, y=y, k=k, w=w)

Each of these input parameters should be a numpy array, where each corresponding element is the value for that object. Of course, all the arrays should be the same size.

In some cases, there are additional required parameters. For instance, with RA and Dec positions, you need to declare what units the given input values use:

>>> cat = treecorr.Catalog(ra=ra, dec=dec, g1=g1, g2=g2,
...                        ra_units='hour', dec_units='deg')

For (ra,dec) positions, these units fields are required to specify the units of the angular values. For (x,y) positions, the units are optional (and usually unnecessary).

You can also initialize a Catalog by reading in columns from a file. For instance:

>>> cat = treecorr.Catalog('data.fits', ra_col='ALPHA2000', dec_col='DELTA2000',
...                        g1_col='E1', g2_col='E2', ra_units='deg', dec_units='deg')

This reads the given columns from the input file. The input file may be a FITS file, an HDF5 file, a Parquet file, or an ASCII file. Normally the file type is determined according to the file’s extension (e.g. ‘.fits’ here), but it can also be set explicitly with file_type.

For FITS, HDF5, and Parquet files, the column names should be strings as shown above. For ASCII files, they may be strings if the input file has column names. But you may also use integer values giving the index of which column to use. We use a 1-based convention for these, so x_col=1 would mean to use the first column as the x value. (0 means don’t read that column.)
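For example, a sketch of reading an ASCII file by (1-based) column index rather than by name (the file name here is hypothetical):

>>> cat = treecorr.Catalog('data.txt', x_col=1, y_col=2, g1_col=3, g2_col=4)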

Finally, you may store all the various parameters in a configuration dict and just pass the dict as an argument after the file name:

>>> config = { 'ra_col' : 'ALPHA2000',
...            'dec_col' : 'DELTA2000',
...            'g1_col' : 'E1',
...            'g2_col' : 'E2',
...            'ra_units' : 'deg',
...            'dec_units' : 'deg' }
>>> cat = treecorr.Catalog(file_name, config)

This can be useful for encapsulating all the TreeCorr options in a single place in your code, which might be used multiple times. Notably, this syntax ignores any dict keys that are not relevant to the Catalog construction, so you can use the same config dict for the Catalog and your correlation objects, which can be convenient.

See also Configuration Parameters for complete descriptions of all of the relevant configuration parameters, particularly the first section Parameters about the input file(s).

You may also override any configuration parameters or add additional parameters as kwargs after the config dict. For instance, to flip the sign of the g1 values after reading from the input file, you could write:

>>> cat1 = treecorr.Catalog(file_name, config, flip_g1=True)
+
After construction, a Catalog object will have the following attributes:

Attributes

  • x – The x positions, if defined, as a numpy array (converted to radians if x_units was given). (None otherwise)
  • y – The y positions, if defined, as a numpy array (converted to radians if y_units was given). (None otherwise)
  • z – The z positions, if defined, as a numpy array. (None otherwise)
  • ra – The right ascension, if defined, as a numpy array (in radians). (None otherwise)
  • dec – The declination, if defined, as a numpy array (in radians). (None otherwise)
  • r – The distance, if defined, as a numpy array. (None otherwise)
  • w – The weights, as a numpy array. (All 1’s if no weight column provided.)
  • wpos – The weights for position centroiding, as a numpy array, if given. (None otherwise, which means that implicitly wpos = w.)
  • g1 – The g1 component of the shear, if defined, as a numpy array. (None otherwise)
  • g2 – The g2 component of the shear, if defined, as a numpy array. (None otherwise)
  • k – The convergence, kappa, if defined, as a numpy array. (None otherwise)
  • patch – The patch number of each object, if patches are being used. (None otherwise) If the entire catalog is a single patch, then patch may be an int.
  • ntot – The total number of objects (including those with zero weight if keep_zero_weight is set to True)
  • nobj – The number of objects with non-zero weight
  • sumw – The sum of the weights
  • varg – The shear variance (aka shape noise) (0 if g1,g2 are not defined)

    Note: If there are weights, this is really \(\sum(w^2 |g|^2)/\sum(w)\), which is more like \(\langle w \rangle \mathrm{Var}(g)\). It is only used for var_method='shot', where the noise estimate is this value divided by the total weight per bin, so this is the right quantity to use for that.

  • vark – The kappa variance (0 if k is not defined)

    Note: If there are weights, this is really \(\sum(w^2 \kappa^2)/\sum(w)\). As for varg, this is the right quantity to use for the 'shot' noise estimate.

  • name – When constructed from a file, this will be the file_name. It is only used as a reference name in logging output after construction, so if you construct it from data vectors directly, it will be ''. You may assign to it if you want to give this catalog a specific name.
  • coords – Which kind of coordinate system is defined for this catalog. The possibilities for this attribute are:
      • ‘flat’ = 2-dimensional flat coordinates. Set when x,y are given.
      • ‘spherical’ = spherical coordinates. Set when ra,dec are given.
      • ‘3d’ = 3-dimensional coordinates. Set when x,y,z or ra,dec,r are given.
  • field – If any of the get?Field methods have been called to construct a field from this catalog (either explicitly or implicitly via a corr.process() command), then this attribute will hold the most recent field to have been constructed.

    Note: It holds this field as a weakref, so if caching is turned off with resize_cache(0), and the field has been garbage collected, then this attribute will be None.
Parameters

  • file_name (str) – The name of the catalog file to be read in. (default: None, in which case the columns need to be entered directly with x, y, etc.)
  • config (dict) – A configuration dict which defines attributes about how to read the file. Any optional kwargs may be given here in the config dict if desired. Invalid keys in the config dict are ignored. (default: None)
+
Keyword Arguments
+
    +
  • num (int) – Which number catalog are we reading. e.g. for NG correlations the +catalog for the N has num=0, the one for G has num=1. This is only +necessary if you are using a config dict where things like x_col +have multiple values. (default: 0)

  • +
  • logger – If desired, a Logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
  • is_rand (bool) – If this is a random file, then setting is_rand to True will let you skip k_col, g1_col, and g2_col if they were set for the main catalog. (default: False)

  • +
  • x (array) – The x values. (default: None; When providing values directly, either +x,y are required or ra,dec are required.)

  • +
  • y (array) – The y values. (default: None; When providing values directly, either +x,y are required or ra,dec are required.)

  • +
  • z (array) – The z values, if doing 3d positions. (default: None; invalid in +conjunction with ra, dec.)

  • +
  • ra (array) – The RA values. (default: None; When providing values directly, either +x,y are required or ra,dec are required.)

  • +
  • dec (array) – The Dec values. (default: None; When providing values directly, either +x,y are required or ra,dec are required.)

  • +
  • r (array) – The r values (the distances of each source from Earth). (default: None; +invalid in conjunction with x, y.)

  • +
  • w (array) – The weights to apply when computing the correlations. (default: None)

  • +
  • wpos (array) – The weights to use for position centroiding. (default: None, which +means to use the value weights, w, to weight the positions as well.)

  • +
  • flag (array) – An optional array of flags, indicating objects to skip. Rows with +flag != 0 (or technically flag & ~ok_flag != 0) will be given a weight +of 0. (default: None)

  • +
  • g1 (array) – The g1 values to use for shear correlations. (g1,g2 may represent any +spinor field.) (default: None)

  • +
  • g2 (array) – The g2 values to use for shear correlations. (g1,g2 may represent any +spinor field.) (default: None)

  • +
  • k (array) – The kappa values to use for scalar correlations. (This may represent +any scalar field.) (default: None)

  • +
  • patch (array or int) –

    Optionally, patch numbers to use for each object. (default: None)

    +
    +

    Note

    +

    This may also be an int if the entire catalog represents a +single patch. If patch_centers is given this will select those +items from the full input that correspond to the given patch number.

    +
    +

  • +
  • patch_centers (array or str) – Alternative to setting patch by hand or using kmeans, you +may instead give patch_centers either as a file name or an array +from which the patches will be determined. (default: None)

  • +
  • file_type (str) – What kind of file is the input file. Valid options are ‘ASCII’, ‘FITS’, ‘HDF’, or ‘Parquet’. (default: if the file_name extension starts with .fit, then use ‘FITS’, or with .hdf, then use ‘HDF’, or with ‘.par’, then use ‘Parquet’, else ‘ASCII’)

  • +
  • delimiter (str) – For ASCII files, what delimiter to use between values. (default: None, +which means any whitespace)

  • +
  • comment_marker (str) – For ASCII files, what token indicates a comment line. (default: ‘#’)

  • +
  • first_row (int) – Which row to take as the first row to be used. (default: 1)

  • +
  • last_row (int) – Which row to take as the last row to be used. (default: -1, which means +the last row in the file)

  • +
  • every_nth (int) – Only use every nth row of the input catalog. (default: 1)

  • +
  • npatch (int) –

    How many patches to split the catalog into (using kmeans if no other +patch information is provided) for the purpose of jackknife variance +or other options that involve running via patches. (default: 1)

    +
    +

    Note

    +

    If the catalog has ra,dec,r positions, the patches will +be made using just ra,dec.

    +
    +

  • +
  • kmeans_init (str) – If using kmeans to make patches, which init method to use. +cf. Field.run_kmeans (default: ‘tree’)

  • +
  • kmeans_alt (bool) – If using kmeans to make patches, whether to use the alternate kmeans +algorithm. cf. Field.run_kmeans (default: False)

  • +
  • x_col (str or int) – The column to use for the x values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column. +When reading from a file, either x_col and y_col are required or ra_col +and dec_col are required.)

  • +
  • y_col (str or int) – The column to use for the y values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column. +When reading from a file, either x_col and y_col are required or ra_col +and dec_col are required.)

  • +
  • z_col (str or int) – The column to use for the z values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column; +invalid in conjunction with ra_col, dec_col.)

  • +
  • ra_col (str or int) – The column to use for the ra values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column. +When reading from a file, either x_col and y_col are required or ra_col +and dec_col are required.)

  • +
  • dec_col (str or int) – The column to use for the dec values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column. +When reading from a file, either x_col and y_col are required or ra_col +and dec_col are required.)

  • +
  • r_col (str or int) – The column to use for the r values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column; +invalid in conjunction with x_col, y_col.)

  • +
  • x_units (str) – The units to use for the x values, given as a string. Valid options are +arcsec, arcmin, degrees, hours, radians. (default: radians, although +with (x,y) positions, you can often just ignore the units, and the +output separations will be in whatever units x and y are in.)

  • +
  • y_units (str) – The units to use for the y values, given as a string. Valid options are +arcsec, arcmin, degrees, hours, radians. (default: radians, although +with (x,y) positions, you can often just ignore the units, and the +output separations will be in whatever units x and y are in.)

  • +
  • ra_units (str) – The units to use for the ra values, given as a string. Valid options +are arcsec, arcmin, degrees, hours, radians. (required when using +ra_col or providing ra directly)

  • +
  • dec_units (str) – The units to use for the dec values, given as a string. Valid options +are arcsec, arcmin, degrees, hours, radians. (required when using +dec_col or providing dec directly)

  • +
  • g1_col (str or int) – The column to use for the g1 values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column.)

  • +
  • g2_col (str or int) – The column to use for the g2 values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column.)

  • +
  • k_col (str or int) – The column to use for the kappa values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column.)

  • +
  • patch_col (str or int) – The column to use for the patch numbers. An integer is only allowed +for ASCII files. (default: ‘0’, which means not to read in this column.)

  • +
  • w_col (str or int) – The column to use for the weight values. An integer is only allowed for +ASCII files. (default: ‘0’, which means not to read in this column.)

  • +
  • wpos_col (str or int) – The column to use for the position weight values. An integer is only +allowed for ASCII files. (default: ‘0’, which means not to read in this +column, in which case wpos=w.)

  • +
  • flag_col (str or int) – The column to use for the flag values. An integer is only allowed for +ASCII files. Any row with flag != 0 (or technically flag & ~ok_flag +!= 0) will be given a weight of 0. (default: ‘0’, which means not to +read in this column.)

  • +
  • ignore_flag (int) – Which flags should be ignored. (default: all non-zero flags are ignored. +Equivalent to ignore_flag = ~0.)

  • +
  • ok_flag (int) – Which flags should be considered ok. (default: 0. i.e. all non-zero +flags are ignored.)

  • +
  • allow_xyz (bool) – Whether to allow x,y,z values in conjunction with ra,dec. Normally, +it is an error to have both kinds of positions, but if you know that +the x,y,z, values are consistent with the given ra,dec values, it +can save time to input them, rather than calculate them using trig +functions. (default: False)

  • +
  • flip_g1 (bool) – Whether to flip the sign of the input g1 values. (default: False)

  • +
  • flip_g2 (bool) – Whether to flip the sign of the input g2 values. (default: False)

  • +
  • keep_zero_weight (bool) – Whether to keep objects with wpos=0 in the catalog (including +any objects that indirectly get wpos=0 due to NaN or flags), so they +would be included in ntot and also in npairs calculations that use +this Catalog, although of course not contribute to the accumulated +weight of pairs. (default: False)

  • +
  • save_patch_dir (str) – If desired, when building patches from this Catalog, save them +as FITS files in the given directory for more efficient loading when +doing cross-patch correlations with the low_mem option.

  • +
  • ext (int/str) – For FITS/HDF files, which extension to read. (default: 1 for FITS, root for HDF)

  • +
  • x_ext (int/str) – Which extension to use for the x values. (default: ext)

  • +
  • y_ext (int/str) – Which extension to use for the y values. (default: ext)

  • +
  • z_ext (int/str) – Which extension to use for the z values. (default: ext)

  • +
  • ra_ext (int/str) – Which extension to use for the ra values. (default: ext)

  • +
  • dec_ext (int/str) – Which extension to use for the dec values. (default: ext)

  • +
  • r_ext (int/str) – Which extension to use for the r values. (default: ext)

  • +
  • g1_ext (int/str) – Which extension to use for the g1 values. (default: ext)

  • +
  • g2_ext (int/str) – Which extension to use for the g2 values. (default: ext)

  • +
  • k_ext (int/str) – Which extension to use for the k values. (default: ext)

  • +
  • patch_ext (int/str) – Which extension to use for the patch numbers. (default: ext)

  • +
  • w_ext (int/str) – Which extension to use for the w values. (default: ext)

  • +
  • wpos_ext (int/str) – Which extension to use for the wpos values. (default: ext)

  • +
  • flag_ext (int/str) – Which extension to use for the flag values. (default: ext)

  • +
  • verbose (int) –

    If no logger is provided, this will optionally specify a logging level +to use.

    +
    +
      +
    • 0 means no logging output

    • +
    • 1 means to output warnings only (default)

    • +
    • 2 means to output various progress information

    • +
    • 3 means to output extensive debugging information

    • +
    +
    +

  • +
  • log_file (str) – If no logger is provided, this will specify a file to write the logging +output. (default: None; i.e. output to standard output)

  • +
  • split_method (str) –

    How to split the cells in the tree when building the tree structure. +Options are:

    +
    +
      +
    • mean: Use the arithmetic mean of the coordinate being split. +(default)

    • +
    • median: Use the median of the coordinate being split.

    • +
    • middle: Use the middle of the range; i.e. the average of the +minimum and maximum value.

    • +
    • random: Use a random point somewhere in the middle two quartiles +of the range.

    • +
    +
    +

  • +
  • cat_precision (int) – The precision to use when writing a Catalog to an ASCII file. This +should be an integer, which specifies how many digits to write. +(default: 16)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for any random +number generation (e.g. kmeans patches). (default: None)

  • +
  • num_threads (int) –

    How many OpenMP threads to use during the catalog load steps. +(default: use the number of cpu cores)

    +
    +

    Note

    +

    This won’t work if the system’s C compiler cannot use OpenMP +(e.g. clang prior to version 3.7.)

    +
    +

  • +
+
+
+
+
+checkForNaN(col, col_str)[source]
+

Check if the column has any NaNs. If so, set those rows to have w[k]=0.

+
+
Parameters
+
    +
  • col (array) – The input column to check.

  • +
  • col_str (str) – The name of the column. Used only as information in logging output.

  • +
+
+
+
+ +
+
+clear_cache()[source]
+

Clear all field caches.

+

The various kinds of fields built from this catalog are cached. This may or may not +be an optimization for your use case. Normally only a single field is built for a +given catalog, and it is usually efficient to cache it, so it can be reused multiple +times. E.g. for the usual Landy-Szalay NN calculation:

+
>>> dd.process(data_cat)
+>>> rr.process(rand_cat)
+>>> dr.process(data_cat, rand_cat)
+
+
+

the third line will be able to reuse the same fields built for the data and randoms +in the first two lines.

+

However, this also means that the memory used for the field will persist as long as +the catalog object does. If you need to recover this memory and don’t want to delete +the catalog yet, this method lets you clear the cache.

+

There are separate caches for each kind of field. If you want to clear just one or +some of them, you can call clear separately for the different caches:

+
>>> cat.nfields.clear()
+>>> cat.kfields.clear()
+>>> cat.gfields.clear()
+>>> cat.nsimplefields.clear()
+>>> cat.ksimplefields.clear()
+>>> cat.gsimplefields.clear()
+
+
+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+getGField(*, min_size=0, max_size=None, split_method=None, brute=False, min_top=None, max_top=10, coords=None, logger=None)[source]
+

Return a GField based on the g1,g2 values in this catalog.

+

The GField object is cached, so this is efficient to call multiple times. +cf. resize_cache and clear_cache.

+
+
Parameters
+
    +
  • min_size (float) – The minimum radius cell required (usually min_sep). (default: 0)

  • +
  • max_size (float) – The maximum radius cell required (usually max_sep). (default: None)

  • +
  • split_method (str) – Which split method to use (‘mean’, ‘median’, ‘middle’, or ‘random’) +(default: ‘mean’; this value can also be given in the Catalog +constructor in the config dict.)

  • +
  • brute (bool) – Whether to force traversal to the leaves. (default: False)

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the +field. (default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the +field. (default: 10)

  • +
  • coords (str) – The kind of coordinate system to use. (default self.coords)

  • +
  • logger – A Logger object if desired (default: self.logger)

  • +
+
+
Returns
+

A GField object

+
+
+
+ +
+
+getGSimpleField(*, logger=None)[source]
+

Return a GSimpleField based on the g1,g2 values in this catalog.

+

The GSimpleField object is cached, so this is efficient to call multiple times. +cf. resize_cache and clear_cache

+
+
Parameters
+

logger – A Logger object if desired (default: self.logger)

+
+
Returns
+

A GSimpleField object

+
+
+
+ +
+
+getKField(*, min_size=0, max_size=None, split_method=None, brute=False, min_top=None, max_top=10, coords=None, logger=None)[source]
+

Return a KField based on the k values in this catalog.

+

The KField object is cached, so this is efficient to call multiple times. +cf. resize_cache and clear_cache

+
+
Parameters
+
    +
  • min_size (float) – The minimum radius cell required (usually min_sep). (default: 0)

  • +
  • max_size (float) – The maximum radius cell required (usually max_sep). (default: None)

  • +
  • split_method (str) – Which split method to use (‘mean’, ‘median’, ‘middle’, or ‘random’) +(default: ‘mean’; this value can also be given in the Catalog +constructor in the config dict.)

  • +
  • brute (bool) – Whether to force traversal to the leaves. (default: False)

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the +field. (default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the +field. (default: 10)

  • +
  • coords (str) – The kind of coordinate system to use. (default self.coords)

  • +
  • logger – A Logger object if desired (default: self.logger)

  • +
+
+
Returns
+

A KField object

+
+
+
+ +
+
+getKSimpleField(*, logger=None)[source]
+

Return a KSimpleField based on the k values in this catalog.

+

The KSimpleField object is cached, so this is efficient to call multiple times. +cf. resize_cache and clear_cache

+
+
Parameters
+

logger – A Logger object if desired (default: self.logger)

+
+
Returns
+

A KSimpleField object

+
+
+
+ +
+
+getNField(*, min_size=0, max_size=None, split_method=None, brute=False, min_top=None, max_top=10, coords=None, logger=None)[source]
+

Return an NField based on the positions in this catalog.

+

The NField object is cached, so this is efficient to call multiple times. +cf. resize_cache and clear_cache

+
+
Parameters
+
    +
  • min_size (float) – The minimum radius cell required (usually min_sep). (default: 0)

  • +
  • max_size (float) – The maximum radius cell required (usually max_sep). (default: None)

  • +
  • split_method (str) – Which split method to use (‘mean’, ‘median’, ‘middle’, or ‘random’) +(default: ‘mean’; this value can also be given in the Catalog +constructor in the config dict.)

  • +
  • brute (bool) – Whether to force traversal to the leaves. (default: False)

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the +field. (default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the +field. (default: 10)

  • +
  • coords (str) – The kind of coordinate system to use. (default: self.coords)

  • +
  • logger – A Logger object if desired (default: self.logger)

  • +
+
+
Returns
+

An NField object

+
+
+
+ +
+
+getNSimpleField(*, logger=None)[source]
+

Return an NSimpleField based on the positions in this catalog.

+

The NSimpleField object is cached, so this is efficient to call multiple times. +cf. resize_cache and clear_cache

+
+
Parameters
+

logger – A Logger object if desired (default: self.logger)

+
+
Returns
+

An NSimpleField object

+
+
+
+ +
+
+get_patch_centers()[source]
+

Return an array of patch centers corresponding to the patches in this catalog.

+

If the patches were set either using K-Means or by giving the centers, then this +will just return that same center array. Otherwise, it will be calculated from the +positions of the objects with each patch number.

+

This function is automatically called when accessing the property +patch_centers. So you should not normally need to call it directly.

+
+
Returns
+

An array of center coordinates used to make the patches. +Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or +spherical geometries. In the latter case, the centers represent +(x,y,z) coordinates on the unit sphere.

+
+
+
+ +
+
+get_patch_file_names(save_patch_dir)[source]
+

Get the names of the files to use for reading/writing patches in save_patch_dir

+
+ +
+
+get_patches(*, low_mem=False)[source]
+

Return a list of Catalog instances each representing a single patch from this Catalog

+

After calling this function once, the patches may be repeatedly accessed by the +patches attribute, without triggering a rebuild of the patches. Furthermore, +if patches is accessed before calling this function, it will be called automatically +(with the default low_mem parameter).

+
+
Parameters
+

low_mem (bool) – Whether to try to leave the returned patch catalogs in an +“unloaded” state, wherein they will not load the data from a +file until they are used. This only works if the current catalog +was loaded from a file or the patches were saved (using +save_patch_dir). (default: False)

+
+
+
+ +
+
+load()[source]
+

Load the data from a file, if it isn’t yet loaded.

+

When a Catalog is read in from a file, it tries to delay the loading of the data from +disk until it is actually needed. This is especially important when running over a +set of patches, since you may not be able to fit all the patches in memory at once.

+

One does not normally need to call this method explicitly. It will run automatically +whenever the data is needed. However, if you want to directly control when the disk +access happens, you can use this function.

+
+ +
+
+makeArray(col, col_str, dtype=<class 'float'>)[source]
+

Turn the input column into a numpy array if it wasn’t already. +Also make sure the input is 1-d.

+
+
Parameters
+
    +
  • col (array-like) – The input column to be converted into a numpy array.

  • +
  • col_str (str) – The name of the column. Used only as information in logging output.

  • +
  • dtype (type) – The dtype for the returned array. (default: float)

  • +
+
+
Returns
+

The column converted to a 1-d numpy array.

+
+
+
+ +
+
+read_patch_centers(file_name)[source]
+

Read patch centers from a file.

+

This function typically gets called automatically when patch_centers is given as a string, which is taken to be a file name. The patch centers are read from that file and returned.

+
+
Parameters
+

file_name (str) – The name of the file to read the patch centers from.

+
+
Returns
+

The centers, as an array, which can be used to determine the patches.

+
+
+
+ +
+
+read_patches(save_patch_dir=None)[source]
+

Read the patches from files on disk.

+

This function assumes that the patches were written using write_patches. +In particular, the file names are not arbitrary, but must match what TreeCorr uses +in that method.

+
+

Note

+

The patches that are read in will be in an “unloaded” state. They will load +as needed when some functionality requires it. So this is compatible with using +the low_mem option in various places.

+
+
+
Parameters
+

save_patch_dir (str) – The directory to read from. [default: None, in which +case self.save_patch_dir will be used. If that is None, a +ValueError will be raised.]

+
+
+
+ +
+
+resize_cache(maxsize)[source]
+

Resize all field caches.

+

The various kinds of fields built from this catalog are cached. This may or may not +be an optimization for your use case. Normally only a single field is built for a +given catalog, and it is usually efficient to cache it, so it can be reused multiple +times. E.g. for the usual Landy-Szalay NN calculation:

+
>>> dd.process(data_cat)
+>>> rr.process(rand_cat)
+>>> dr.process(data_cat, rand_cat)
+
+
+

the third line will be able to reuse the same fields built for the data and randoms +in the first two lines.

+

However, if you are making many different fields from the same catalog – for instance +because you keep changing the min_sep and max_sep for different calls – then saving +them all will tend to blow up the memory.

+

Therefore, the default number of fields (of each type) to cache is 1. This lets the +first use case be efficient, but not use too much memory for the latter case.

+

If you prefer a different behavior, this method lets you change the number of fields to +cache. The cache is an LRU (Least Recently Used) cache, which means only the n most +recently used fields are saved. I.e. when it is full, the least recently used field +is removed from the cache.

+

If you call this with maxsize=0, then caching will be turned off. A new field will be +built each time you call a process function with this catalog.

+

If you call this with maxsize>1, then multiple fields will be saved according to whatever number you set. This will use more memory, but may be an optimization for you depending on what you are doing.

+

Finally, if you want to set different sizes for the different kinds of fields, then +you can call resize separately for the different caches:

+
>>> cat.nfields.resize(maxsize)
+>>> cat.kfields.resize(maxsize)
+>>> cat.gfields.resize(maxsize)
+>>> cat.nsimplefields.resize(maxsize)
+>>> cat.ksimplefields.resize(maxsize)
+>>> cat.gsimplefields.resize(maxsize)
+
+
+
+
Parameters
+

maxsize (float) – The new maximum number of fields of each type to cache.

+
+
+
+ +
+
+select(indx)[source]
+

Trim the catalog to only include those objects with the given indices.

+
+
Parameters
+

indx – A numpy array of index values to keep.

+
+
+
+ +
+
+unload()[source]
+

Bring the Catalog back to an “unloaded” state, if possible.

+

When a Catalog is read in from a file, it tries to delay the loading of the data from +disk until it is actually needed. After loading, this method will return the Catalog +back to the unloaded state to recover the memory in the data arrays. If the Catalog is +needed again during further processing, it will re-load the data from disk at that time.

+

This will also call clear_cache to recover any memory from fields that have been +constructed as well.

+

If the Catalog was not read in from a file, then this function will only do the +clear_cache step.

+
+ +
+
+write(file_name, *, file_type=None, cat_precision=None)[source]
+

Write the catalog to a file.

+

The position columns are output using the same units as were used when building the +Catalog. If you want to use a different unit, you can set the catalog’s units directly +before writing. e.g.:

+
>>> cat = treecorr.Catalog('cat.dat', ra=ra, dec=dec,
+                           ra_units='hours', dec_units='degrees')
+>>> cat.ra_units = coord.degrees
+>>> cat.write('new_cat.dat')
+
+
+

The output file will include some of the following columns (those for which the +corresponding attribute is not None):

Column      Description
ra          self.ra if not None
dec         self.dec if not None
r           self.r if not None
x           self.x if not None
y           self.y if not None
z           self.z if not None
w           self.w if not None and self.nontrivial_w
wpos        self.wpos if not None
g1          self.g1 if not None
g2          self.g2 if not None
k           self.k if not None
patch       self.patch if not None
meanR       The mean value <R> of pairs that fell into each bin.
meanlogR    The mean value <logR> of pairs that fell into each bin.

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: +determine the type automatically from the extension of file_name.)

  • +
  • cat_precision (int) – For ASCII output catalogs, the desired precision. (default: 16; +this value can also be given in the Catalog constructor in the +config dict.)

  • +
+
+
Returns
+

The column names that were written to the file as a list.

+
+
+
+ +
+
+write_patch_centers(file_name)[source]
+

Write the patch centers to a file.

+

The output file will include the following columns:

Column    Description
patch     patch number (0..npatch-1)
x         mean x values
y         mean y values
z         mean z values (only for spherical or 3d coordinates)

+

It will write a FITS file if the file name ends with ‘.fits’, otherwise an ASCII file.

+
+
Parameters
+

file_name (str) – The name of the file to write to.

+
+
+
+ +
+
+write_patches(save_patch_dir=None)[source]
+

Write the patches to disk as separate files.

+

This can be used in conjunction with low_mem=True option of get_patches (and +implicitly by the various process methods) to only keep +at most two patches in memory at a time.

+
+
Parameters
+

save_patch_dir (str) – The directory to write the patches to. [default: None, in which +case self.save_patch_dir will be used. If that is None, a +ValueError will be raised.]
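A sketch of this pattern (the file and directory names are hypothetical, and gg is assumed to be an existing correlation object; the low_mem option of the process methods is used as described above):

>>> cat = treecorr.Catalog('data.fits', ra_col='RA', dec_col='DEC',
...                        ra_units='deg', dec_units='deg', npatch=20,
...                        save_patch_dir='patches')
>>> cat.write_patches()             # one file per patch in save_patch_dir
>>> gg.process(cat, low_mem=True)   # keeps at most ~two patches in memory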

+
+
+
+ +
+ +
+ +
+

File Readers

+
+
+class treecorr.reader.FitsReader(file_name, logger=None)[source]
+

Reader interface for FITS files. Uses fitsio to read columns, etc.
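A sketch of using the reader interface directly (assuming a file ‘data.fits’ with RA and DEC columns in extension 1, and assuming the readers support the context-manager protocol for opening and closing the file):

>>> from treecorr.reader import FitsReader
>>> with FitsReader('data.fits') as reader:
...     nrows = reader.row_count(ext=1)
...     data = reader.read(['RA', 'DEC'], ext=1)   # recarray with both columns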

+
+
+check_valid_ext(ext)[source]
+

Check if an extension is valid for reading, and raise ValueError if not.

+

The ext must both exist and be a table (not an image)

+
+
Parameters
+

ext (str/int) – The extension to check

+
+
+
+ +
+
+names(ext=None)[source]
+

Return a list of the names of all the columns in an extension

+
+
Parameters
+

ext (str/int) – The extension to search for columns (default: 1)

+
+
Returns
+

A list of string column names

+
+
+
+ +
+
+read(cols, s=slice(None, None, None), ext=None)[source]
+

Read a slice of a column or list of columns from a specified extension

+
+
Parameters
+
    +
  • cols (str/list) – The name(s) of column(s) to read

  • +
  • s (slice/array) – A slice object or selection of integers to read (default: all)

  • +
  • ext (str/int)) – The FITS extension to use (default: 1)

  • +
+
+
Returns
+

The data as a recarray or simple numpy array as appropriate

+
+
+
+ +
+
+read_data(ext=None, max_rows=None)[source]
+

Read all data in the file, and the parameters in the header, if any.

+
+
Parameters
+
    +
  • ext (str/int) – The FITS extension to use (default: 1)

  • +
  • max_rows (int) – The max number of rows to read. (ignored)

  • +
+
+
Returns
+

data

+
+
+
+ +
+
+read_params(ext=None)[source]
+

Read the params in the given extension, if any.

+
+
Parameters
+

ext (str/int) – The FITS extension to use (default: 1)

+
+
Returns
+

params

+
+
+
+ +
+
+row_count(col=None, ext=None)[source]
+

Count the number of rows in the named extension

+

For compatibility with the HDF interface, which can have columns +of different lengths, we allow a second argument, col, but it is +ignored here.

+
+
Parameters
+
    +
  • col (str) – The column to use (ignored)

  • +
  • ext (str/int) – The FITS extension to use (default: 1)

  • +
+
+
Returns
+

The number of rows

+
+
+
+ +
+ +
+
+class treecorr.reader.HdfReader(file_name, logger=None)[source]
+

Reader interface for HDF5 files. Uses h5py to read columns, etc.

+
+
+check_valid_ext(ext)[source]
+

Check if an extension is valid for reading, and raise ValueError if not.

+

The ext must exist - there is no other requirement for HDF files.

+
+
Parameters
+

ext (str) – The extension to check

+
+
+
+ +
+
+names(ext=None)[source]
+

Return a list of the names of all the columns in an extension

+
+
Parameters
+

ext (str) – The extension to search for columns (default: ‘/’)

+
+
Returns
+

A list of string column names

+
+
+
+ +
+
+read(cols, s=slice(None, None, None), ext=None)[source]
+

Read a slice of a column or list of columns from a specified extension.

+

Slices should always be used when reading HDF files - using a sequence of +integers is painfully slow.

+
+
Parameters
+
    +
  • cols (str/list) – The name(s) of column(s) to read

  • +
  • s (slice/array) – A slice object or selection of integers to read (default: all)

  • +
  • ext (str) – The HDF (sub-)group to use (default: ‘/’)

  • +
+
+
Returns
+

The data as a dict or single numpy array as appropriate

+
+
+
+ +
+
+read_data(ext=None, max_rows=None)[source]
+

Read all data in the file, and the parameters in the attributes, if any.

+
+
Parameters
+
    +
  • ext (str) – The HDF (sub-)group to use (default: ‘/’)

  • +
  • max_rows (int) – The max number of rows to read. (ignored)

  • +
+
+
Returns
+

data

+
+
+
+ +
+
+read_params(ext=None)[source]
+

Read the params in the given extension, if any.

+
+
Parameters
+

ext (str) – The HDF (sub-)group to use (default: ‘/’)

+
+
Returns
+

params

+
+
+
+ +
+
+row_count(col, ext=None)[source]
+

Count the number of rows in the named extension and column

+

Unlike in FitsReader, col is required.

+
+
Parameters
+
    +
  • col (str) – The column to use

  • +
  • ext (str) – The HDF group name to use (default: ‘/’)

  • +
+
+
Returns
+

The number of rows

+
+
+
+ +
+ +
+
+class treecorr.reader.AsciiReader(file_name, delimiter=None, comment_marker='#', logger=None)[source]
+

Reader interface for ASCII files using numpy.

+
+
+check_valid_ext(ext)[source]
+

Check if an extension is valid for reading, and raise ValueError if not.

+

None is the only valid extension for ASCII files.

+
+
Parameters
+

ext (str) – The extension to check

+
+
+
+ +
+
+names(ext=None)[source]
+

Return a list of the names of all the columns in an extension

+
+
Parameters
+

ext (str) – The extension (ignored)

+
+
Returns
+

A list of string column names

+
+
+
+ +
+
+read(cols, s=slice(None, None, None), ext=None)[source]
+

Read a slice of a column or list of columns from a specified extension.

+
+
Parameters
+
    +
  • cols (str/list) – The name(s) of column(s) to read

  • +
  • s (slice/array) – A slice object or selection of integers to read (default: all)

  • +
  • ext (str) – The extension (ignored)

  • +
+
+
Returns
+

The data as a dict or single numpy array as appropriate

+
+
+
+ +
+
+read_data(ext=None, max_rows=None)[source]
+

Read all data in the file, and the parameters in the header, if any.

+
+
Parameters
+
    +
  • ext (str) – The extension (ignored – Ascii always reads the next group)

  • +
  • max_rows (int) – The max number of rows to read. (default: None)

  • +
+
+
Returns
+

data

+
+
+
+ +
+
+read_params(ext=None)[source]
+

Read the params in the given extension, if any.

+
+
Parameters
+

ext (str) – The extension (ignored – Ascii always reads the next group)

+
+
Returns
+

params

+
+
+
+ +
+
+row_count(col=None, ext=None)[source]
+

Count the number of rows in the file.

+
+
Parameters
+
    +
  • col (str) – The column to use (ignored)

  • +
  • ext (str) – The extension (ignored)

  • +
+
+
Returns
+

The number of rows

+
+
+
+ +
+ +
+
+class treecorr.reader.PandasReader(file_name, delimiter=None, comment_marker='#', logger=None)[source]
+

Reader interface for ASCII files using pandas.

+
+
+read(cols, s=slice(None, None, None), ext=None)[source]
+

Read a slice of a column or list of columns from a specified extension.

+
+
Parameters
+
    +
  • cols (str/list) – The name(s) of column(s) to read

  • +
  • s (slice/array) – A slice object or selection of integers to read (default: all)

  • +
  • ext (str) – The extension (ignored)

  • +
+
+
Returns
+

The data as a dict or single numpy array as appropriate

+
+
+
+ +
+ +
+
+class treecorr.reader.ParquetReader(file_name, delimiter=None, comment_marker='#', logger=None)[source]
+

Reader interface for Parquet files using pandas.

+
+
+check_valid_ext(ext)[source]
+

Check if an extension is valid for reading, and raise ValueError if not.

+

None is the only valid extension for Parquet files.

+
+
Parameters
+

ext (str) – The extension to check

+
+
+
+ +
+
+names(ext=None)[source]
+

Return a list of the names of all the columns in an extension

+
+
Parameters
+

ext (str) – The extension to search for columns (ignored)

+
+
Returns
+

A list of string column names

+
+
+
+ +
+
+read(cols, s=slice(None, None, None), ext=None)[source]
+

Read a slice of a column or list of columns from a specified extension.

+
+
Parameters
+
    +
  • cols (str/list) – The name(s) of column(s) to read

  • +
  • s (slice/array) – A slice object or selection of integers to read (default: all)

  • +
  • ext (str) – The extension (ignored)

  • +
+
+
Returns
+

The data as a recarray or simple numpy array as appropriate

+
+
+
+ +
+
+row_count(col=None, ext=None)[source]
+

Count the number of rows in the named extension and column

+

As in FitsReader, a col argument may be given for compatibility, but it is ignored here.

+
+
Parameters
+
    +
  • col (str) – The column to use (ignored)

  • +
  • ext (str) – The extension (ignored)

  • +
+
+
Returns
+

The number of rows

+
+
+
+ +
+ +
+

Changes from version 4.2 to 4.3

+

See the listing below for the complete list of new features and changes. Relevant PRs and Issues are listed by number for the relevant items.

+
+

System Support Changes

  • Starting with this version, TreeCorr no longer supports Python 2.7 or 3.6. We currently support Python versions 3.7, 3.8, 3.9, 3.10.
  • We now support Windows machines, which we hadn’t before this. Code is now regularly tested on ubuntu, macos and windows. (#136, #143)
+
+

API Changes

  • Many function parameters are now keyword-only. The old syntax allowing these parameters to be positional still works, but is deprecated. (#129)
+
+

Performance improvements

+
    +
  • Added ability to compute patch-based covariance matrices using MPI. (#138, #139)

  • +
+
+
+

New features

+
    +
  • Add BinnedCorr2.build_cov_design_matrix and build_multi_cov_design_matrix functions (#132)

  • +
  • Added ability to write out the full set of pair-wise results in the write commands, and to +read them back in. This allows correlation objects to still be able to correctly calculate +the various patch-based covariance matrix estimates after round-tripping through a file. +To use this feature, use corr.write(..., write_patch_results=True). (#141)

  • +
  • Allow None as a configuration parameter to be equivalent to not specifying a parameter. +E.g. bin_size=0.1, min_sep=1., max_sep=100., nbins=None is allowed now. (#142)

  • +
+
+
+

Bug fixes

+
    +
  • Fixed a bug where correlation objects using bin_type=TwoD could not be correctly read back +in after being written to a file. (#141)

  • +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/correlation2.html b/docs/_build/html/correlation2.html new file mode 100644 index 00000000..26b520e6 --- /dev/null +++ b/docs/_build/html/correlation2.html @@ -0,0 +1,664 @@ + + + + + + Two-point Correlation Functions — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Two-point Correlation Functions

+

There are 6 different classes for calculating the different possible two-point correlation functions:

+ +

Each of the above classes is a sub-class of the base class BinnedCorr2, so they have a number of +features in common about how they are constructed. The common features are documented here.

+
+
+class treecorr.BinnedCorr2(config=None, *, logger=None, rng=None, **kwargs)[source]
+

This class stores the results of a 2-point correlation calculation, along with some +ancillary data.

+

This is a base class that is not intended to be constructed directly. But it has a few +helper functions that derived classes can use to help perform their calculations. See +the derived classes for more details:

+ +
+

Note

+

When we refer to kappa in the correlation functions, that is because TreeCorr was originally designed for weak lensing applications. But in fact any scalar quantity may be used here, e.g. CMB temperature fluctuations.

+
+

The constructor for all derived classes takes a config dict as the first argument, since this is often how we keep track of parameters, but if you don’t want to use one, or if you want to change some parameters from what are in a config dict, then you can use normal kwargs, which take precedence over anything in the config dict.

+
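For example, a minimal sketch using GGCorrelation (any of the derived classes works the same way):

>>> config = {'min_sep': 1., 'max_sep': 100., 'nbins': 10, 'sep_units': 'arcmin'}
>>> gg = treecorr.GGCorrelation(config)
>>> # An explicit kwarg takes precedence over the corresponding config entry:
>>> gg2 = treecorr.GGCorrelation(config, nbins=20)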

There are a number of possible definitions for the distance between two points, which +are appropriate for different use cases. These are specified by the metric parameter. +The possible options are:

+
+
    +
  • ‘Euclidean’ = straight line Euclidean distance between two points.

  • +
  • ‘FisherRperp’ = the perpendicular component of the distance, following the +definitions in Fisher et al, 1994 (MNRAS, 267, 927).

  • +
  • ‘OldRperp’ = the perpendicular component of the distance using the definition +of Rperp from TreeCorr v3.x.

  • +
  • ‘Rperp’ = an alias for FisherRperp. You can change it to be an alias for +OldRperp if you want by setting treecorr.Rperp_alias = 'OldRperp' before +using it.

  • +
  • ‘Rlens’ = the distance from the first object (taken to be a lens) to the line +connecting Earth and the second object (taken to be a lensed source).

  • +
  • ‘Arc’ = the true great circle distance for spherical coordinates.

  • +
  • ‘Periodic’ = Like Euclidean, but with periodic boundaries.

  • +
+
+

See Metrics for more information about these various metric options.

+
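For instance, a sketch of selecting the great circle metric for a catalog with spherical coordinates:

>>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
...                             sep_units='arcmin', metric='Arc')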

There are also a few different possible binning prescriptions, which define the range of distances to be placed into each bin.

+
+
    +
  • ‘Log’ - logarithmic binning in the distance. The bin steps will be uniform in +log(r) from log(min_sep) .. log(max_sep).

  • +
  • ‘Linear’ - linear binning in the distance. The bin steps will be uniform in r +from min_sep .. max_sep.

  • +
  • ‘TwoD’ = 2-dimensional binning from x = (-max_sep .. max_sep) and +y = (-max_sep .. max_sep). The bin steps will be uniform in both x and y. +(i.e. linear in x,y)

  • +
+
+

See Binning for more information about the different binning options.

+
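E.g. a sketch of requesting linear rather than logarithmic bins:

>>> kk = treecorr.KKCorrelation(min_sep=1., max_sep=50., nbins=49,
...                             bin_type='Linear')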
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in the below kwargs if desired. This dict is allowed to have additional entries besides those listed below, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+
    +
  • nbins (int) – How many bins to use. (Exactly three of nbins, bin_size, min_sep, +max_sep are required. If nbins is not given or set to None, it will be +calculated from the values of the other three, rounding up to the next +highest integer. In this case, bin_size will be readjusted to account +for this rounding up.)

  • +
  • bin_size (float) – The width of the bins in log(separation). (Exactly three of nbins, +bin_size, min_sep, max_sep are required. If bin_size is not given or +set to None, it will be calculated from the values of the other three.)

  • +
  • min_sep (float) – The minimum separation in units of sep_units, if relevant. (Exactly +three of nbins, bin_size, min_sep, max_sep are required. If min_sep is +not given or set to None, it will be calculated from the values of the +other three.)

  • +
  • max_sep (float) – The maximum separation in units of sep_units, if relevant. (Exactly +three of nbins, bin_size, min_sep, max_sep are required. If max_sep is +not given or set to None, it will be calculated from the values of the +other three.)

  • +
  • sep_units (str) – The units to use for the separation values, given as a string. This +includes both min_sep and max_sep above, as well as the units of the +output distance values. Valid options are arcsec, arcmin, degrees, +hours, radians. (default: radians if angular units make sense, but for +3-d or flat 2-d positions, the default will just match the units of +x,y[,z] coordinates)

  • +
  • bin_slop (float) – How much slop to allow in the placement of pairs in the bins. If bin_slop = 1, then the bin into which a particular pair is placed may be incorrect by at most 1.0 bin widths. (default: None, which means to use a bin_slop that gives a maximum error of 10% on any bin, which has been found to yield good results for most applications.)

  • +
  • brute (bool) –

    Whether to use the “brute force” algorithm. (default: False) Options +are:

    +
    +
      +
    • False (the default): Stop at non-leaf cells whenever the error in +the separation is compatible with the given bin_slop.

    • +
    • True: Go to the leaves for both catalogs.

    • +
    • 1: Always go to the leaves for cat1, but stop at non-leaf cells of +cat2 when the error is compatible with the given bin_slop.

    • +
    • 2: Always go to the leaves for cat2, but stop at non-leaf cells of +cat1 when the error is compatible with the given bin_slop.

    • +
    +
    +

  • +
  • verbose (int) –

    If no logger is provided, this will optionally specify a logging level +to use:

    +
    +
      +
    • 0 means no logging output

    • +
    • 1 means to output warnings only (default)

    • +
    • 2 means to output various progress information

    • +
    • 3 means to output extensive debugging information

    • +
    +
    +

  • +
  • log_file (str) – If no logger is provided, this will specify a file to write the logging +output. (default: None; i.e. output to standard output)

  • +
  • output_dots (bool) – Whether to output progress dots during the calculation of the correlation function. (default: False unless verbose is given and >= 2, in which case True)

  • +
  • split_method (str) –

    How to split the cells in the tree when building the tree structure. +Options are:

    +
      +
    • mean = Use the arithmetic mean of the coordinate being split. +(default)

    • +
    • median = Use the median of the coordinate being split.

    • +
    • middle = Use the middle of the range; i.e. the average of the minimum +and maximum value.

    • +
    • random: Use a random point somewhere in the middle two quartiles of +the range.

    • +
    +

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the field. +(default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the field. +The top-level cells are where each calculation job starts. There will +typically be of order \(2^{\rm max\_top}\) top-level cells. +(default: 10)

  • +
  • precision (int) – The precision to use for the output values. This specifies how many +digits to write. (default: 4)

  • +
  • pairwise (bool) – Whether to use a different kind of calculation for cross correlations +whereby corresponding items in the two catalogs are correlated pairwise +rather than the usual case of every item in one catalog being correlated +with every item in the other catalog. (default: False) (DEPRECATED)

  • +
  • m2_uform (str) – The default functional form to use for aperture mass calculations. +see calculateMapSq for more details. (default: ‘Crittenden’)

  • +
  • metric (str) – Which metric to use for distance measurements. Options are listed +above. (default: ‘Euclidean’)

  • +
  • bin_type (str) – What type of binning should be used. Options are listed above. +(default: ‘Log’)

  • +
  • min_rpar (float) – The minimum difference in Rparallel to allow for pairs being included +in the correlation function. (default: None)

  • +
  • max_rpar (float) – The maximum difference in Rparallel to allow for pairs being included +in the correlation function. (default: None)

  • +
  • period (float) – For the ‘Periodic’ metric, the period to use in all directions. +(default: None)

  • +
  • xperiod (float) – For the ‘Periodic’ metric, the period to use in the x direction. +(default: period)

  • +
  • yperiod (float) – For the ‘Periodic’ metric, the period to use in the y direction. +(default: period)

  • +
  • zperiod (float) – For the ‘Periodic’ metric, the period to use in the z direction. +(default: period)

  • +
  • var_method (str) – Which method to use for estimating the variance. Options are: +‘shot’, ‘jackknife’, ‘sample’, ‘bootstrap’, ‘marked_bootstrap’. +(default: ‘shot’)

  • +
  • num_bootstrap (int) – How many bootstrap samples to use for the ‘bootstrap’ and +‘marked_bootstrap’ var_methods. (default: 500)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for bootstrap +random number generation. (default: None)

  • +
  • num_threads (int) –

    How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores)

    +
    +

    Note

    +

    This won’t work if the system’s C compiler cannot use OpenMP +(e.g. clang prior to version 3.7.)

    +
    +

  • +
+
+
+
+
+build_cov_design_matrix(method, *, func=None, comm=None)[source]
+

Build the design matrix that is used for estimating the covariance matrix.

+

The design matrix for patch-based covariance estimates is a matrix where each row +corresponds to a different estimate of the data vector, \(\xi_i\) (or +\(f(\xi_i)\) if using the optional func parameter).

+

The rows of the matrix for each valid method are as follows:

+
+
    +
  • ‘shot’: This method is not valid here.

  • +
  • ‘jackknife’: The data vector when excluding a single patch.

  • +
  • ‘sample’: The data vector using only a single patch for the first catalog.

  • +
  • ‘bootstrap’: The data vector for a random resampling of the patches, keeping the same total number, but allowing some to repeat. Cross terms from repeated patches are excluded (since they are really auto terms).

  • +
  • ‘marked_bootstrap’: The data vector for a random resampling of patches in the first +catalog, using all patches for the second catalog. Based on the algorithm in +Loh(2008).

  • +
+
+

See estimate_cov for more details.

+

The return value includes both the design matrix and a vector of weights (the total weight +array in the computed correlation functions). The weights are used for the sample method +when estimating the covariance matrix. The other methods ignore them, but they are provided +here in case they are useful.

+
+
Parameters
+
    +
  • method (str) – Which method to use to estimate the covariance matrix.

  • +
  • func (function) – A unary function that takes the list corrs and returns the +desired full data vector. [default: None, which is equivalent to +lambda corrs: np.concatenate([c.getStat() for c in corrs])]

  • +
  • comm (mpi comm) – If desired, an mpi4py communicator to parallelize the calculation. (default: None)

  • +
+
+
Returns
+

numpy arrays with the design matrix and weights respectively.

+
+
Return type
+

A, w

+
+
+
+ +
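A brief usage sketch (assuming ng is an already-processed correlation whose input catalogs used patches):

>>> A, w = ng.build_cov_design_matrix('jackknife')
>>> # A has one row per jackknife realization and one column per element
>>> # of the data vector; w holds the corresponding total weights.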
+
+clear()[source]
+

Clear all data vectors, the results dict, and any related values.

+
+ +
+
+estimate_cov(method, *, func=None, comm=None)[source]
+

Estimate the covariance matrix based on the data

+

This function will calculate an estimate of the covariance matrix according to the +given method.

+

Options for method include:

+
+
    +
  • ‘shot’ = The variance based on “shot noise” only. This includes the Poisson +counts of points for N statistics, shape noise for G statistics, and the observed +scatter in the values for K statistics. In this case, the returned covariance +matrix will be diagonal, since there is no way to estimate the off-diagonal terms.

  • +
  • ‘jackknife’ = A jackknife estimate of the covariance matrix based on the scatter +in the measurement when excluding one patch at a time.

  • +
  • ‘sample’ = An estimate based on the sample covariance of a set of samples, +taken as the patches of the input catalog.

  • +
  • ‘bootstrap’ = A bootstrap covariance estimate. It selects patches at random with +replacement and then generates the statistic using all the auto-correlations at +their selected repetition plus all the cross terms that aren’t actually auto terms.

  • +
  • ‘marked_bootstrap’ = An estimate based on a marked-point bootstrap resampling of the patches. Similar to bootstrap, but only samples the patches of the first catalog and uses all patches from the second catalog that correspond to each patch selection of the first catalog. Based on the algorithm presented in Loh (2008). cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/

  • +
+
+

Both ‘bootstrap’ and ‘marked_bootstrap’ use the num_bootstrap parameter, which can be set on +construction.

+
+

Note

+

For most classes, there is only a single statistic, so this calculates a covariance +matrix for that vector. GGCorrelation has two: xip and xim, so in this +case the full data vector is xip followed by xim, and this calculates the +covariance matrix for that full vector including both statistics. The helper +function getStat returns the relevant statistic in all cases.

+
+

In all cases, the relevant processing needs to already have been completed and finalized. +And for all methods other than ‘shot’, the processing should have involved an appropriate +number of patches – preferably more patches than the length of the vector for your +statistic, although this is not checked.

+

The default data vector to use for the covariance matrix is given by the method +getStat. As noted above, this is usually just self.xi. However, there is an option +to compute the covariance of some other function of the correlation object by providing +an arbitrary function, func, which should act on the current correlation object +and return the data vector of interest.

+

For instance, for an NGCorrelation, you might want to compute the covariance of the +imaginary part, ng.xi_im, rather than the real part. In this case you could use

+
>>> func = lambda ng: ng.xi_im
+
+
+

The return value from this func should be a single numpy array. (This is not directly +checked, but you’ll probably get some kind of exception if it doesn’t behave as expected.)

+
+

Note

+

The optional func parameter is not valid in conjunction with method='shot'. +It only works for the methods that are based on patch combinations.

+
+

This function can be parallelized by passing the comm argument as an mpi4py communicator +to parallelize using that. For MPI, all processes should have the same inputs. +If method == “shot” then parallelization has no effect.

+
+
Parameters
+
    +
  • method (str) – Which method to use to estimate the covariance matrix.

  • +
  • func (function) – A unary function that acts on the current correlation object and +returns the desired data vector. [default: None, which is +equivalent to lambda corr: corr.getStat().

  • +
  • comm (mpi comm) – If desired, an mpi4py communicator to parallelize the calculation. (default: None)

  • +
+
+
Returns
+

A numpy array with the estimated covariance matrix.

+
+
+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

Usually, this is just self.xi. But if the bin_type is TwoD, this becomes self.xi.ravel(). And for GGCorrelation, it is the concatenation of self.xip and self.xim.

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

This is the weight array corresponding to getStat. Usually just self.weight, but +raveled for TwoD and duplicated for GGCorrelation to match what getStat does in +those cases.

+
+ +
+
+property nonzero
+

Return whether there are any values accumulated yet. (i.e. npairs > 0)

+
+ +
+
+sample_pairs(n, cat1, cat2, *, min_sep, max_sep, metric=None)[source]
+

Return a random sample of n pairs whose separations fall between min_sep and max_sep.

+

This would typically be used to get some random subset of the indices of pairs that +fell into a particular bin of the correlation. E.g. to get 100 pairs from the third +bin of a BinnedCorr2 instance, corr, you could write:

+
>>> min_sep = corr.left_edges[2]   # third bin has i=2
+>>> max_sep = corr.right_edges[2]
+>>> i1, i2, sep = corr.sample_pairs(100, cat1, cat2, min_sep=min_sep, max_sep=max_sep)
+
+
+

The min_sep and max_sep should use the same units as were defined when constructing +the corr instance.

+

The selection process will also use the same bin_slop as specified (either explicitly or +implicitly) when constructing the corr instance. This means that some of the pairs may +have actual separations slightly outside of the specified range. If you want a selection +using an exact range without any slop, you should construct a new Correlation instance +with bin_slop=0, and call sample_pairs with that.

+

The returned separations will likewise correspond to the separation of the cells in the +tree that TreeCorr used to place the pairs into the given bin. Therefore, if these cells +were not leaf cells, then they will not typically be equal to the real separations for the +given metric. If you care about the exact separations for each pair, you should either +call sample_pairs from a Correlation instance with brute=True or recalculate the +distances yourself from the original data.

+

Also, note that min_sep and max_sep may be arbitrary. There is no requirement that they +be edges of one of the standard bins for this correlation function. There is also no +requirement that this correlation instance has already accumulated pairs via a call +to process with these catalogs.

+
+
Parameters
+
    +
  • n (int) – How many samples to return.

  • +
  • cat1 (Catalog) – The catalog from which to sample the first object of each pair.

  • +
  • cat2 (Catalog) – The catalog from which to sample the second object of each pair. +(This may be the same as cat1.)

  • +
  • min_sep (float) – The minimum separation for the returned pairs (modulo some slop +allowed by the bin_slop parameter). (Note: keyword name is required +for this parameter: min_sep=min_sep)

  • +
  • max_sep (float) – The maximum separation for the returned pairs (modulo some slop +allowed by the bin_slop parameter). (Note: keyword name is required +for this parameter: max_sep=max_sep)

  • +
  • metric (str) – Which metric to use. See Metrics for details. (default: +self.metric, or ‘Euclidean’ if not set yet)

  • +
+
+
Returns
+

Tuple containing

+
+
    +
  • i1 (array): indices of objects from cat1

  • +
  • i2 (array): indices of objects from cat2

  • +
  • sep (array): separations of the pairs of objects (i1,i2)

  • +
+
+

+
+
+
+ +
+ +
+
+treecorr.estimate_multi_cov(corrs, method, *, func=None, comm=None)[source]
+

Estimate the covariance matrix of multiple statistics.

+

This is like the method BinnedCorr2.estimate_cov, except that it will accommodate multiple statistics from a list corrs of BinnedCorr2 objects.

+

Options for method include:

+
+
    +
  • ‘shot’ = The variance based on “shot noise” only. This includes the Poisson +counts of points for N statistics, shape noise for G statistics, and the observed +scatter in the values for K statistics. In this case, the returned covariance +matrix will be diagonal, since there is no way to estimate the off-diagonal terms.

  • +
  • ‘jackknife’ = A jackknife estimate of the covariance matrix based on the scatter +in the measurement when excluding one patch at a time.

  • +
  • ‘sample’ = An estimate based on the sample covariance of a set of samples, +taken as the patches of the input catalog.

  • +
  • ‘bootstrap’ = A bootstrap covariance estimate. It selects patches at random with +replacement and then generates the statistic using all the auto-correlations at +their selected repetition plus all the cross terms that aren’t actually auto terms.

  • +
  • ‘marked_bootstrap’ = An estimate based on a marked-point bootstrap resampling of the patches. Similar to bootstrap, but only samples the patches of the first catalog and uses all patches from the second catalog that correspond to each patch selection of the first catalog. Based on the algorithm presented in Loh (2008). cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/

  • +
+
+

Both ‘bootstrap’ and ‘marked_bootstrap’ use the num_bootstrap parameter, which can be set on +construction.

+

For example, to find the combined covariance matrix for an NG tangential shear statistic, along with the GG xi+ and xi- from the same area, using jackknife covariance estimation, you would write:

+
>>> cov = treecorr.estimate_multi_cov([ng,gg], method='jackknife')
+
+
+

In all cases, the relevant processing needs to already have been completed and finalized. +And for all methods other than ‘shot’, the processing should have involved an appropriate +number of patches – preferably more patches than the length of the vector for your +statistic, although this is not checked.

+

The default order of the covariance matrix is to simply concatenate the data vectors +for each corr in the list corrs. However, if you want to do something more complicated, +you may provide an arbitrary function, func, which should act on the list of correlations. +For instance, if you have several GGCorrelation objects and would like to order the +covariance such that all xi+ results come first, and then all xi- results, you could use

+
>>> func = lambda corrs: np.concatenate([c.xip for c in corrs] + [c.xim for c in corrs])
+
+
+

Or if you want to compute the covariance matrix of some derived quantity like the ratio +of two correlations, you could use

+
>>> func = lambda corrs: corrs[0].xi / corrs[1].xi
+
+
+

This function can be parallelized by passing the comm argument as an mpi4py communicator to +parallelize using that. For MPI, all processes should have the same inputs. +If method == “shot” then parallelization has no effect.

+

The return value from this func should be a single numpy array. (This is not directly +checked, but you’ll probably get some kind of exception if it doesn’t behave as expected.)

+
+

Note

+

The optional func parameter is not valid in conjunction with method='shot'. +It only works for the methods that are based on patch combinations.

+
+
+
Parameters
+
    +
  • corrs (list) – A list of BinnedCorr2 instances.

  • +
  • method (str) – Which method to use to estimate the covariance matrix.

  • +
  • func (function) – A unary function that takes the list corrs and returns the +desired full data vector. [default: None, which is equivalent to +lambda corrs: np.concatenate([c.getStat() for c in corrs])]

  • +
  • comm (mpi comm) – If desired, an mpi4py communicator to parallelize the calculation. (default: None)

  • +
+
+
Returns
+

A numpy array with the estimated covariance matrix.

+
+
+
+ +
+
+treecorr.build_multi_cov_design_matrix(corrs, method, *, func=None, comm=None)[source]
+

Build the design matrix that is used for estimating the covariance matrix.

+

The design matrix for patch-based covariance estimates is a matrix where each row +corresponds to a different estimate of the data vector, \(\xi_i\) (or +\(f(\xi_i)\) if using the optional func parameter).

+

The rows of the matrix for each valid method are as follows:

+
+
    +
  • ‘shot’: This method is not valid here.

  • +
  • ‘jackknife’: The data vector when excluding a single patch.

  • +
  • ‘sample’: The data vector using only a single patch for the first catalog.

  • +
  • ‘bootstrap’: The data vector for a random resampling of the patches, keeping the same total number, but allowing some to repeat. Cross terms from repeated patches are excluded (since they are really auto terms).

  • +
  • ‘marked_bootstrap’: The data vector for a random resampling of patches in the first +catalog, using all patches for the second catalog. Based on the algorithm in Loh(2008).

  • +
+
+

See estimate_multi_cov for more details.

+

The return value includes both the design matrix and a vector of weights (the total weight +array in the computed correlation functions). The weights are used for the sample method +when estimating the covariance matrix. The other methods ignore them, but they are provided +here in case they are useful.

+
+
Parameters
+
    +
  • corrs (list) – A list of BinnedCorr2 instances.

  • +
  • method (str) – Which method to use to estimate the covariance matrix.

  • +
  • func (function) – A unary function that takes the list corrs and returns the +desired full data vector. [default: None, which is equivalent to +lambda corrs: np.concatenate([c.getStat() for c in corrs])]

  • +
  • comm (mpi comm) – If desired, an mpi4py communicator to parallelize the calculation. (default: None)

  • +
+
+
Returns
+

numpy arrays with the design matrix and weights respectively.

+
+
Return type
+

A, w

+
+
+
+ +
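A brief usage sketch (assuming ng and gg were processed using catalogs with the same patches):

>>> A, w = treecorr.build_multi_cov_design_matrix([ng, gg], 'jackknife')
>>> # Each row of A is one jackknife realization of the concatenated
>>> # data vector [ng.xi, gg.xip, gg.xim].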
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/correlation3.html b/docs/_build/html/correlation3.html new file mode 100644 index 00000000..4ca89872 --- /dev/null +++ b/docs/_build/html/correlation3.html @@ -0,0 +1,490 @@ + + + + + + Three-point Correlation Functions — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Three-point Correlation Functions

+

There are currently 3 different classes for calculating the different possible three-point auto-correlation functions:

+ +
+

Note

+

There are classes that can handle cross-correlations of the same type:

+ +

However, we do not yet have the ability to compute 3-point cross-correlations across +different types (such as NNG or KGG, etc.)

+
+

Each of the above classes is a sub-class of the base class BinnedCorr3, so they have a number of +features in common about how they are constructed. The common features are documented here.

+
+
+class treecorr.BinnedCorr3(config=None, *, logger=None, rng=None, **kwargs)[source]
+

This class stores the results of a 3-point correlation calculation, along with some +ancillary data.

+

This is a base class that is not intended to be constructed directly. But it has a few +helper functions that derived classes can use to help perform their calculations. See +the derived classes for more details:

+ +

Three-point correlations are a bit more complicated than two-point, since the data need to be binned in triangles, not just by the separation between two points. We characterize the triangles according to the following three parameters based on the three side lengths of the triangle with d1 >= d2 >= d3.

+
+\[\begin{split}r &= d2 \\ +u &= \frac{d3}{d2} \\ +v &= \pm \frac{(d1 - d2)}{d3} \\\end{split}\]
+

The orientation of the triangle is specified by the sign of v. +Positive v triangles have the three sides d1,d2,d3 in counter-clockwise orientation. +Negative v triangles have the three sides d1,d2,d3 in clockwise orientation.

+
+
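For example, a triangle with side lengths d1 = 5, d2 = 4, d3 = 2 has r = d2 = 4, u = d3/d2 = 0.5, and |v| = (d1-d2)/d3 = 0.5, where the sign of v is positive if the sides d1,d2,d3 are in counter-clockwise orientation and negative if they are in clockwise orientation.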

Note

+

We always bin the same way for positive and negative v values, and the binning +specification for v should just be for the positive values. E.g. if you specify +min_v=0.2, max_v=0.6, then TreeCorr will also accumulate triangles with +-0.6 < v < -0.2 in addition to those with 0.2 < v < 0.6.

+
+

The constructor for all derived classes takes a config dict as the first argument, since this is often how we keep track of parameters, but if you don’t want to use one, or if you want to change some parameters from what are in a config dict, then you can use normal kwargs, which take precedence over anything in the config dict.

+

There are three implemented definitions for the metric, which defines how to calculate the distance between two points, for three-point correlations:

+
+
    +
  • ‘Euclidean’ = straight line Euclidean distance between two points. For spherical +coordinates (ra,dec without r), this is the chord distance between points on the +unit sphere.

  • +
  • ‘Arc’ = the true great circle distance for spherical coordinates.

  • +
  • ‘Periodic’ = Like Euclidean, but with periodic boundaries.

    +
    +

    Note

    +

    The triangles for three-point correlations can become ambiguous if d1 > period/2, +which means the maximum d2 (max_sep) should be less than period/4. +This is not enforced.

    +
    +
  • +
+
+

So far, there is only one allowed value for the bin_type for three-point correlations.

+
+
    +
  • ‘LogRUV’ - The bin steps will be uniform in log(r) from log(min_sep) .. log(max_sep). +The u and v values are binned linearly from min_u .. max_u and min_v .. max_v.

  • +
+
+
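For example, a sketch of constructing a three-point correlation object with explicit r, u, and v binning:

>>> ggg = treecorr.GGGCorrelation(min_sep=1., max_sep=50., nbins=10,
...                               min_u=0.2, max_u=0.9, nubins=7,
...                               min_v=0., max_v=0.6, nvbins=6,
...                               sep_units='arcmin')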
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in the below kwargs if desired. This dict is allowed to have additional entries besides those listed below, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+
    +
  • nbins (int) – How many bins to use. (Exactly three of nbins, bin_size, min_sep, +max_sep are required. If nbins is not given or set to None, it will be +calculated from the values of the other three, rounding up to the next +highest integer. In this case, bin_size will be readjusted to account +for this rounding up.)

  • +
  • bin_size (float) – The width of the bins in log(separation). (Exactly three of nbins, +bin_size, min_sep, max_sep are required. If bin_size is not given or +set to None, it will be calculated from the values of the other three.)

  • +
  • min_sep (float) – The minimum separation in units of sep_units, if relevant. (Exactly +three of nbins, bin_size, min_sep, max_sep are required. If min_sep is +not given or set to None, it will be calculated from the values of the +other three.)

  • +
  • max_sep (float) – The maximum separation in units of sep_units, if relevant. (Exactly +three of nbins, bin_size, min_sep, max_sep are required. If max_sep is +not given or set to None, it will be calculated from the values of the +other three.)

  • +
  • sep_units (str) – The units to use for the separation values, given as a string. This +includes both min_sep and max_sep above, as well as the units of the +output distance values. Valid options are arcsec, arcmin, degrees, +hours, radians. (default: radians if angular units make sense, but for +3-d or flat 2-d positions, the default will just match the units of +x,y[,z] coordinates)

  • +
  • bin_slop (float) – How much slop to allow in the placement of triangles in the bins. If bin_slop = 1, then the bin into which a particular triangle is placed may be incorrect by at most 1.0 bin widths. (default: None, which means to use a bin_slop that gives a maximum error of 10% on any bin, which has been found to yield good results for most applications.)

  • +
  • nubins (int) – Analogous to nbins for the u values. (The default is to calculate from ubin_size = bin_size, min_u = 0, max_u = 1, but this can be overridden by specifying up to 3 of these four parameters.)

  • +
  • ubin_size (float) – Analogous to bin_size for the u values. (default: bin_size)

  • +
  • min_u (float) – Analogous to min_sep for the u values. (default: 0)

  • +
  • max_u (float) – Analogous to max_sep for the u values. (default: 1)

  • +
  • nvbins (int) – Analogous to nbins for the positive v values. (The default is to calculate from vbin_size = bin_size, min_v = 0, max_v = 1, but this can be overridden by specifying up to 3 of these four parameters.)

  • +
  • vbin_size (float) – Analogous to bin_size for the v values. (default: bin_size)

  • +
  • min_v (float) – Analogous to min_sep for the positive v values. (default: 0)

  • +
  • max_v (float) – Analogous to max_sep for the positive v values. (default: 1)

  • +
  • brute (bool) –

    Whether to use the “brute force” algorithm. (default: False) Options +are:

    +
    +
      +
    • False (the default): Stop at non-leaf cells whenever the error in +the separation is compatible with the given bin_slop.

    • +
    • True: Go to the leaves for both catalogs.

    • +
    • 1: Always go to the leaves for cat1, but stop at non-leaf cells of +cat2 when the error is compatible with the given bin_slop.

    • +
    • 2: Always go to the leaves for cat2, but stop at non-leaf cells of +cat1 when the error is compatible with the given bin_slop.

    • +
    +
    +

  • +
  • verbose (int) –

    If no logger is provided, this will optionally specify a logging level +to use:

    +
    +
      +
    • 0 means no logging output

    • +
    • 1 means to output warnings only (default)

    • +
    • 2 means to output various progress information

    • +
    • 3 means to output extensive debugging information

    • +
    +
    +

  • +
  • log_file (str) – If no logger is provided, this will specify a file to write the logging +output. (default: None; i.e. output to standard output)

  • +
  • output_dots (bool) – Whether to output progress dots during the calculation of the correlation function. (default: False unless verbose is given and >= 2, in which case True)

  • +
  • split_method (str) –

    How to split the cells in the tree when building the tree structure. +Options are:

    +
      +
    • mean = Use the arithmetic mean of the coordinate being split. +(default)

    • +
    • median = Use the median of the coordinate being split.

    • +
    • middle = Use the middle of the range; i.e. the average of the minimum +and maximum value.

    • +
    • random: Use a random point somewhere in the middle two quartiles of +the range.

    • +
    +

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the field. +(default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the field. +The top-level cells are where each calculation job starts. There will +typically be of order \(2^{\rm max\_top}\) top-level cells. +(default: 10)

  • +
  • precision (int) – The precision to use for the output values. This specifies how many +digits to write. (default: 4)

  • +
  • metric (str) – Which metric to use for distance measurements. Options are listed +above. (default: ‘Euclidean’)

  • +
  • bin_type (str) – What type of binning should be used. Only one option currently. +(default: ‘LogRUV’)

  • +
  • period (float) – For the ‘Periodic’ metric, the period to use in all directions. +(default: None)

  • +
  • xperiod (float) – For the ‘Periodic’ metric, the period to use in the x direction. +(default: period)

  • +
  • yperiod (float) – For the ‘Periodic’ metric, the period to use in the y direction. +(default: period)

  • +
  • zperiod (float) – For the ‘Periodic’ metric, the period to use in the z direction. +(default: period)

  • +
  • var_method (str) – Which method to use for estimating the variance. Options are: +‘shot’, ‘jackknife’, ‘sample’, ‘bootstrap’, ‘marked_bootstrap’. +(default: ‘shot’)

  • +
  • num_bootstrap (int) – How many bootstrap samples to use for the ‘bootstrap’ and +‘marked_bootstrap’ var_methods. (default: 500)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for bootstrap +random number generation. (default: None)

  • +
  • num_threads (int) –

    How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given in +the constructor in the config dict.)

    +
    +

    Note

    +

    This won’t work if the system’s C compiler cannot use OpenMP +(e.g. clang prior to version 3.7.)

    +
    +

  • +
+
+
+
+
+build_cov_design_matrix(method, *, func=None, comm=None)[source]
+

Build the design matrix that is used for estimating the covariance matrix.

+

The design matrix for patch-based covariance estimates is a matrix where each row +corresponds to a different estimate of the data vector, \(\zeta_i\) (or +\(f(\zeta_i)\) if using the optional func parameter).

+

The rows of the matrix for each valid method are as follows:

+
+
    +
  • ‘shot’: This method is not valid here.

  • +
  • ‘jackknife’: The data vector when excluding a single patch.

  • +
  • ‘sample’: The data vector using only a single patch for the first catalog.

  • +
  • ‘bootstrap’: The data vector for a random resampling of the patches, keeping the same total number, but allowing some to repeat. Cross terms from repeated patches are excluded (since they are really auto terms).

  • +
  • ‘marked_bootstrap’: The data vector for a random resampling of patches in the first +catalog, using all patches for the second catalog. Based on the algorithm in +Loh(2008).

  • +
+
+

See estimate_cov for more details.

+

The return value includes both the design matrix and a vector of weights (the total weight +array in the computed correlation functions). The weights are used for the sample method +when estimating the covariance matrix. The other methods ignore them, but they are provided +here in case they are useful.

+
+
Parameters
+
    +
  • method (str) – Which method to use to estimate the covariance matrix.

  • +
  • func (function) – A unary function that takes the list corrs and returns the +desired full data vector. [default: None, which is equivalent to +lambda corrs: np.concatenate([c.getStat() for c in corrs])]

  • +
  • comm (mpi comm) – If desired, an mpi4py communicator to parallelize the calculation. (default: None)

  • +
+
+
Returns
+

numpy arrays with the design matrix and weights respectively.

+
+
Return type
+

A, w

+
+
+
+ +
+
+clear()[source]
+

Clear all data vectors, the results dict, and any related values.

+
+ +
+
+estimate_cov(method, *, func=None, comm=None)[source]
+

Estimate the covariance matrix based on the data

+

This function will calculate an estimate of the covariance matrix according to the +given method.

+

Options for method include:

+
+
    +
  • ‘shot’ = The variance based on “shot noise” only. This includes the Poisson +counts of points for N statistics, shape noise for G statistics, and the observed +scatter in the values for K statistics. In this case, the returned covariance +matrix will be diagonal, since there is no way to estimate the off-diagonal terms.

  • +
  • ‘jackknife’ = A jackknife estimate of the covariance matrix based on the scatter +in the measurement when excluding one patch at a time.

  • +
  • ‘sample’ = An estimate based on the sample covariance of a set of samples, +taken as the patches of the input catalog.

  • +
  • ‘bootstrap’ = A bootstrap covariance estimate. It selects patches at random with +replacement and then generates the statistic using all the auto-correlations at +their selected repetition plus all the cross terms that aren’t actually auto terms.

  • +
  • ‘marked_bootstrap’ = An estimate based on a marked-point bootstrap resampling of the patches. Similar to bootstrap, but only samples the patches of the first catalog and uses all patches from the second catalog that correspond to each patch selection of the first catalog. cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/

  • +
+
+

Both ‘bootstrap’ and ‘marked_bootstrap’ use the num_bootstrap parameter, which can be set on +construction.

+
+

Note

+

For most classes, there is only a single statistic, zeta, so this calculates a covariance matrix for that vector. GGGCorrelation has four: gam0, gam1, gam2, and gam3, so in this case the full data vector is gam0 followed by gam1, then gam2, then gam3, and this calculates the covariance matrix for that full vector including all four statistics. The helper function getStat returns the relevant statistic in all cases.

+
+

In all cases, the relevant processing needs to already have been completed and finalized. +And for all methods other than ‘shot’, the processing should have involved an appropriate +number of patches – preferably more patches than the length of the vector for your +statistic, although this is not checked.

+

The default data vector to use for the covariance matrix is given by the method +getStat. As noted above, this is usually just self.zeta. However, there is an option +to compute the covariance of some other function of the correlation object by providing +an arbitrary function, func, which should act on the current correlation object +and return the data vector of interest.

+

For instance, for a GGGCorrelation, you might want to compute the covariance of just gam0 and ignore the others. In this case you could use

+
>>> func = lambda ggg: ggg.gam0
+
+
+

The return value from this func should be a single numpy array. (This is not directly +checked, but you’ll probably get some kind of exception if it doesn’t behave as expected.)

+
+

Note

+

The optional func parameter is not valid in conjunction with method='shot'. +It only works for the methods that are based on patch combinations.

+
+

This function can be parallelized by passing the comm argument as an mpi4py communicator +to parallelize using that. For MPI, all processes should have the same inputs. +If method == “shot” then parallelization has no effect.

+
+
Parameters
+
    +
  • method (str) – Which method to use to estimate the covariance matrix.

  • +
  • func (function) – A unary function that acts on the current correlation object and +returns the desired data vector. [default: None, which is +equivalent to lambda corr: corr.getStat().

  • +
  • comm (mpi comm) – If desired, an mpi4py communicator to parallelize the calculation. (default: None)

  • +
+
+
Returns
+

A numpy array with the estimated covariance matrix.

+
+
+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

Usually, this is just self.zeta. But if the bin_type is TwoD, this becomes self.zeta.ravel().

+

And for GGGCorrelation, it is the concatenation of the four different correlations +[gam0.ravel(), gam1.ravel(), gam2.ravel(), gam3.ravel()].

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

This is the weight array corresponding to getStat. Usually just self.weight, but +raveled for TwoD and duplicated for GGGCorrelation to match what getStat does in +those cases.

+
+ +
+
+property nonzero
+

Return whether there are any values accumulated yet. (i.e. ntri > 0)

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/cov.html b/docs/_build/html/cov.html new file mode 100644 index 00000000..928cde2a --- /dev/null +++ b/docs/_build/html/cov.html @@ -0,0 +1,344 @@ + + + + + + Covariance Estimates — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Covariance Estimates

+

In addition to calculating the correlation function, TreeCorr can also +estimate the variance of the resulting array of values, or even the +covariance matrix.

+

The simplest estimate of the variance involves propagating the shot noise of the individual measurements into the final results. For shear (G) measurements, this includes the so-called “shape noise”. For scalar (K) measurements, this includes the point variance of the k values. For count (N) measurements, it comes from the Poisson statistics of counting. This variance estimate is the default if you don’t specify something different, and it will be recorded as varxi for most types of correlations. For GG, there are two quantities, varxip and varxim, which give the variance of xip and xim respectively.

+

However, this kind of variance estimate does not capture the sample variance, namely the fact that the signal has real variation across the field, which tends to dominate the total variance at large scales. To estimate this component of the total variance from the data, one typically needs to split the field into patches and use the variation in the measurement among the patches to estimate the overall sample variance.

+

See Patches for information on defining the patches to use for your input Catalog.

+
+

Variance Methods

+

To get one of the patch-based variance estimates for the varxi or similar attribute, you can set the var_method parameter in the constructor, e.g.:

+
>>> ng = treecorr.NGCorrelation(nbins=10, min_sep=1, max_sep=100, var_method='jackknife')
+
+
+

This tells TreeCorr to use the jackknife algorithm for computing the covariance matrix. +Then varxi is taken as the diagonal of this covariance matrix. +The full covariance matrix is also recorded at the cov attribute.

+

The following variance methods are implemented:

+
+

“shot”

+

This is the default shot-noise estimate of the covariance. It includes the Poisson +counts of points for N statistics, shape noise for G statistics, and the observed +scatter in the values for K statistics. In this case, the covariance matrix will +be diagonal, since there is no way to estimate the off-diagonal terms.

+
+
+

“jackknife”

+

This is the classic jackknife estimate of the covariance matrix. It computes the +correlation function that would have been measured if one patch at a time is excluded +from the sample. Then the covariance matrix is estimated as

+
+\[C = \frac{N_\mathrm{patch} - 1}{N_\mathrm{patch}} \sum_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)\]
+
+
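In terms of the design matrix returned by BinnedCorr2.build_cov_design_matrix, this is the following arithmetic (a sketch, assuming gg was processed using catalogs with patches):

>>> import numpy as np
>>> A, w = gg.build_cov_design_matrix('jackknife')  # rows are the xi_i
>>> npatch = A.shape[0]
>>> xbar = np.mean(A, axis=0)
>>> C = (npatch-1)/npatch * (A-xbar).T.dot(A-xbar)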
+

“sample”

+

This is the simplest patch-based covariance estimate. It computes the correlation function for each patch, where at least one of the two points falls in that patch. Then the estimated covariance matrix is simply the sample covariance of these vectors, scaled by the relative total weight in each patch.

+
+\[C = \frac{1}{N_\mathrm{patch} - 1} \sum_i w_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)\]
+

For \(w_i\), we use the total weight in the correlation measurement for each patch divided by the total weight in all patches. This is roughly equal to \(1/N_\mathrm{patch}\), but it somewhat captures any patch-to-patch variation in area that might be present.

+
+
+

“bootstrap”

+

This estimate implements a bootstrap resampling of the patches as follows:

+
  1. Select \(N_\mathrm{patch}\) patch numbers at random from the full list
     \([0 \dots N_\mathrm{patch}{-}1]\) with replacement, so some patch numbers
     will appear more than once, and some will be missing.
  2. Calculate the total correlation function that would have been computed
     from these patches rather than the original patches.
  3. The auto-correlations are included at the selected repetition for the bootstrap
     samples. So if a patch number is repeated, its auto-correlation is included that
     many times.
  4. Cross-correlations between patches are included only if the two patches
     aren’t actually the same patch (i.e. it’s not actually an auto-correlation).
     This prevents extra auto-correlations (where most of the signal typically occurs)
     from being included in the sum.
  5. Repeat steps 1-4 a total of \(N_\mathrm{bootstrap}\) times to build up a large
     set of resampled correlation functions, \(\{\xi_i\}\).
  6. Then the covariance estimate is the sample variance of these resampled results:

\[C = \frac{1}{N_\mathrm{bootstrap}-1} \sum_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)\]

The default number of bootstrap resamplings is 500, but you can change this in the +Correlation constructor using the parameter num_bootstrap.

+
+
+

“marked_bootstrap”

+

This estimate is based on a “marked-point” bootstrap resampling of the patches. Specifically, we follow the method described in A Valid and Fast Spatial Bootstrap for Correlation Functions by Ji Meng Loh, 2008. cf. https://ui.adsabs.harvard.edu/abs/2008ApJ...681..726L/.

+

This method starts out the same as the “sample” method. It computes the correlation +function for each patch where at least one of the two points falls in that patch. +However, it keeps track of the numerator and denominator separately. +These are the “marks” in Loh, 2008.

+

Then these marks are resampled in the normal bootstrap manner (random with replacement) +to produce mock results. The correlation function for each bootstrap resampling is +the sum of the numerator marks divided by the sum of the denominator marks.

+

Then the covariance estimate is the sample variance of these resampled results:

+
+\[C = \frac{1}{N_\mathrm{bootstrap}-1} \sum_i (\xi_i - \bar\xi)^T (\xi_i-\bar\xi)\]
+

The default number of bootstrap resamplings is 500, but you can change this in the +Correlation constructor using the parameter num_bootstrap.

+
+
+
+

Covariance Matrix

+

As mentioned above, the covariance matrix corresponding to the specified var_method +will be saved as the cov attribute of the correlation instance after processing +is complete.

+

However, if the processing was done using patches, then you can also compute the +covariance matrix for any of the above methods without redoing the processing +using BinnedCorr2.estimate_cov or BinnedCorr3.estimate_cov. E.g.:

+
>>> ng = treecorr.NGCorrelation(nbins=10, min_sep=1, max_sep=100)
+>>> ng.process(lens_cat, source_cat)  # At least one of these needs to have patches set.
+>>> cov_jk = ng.estimate_cov('jackknife')
+>>> cov_boot = ng.estimate_cov('bootstrap')
+
+
+

Additionally, you can compute the joint covariance matrix for a number of statistics +that were processed using the same patches with treecorr.estimate_multi_cov. E.g.:

+
>>> ng = treecorr.NGCorrelation(nbins=10, min_sep=1, max_sep=100)
+>>> ng.process(lens_cat, source_cat)
+>>> gg = treecorr.GGCorrelation(nbins=10, min_sep=1, max_sep=100)
+>>> gg.process(source_cat)
+>>> cov = treecorr.estimate_multi_cov([ng,gg], 'jackknife')
+
+
+

This will calculate an estimate of the covariance matrix for the full data vector +with ng.xi followed by gg.xip and then gg.xim.

+
+
+

Covariance of Derived Quantities

+

Sometimes your data vector of interest might not be just the raw correlation function, +or even a list of several correlation functions. Rather, it might be some derived +quantity. E.g.

+
    +
  • The ratio or difference of two correlation functions such as nk1.xi / nk2.xi.

  • +
  • The aperture mass variance computed by GGCorrelation.calculateMapSq.

  • +
  • One of the other ancillary products such as ng.xi_im.

  • +
  • A reordering of the data vector, such as putting several gg.xip first for multiple +tomographic bins and then the gg.xim for each after that.

  • +
+

These are just examples of what kind of thing you might want. In fact, we enable +any kind of post-processing you want to do on either a single correlation object +(using BinnedCorr2.estimate_cov or BinnedCorr3.estimate_cov) or a list of +correlation objects (using treecorr.estimate_multi_cov).

+

These functions take an optional func parameter, which can be any user-defined +function that calculates the desired data vector from the given correlation(s). +For instance, in the first case, where the desired data vector is the ratio of +two NK correlations, you could find the corresponding covariance matrix as follows:

+
>>> func = lambda corrs: corrs[0].xi / corrs[1].xi
+>>> nk1 = treecorr.NKCorrelation(nbins=10, min_sep=1, max_sep=100)
+>>> nk2 = treecorr.NKCorrelation(nbins=10, min_sep=1, max_sep=100)
+>>> nk1.process(cat1a, cat1b)  # Ideally, all of these use the same patches.
+>>> nk2.process(cat2a, cat2b)
+>>> corrs = [nk1, nk2]
+>>> ratio = func(corrs)  # = nk1.xi / nk2.xi
+>>> cov = treecorr.estimate_multi_cov(corrs, 'jackknife', func)
+
+
+

The resulting covariance matrix, cov, will be the jackknife estimate for the derived +data vector, ratio.

+
+
+

Random Catalogs

+

There are a few adjustments to the above prescription when using random catalogs, which of course are required when doing an NN correlation.

+
  1. It is not necessarily required to use patches for the random catalog.
     The random is supposed to be dense enough that it doesn’t materially contribute
     to the noise in the correlation measurement. In particular, it doesn’t have
     any sample variance itself, and the shot noise component should be small
     compared to the shot noise in the data.
  2. If you do use patches for the random catalog, then you need to make sure
     that you use the same patch definitions for both the data and the randoms.
     Using patches for the randoms probably leads to slightly better covariance
     estimates in most cases, but the difference in the two results is usually small.
     (Note: This seems to be less true for 3pt NNN correlations than 2pt NN.
     Using patches for the randoms gives significantly better covariance estimates
     in that case than not doing so.)
  3. The covariance calculation cannot happen until you call calculateXi
     to let TreeCorr know what the RR and DR (if using that) results are.
  4. After calling dd.calculateXi, dd will have varxi and cov attributes
     calculated according to whatever var_method you specified.
  5. It also allows you to call dd.estimate_cov with any different method you want.
     And you can include dd in a list of correlation objects passed to
     treecorr.estimate_multi_cov.
+

Here is a worked example:

+
>>> data = treecorr.Catalog(config, npatch=N)
+>>> rand = treecorr.Catalog(rand_config, patch_centers=data.patch_centers)
+>>> dd = treecorr.NNCorrelation(nn_config, var_method='jackknife')
+>>> dr = treecorr.NNCorrelation(nn_config)
+>>> rr = treecorr.NNCorrelation(nn_config)
+>>> dd.process(data)
+>>> dr.process(data, rand)
+>>> rr.process(rand)
+>>> dd.calculateXi(rr=rr, dr=dr)
+>>> dd_cov = dd.cov  # Can access covariance now.
+>>> dd_cov_bs = dd.estimate_cov(method='bootstrap') # Or calculate a different one.
+>>> txcov = treecorr.estimate_multi_cov([ng,gg,dd], 'bootstrap') # Or include in multi_cov
+
+
+

As mentioned above, using patch_centers is optional for rand, but probably recommended. +In the last line, it would be required that ng and gg were also made using catalogs +with the same patch centers that dd used.

+

The use pattern for NNNCorrelation is analogous: NNNCorrelation.calculateZeta needs to be run to get the covariance estimate, after which the correlation object may be included in a list passed to treecorr.estimate_multi_cov.

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/field.html b/docs/_build/html/field.html new file mode 100644 index 00000000..ab803f1f --- /dev/null +++ b/docs/_build/html/field.html @@ -0,0 +1,761 @@ + + + + + + Fields — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Fields

+

The Field class and its subclasses repackage the information from a Catalog into a ball tree data structure, allowing for fast calculation of the correlation functions.

+

There are several kinds of Field classes.

+
+
+
+

Typically, one would not create any of these objects directly, but would instead use the Catalog methods getNField, getGField, or getKField. Indeed, usually one does not even do that, and just lets the relevant process command build them automatically.

+
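E.g. a minimal sketch, assuming x and y are numpy arrays of positions:

>>> cat = treecorr.Catalog(x=x, y=y)
>>> nfield = cat.getNField()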
+
+class treecorr.Field[source]
+

A Field in TreeCorr is the object that stores the tree structure we use for efficient +calculation of the correlation functions.

+

The root “cell” in the tree has information about the whole field, including the total +number of points, the total weight, the mean position, the size (by which we mean the +maximum distance of any point from the mean position), and possibly more information depending +on which kind of field we have.

+

It also points to two sub-cells which each describe about half the points. These are commonly +referred to as “daughter cells”. They in turn point to two more cells each, and so on until +we get to cells that are considered “small enough” according to the min_size parameter given +in the constructor. These lowest level cells are referred to as “leaves”.

+

Technically, a Field doesn’t have just one of these trees. To make parallel computation +more efficient, we actually skip the first few layers of the tree as described above and +store a list of root cells. The three parameters that determine how many of these there +will be are max_size, min_top, and max_top:

+
  • max_size sets the maximum size cell that we want to make sure we have in the trees, so the root cells will be at least this large. The default is None, which means we care about all sizes, so there may be only one root cell (but typically more because of min_top).

  • min_top sets the minimum number of initial levels to skip. The default is either 3 or \(\log_2(N_{cpu})\), whichever is larger. This means there will be at least 8 (or \(N_{cpu}\)) root cells (assuming ntot is at least this large of course).

  • max_top sets the maximum number of initial levels to skip. The default is 10, which means there could be up to 1024 root cells.
+

Finally, the split_method parameter sets how the points in a cell should be divided when forming the two daughter cells. The split is always done along whichever dimension has the largest extent. E.g. if max(|x - meanx|) is larger than max(|y - meany|) and (for 3d) max(|z - meanz|), then it will split according to the x values. But the dividing point along that dimension depends on split_method. The allowed values are listed here, with a short sketch of the rules following the list:

+
  • ‘mean’ means to divide the points at the average (mean) value of x, y or z.

  • ‘median’ means to divide the points at the median value of x, y, or z.

  • ‘middle’ means to divide the points at the midpoint between the minimum and maximum values.

  • ‘random’ means to divide the points randomly somewhere between the 40th and 60th percentile locations in the sorted list.
+
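To make these rules concrete, here is a minimal Python sketch of the four options; split_points is a hypothetical helper, not TreeCorr's actual (C++) implementation:

import numpy as np

def split_points(x, split_method, rng=None):
    # Illustrative only: x is the 1-d array of coordinates along the
    # dimension with the largest extent.
    if split_method == 'mean':
        s = np.mean(x)
    elif split_method == 'median':
        s = np.median(x)
    elif split_method == 'middle':
        s = 0.5 * (np.min(x) + np.max(x))
    elif split_method == 'random':
        rng = rng if rng is not None else np.random.default_rng()
        # Somewhere between the 40th and 60th percentile of the sorted values.
        s = np.quantile(x, rng.uniform(0.4, 0.6))
    else:
        raise ValueError("invalid split_method")
    left = x <= s
    return left, ~left   # boolean masks for the two daughter cells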

Field itself is an abstract base class for the specific types of field classes. +As such, it cannot be constructed directly. You should make one of the concrete subclasses:

+
  • NField describes a field of objects to be counted only.

  • KField describes a field of points sampling a scalar field (e.g. kappa in the weak lensing context). In addition to the above values, cells keep track of the mean kappa value in the given region.

  • GField describes a field of points sampling a spinor field (e.g. gamma in the weak lensing context). In addition to the above values, cells keep track of the mean (complex) gamma value in the given region.
+
+
+property cat
+

The catalog from which this field was constructed.

+

It is stored as a weakref, so if the Catalog has already been garbage collected, this +might be None.

+
+ +
+
+count_near(*args, **kwargs)[source]
+

Count how many points are near a given coordinate.

+

Use the existing tree structure to count how many points are within some given separation +of a target coordinate.

+

There are several options for how to specify the reference coordinate, which depends +on the type of coordinate system this field implements.

+
    +
  1. For flat 2-dimensional coordinates:
+
+
Parameters
+
    +
  • x (float) – The x coordinate of the target location

  • +
  • y (float) – The y coordinate of the target location

  • +
  • sep (float) – The separation distance

  • +
+
+
+
    +
  2. For 3-dimensional Cartesian coordinates:
+
+
Parameters
+
    +
  • x (float) – The x coordinate of the target location

  • +
  • y (float) – The y coordinate of the target location

  • +
  • z (float) – The z coordinate of the target location

  • +
  • sep (float) – The separation distance

  • +
+
+
+
    +
  3. For spherical coordinates:
+
+
Parameters
+
    +
  • ra (float or Angle) – The right ascension of the target location

  • +
  • dec (float or Angle) – The declination of the target location

  • +
  • c (CelestialCoord) – A coord.CelestialCoord object in lieu of (ra, dec)

  • +
  • sep (float or Angle) – The separation distance

  • +
  • ra_units (str) – The units of ra if given as a float

  • +
  • dec_units (str) – The units of dec if given as a float

  • +
  • sep_units (str) – The units of sep if given as a float

  • +
+
+
+
    +
  4. For spherical coordinates with distances:
+
+
Parameters
+
    +
  • ra (float or Angle) – The right ascension of the target location

  • +
  • dec (float or Angle) – The declination of the target location

  • +
  • c (CelestialCoord) – A coord.CelestialCoord object in lieu of (ra, dec)

  • +
  • r (float) – The distance to the target location

  • +
  • sep (float) – The separation distance

  • +
  • ra_units (str) – The units of ra if given as a float

  • +
  • dec_units (str) – The units of dec if given as a float

  • +
+
+
+

In all cases, for parameters that are angles (ra, dec, sep for ‘spherical’), you may either provide this quantity as a coord.Angle instance, or you may provide ra_units, dec_units or sep_units respectively to specify the angular units being provided.

+

Finally, in cases where ra, dec are allowed, you may instead provide a +coord.CelestialCoord instance as the first argument to specify both RA and Dec.
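For example (hypothetical target values; field is any concrete Field instance):

>>> n = field.count_near(x=100., y=200., sep=10.)     # flat 2-d
>>> n = field.count_near(ra=12.3, dec=-45.6, ra_units='deg',
...                      dec_units='deg', sep=1., sep_units='arcmin')  # spherical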

+
+ +
+
+get_near(*args, **kwargs)[source]
+

Get the indices of points near a given coordinate.

+

Use the existing tree structure to find the points that are within some given separation +of a target coordinate.

+

There are several options for how to specify the reference coordinate, which depends +on the type of coordinate system this field implements.

+
    +
  1. For flat 2-dimensional coordinates:
+
+
Parameters
+
    +
  • x (float) – The x coordinate of the target location

  • +
  • y (float) – The y coordinate of the target location

  • +
  • sep (float) – The separation distance

  • +
+
+
+
    +
  2. For 3-dimensional Cartesian coordinates:
+
+
Parameters
+
    +
  • x (float) – The x coordinate of the target location

  • +
  • y (float) – The y coordinate of the target location

  • +
  • z (float) – The z coordinate of the target location

  • +
  • sep (float) – The separation distance

  • +
+
+
+
    +
  3. For spherical coordinates:
+
+
Parameters
+
    +
  • ra (float or Angle) – The right ascension of the target location

  • +
  • dec (float or Angle) – The declination of the target location

  • +
  • c (CelestialCoord) – A coord.CelestialCoord object in lieu of (ra, dec)

  • +
  • sep (float or Angle) – The separation distance

  • +
  • ra_units (str) – The units of ra if given as a float

  • +
  • dec_units (str) – The units of dec if given as a float

  • +
  • sep_units (str) – The units of sep if given as a float

  • +
+
+
+
    +
  4. For spherical coordinates with distances:
+
+
Parameters
+
    +
  • ra (float or Angle) – The right ascension of the target location

  • +
  • dec (float or Angle) – The declination of the target location

  • +
  • c (CelestialCoord) – A coord.CelestialCoord object in lieu of (ra, dec)

  • +
  • r (float) – The distance to the target location

  • +
  • sep (float) – The separation distance

  • +
  • ra_units (str) – The units of ra if given as a float

  • +
  • dec_units (str) – The units of dec if given as a float

  • +
+
+
+

In all cases, for parameters that are angles (ra, dec, sep for ‘spherical’), you may either provide this quantity as a coord.Angle instance, or you may provide ra_units, dec_units or sep_units respectively to specify the angular units being provided.

+

Finally, in cases where ra, dec are allowed, you may instead provide a +coord.CelestialCoord instance as the first argument to specify both RA and Dec.
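As a sketch (assuming cat is the Catalog the field was built from):

>>> idx = field.get_near(x=100., y=200., sep=10.)
>>> x_near, y_near = cat.x[idx], cat.y[idx]   # positions of the nearby points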

+
+ +
+
+kmeans_assign_patches(centers)[source]
+

Assign patch numbers to each point according to the given centers.

+

This is the final step in the full K-Means algorithm. It assigns patch numbers to each point in the field according to which center is closest.

+
+
Parameters
+

centers (array) – An array of center coordinates. +Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or +spherical geometries. In the latter case, the centers represent +(x,y,z) coordinates on the unit sphere.

+
+
Returns
+

An array of patch labels, all integers from 0..npatch-1. Size is self.ntot.

+
+
+
+ +
+
+kmeans_initialize_centers(npatch, init='tree', *, rng=None)[source]
+

Use the field’s tree structure to assign good initial centers for a K-Means run.

+

The classic K-Means algorithm involves starting with random points as the initial +centers of the patches. This has a tendency to result in rather poor results in +terms of having similar sized patches at the end. Specifically, the standard deviation +of the inertia at the local minimum that the K-Means algorithm settles into tends to be +fairly high for typical geometries.

+

A better approach is to use the existing tree structure to start out with centers that +are fairly evenly spread out through the field. This algorithm traverses the tree +until we get to a level that has enough cells for the requested number of patches. +Then it uses the centroids of these cells as the initial patch centers.

+
+
Parameters
+
    +
  • npatch (int) – How many patches to generate initial centers for

  • +
  • init (str) –

    Initialization method. Options are:

    +
      +
    • ’tree’ (default) = Use the normal tree structure of the +field, traversing down to a level where there are npatch +cells, and use the centroids of these cells as the initial +centers. This is almost always the best choice.

    • +
    • ’random’ = Use npatch random points as the initial centers.

    • +
    • ’kmeans++’ = Use the k-means++ algorithm. +cf. https://en.wikipedia.org/wiki/K-means%2B%2B

    • +
    +

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for random +number generation. (default: None)

  • +
+
+
Returns
+

An array of center coordinates. +Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or +spherical geometries. In the latter case, the centers represent +(x,y,z) coordinates on the unit sphere.

+
+
+
+ +
+
+kmeans_refine_centers(centers, *, max_iter=200, tol=1e-05, alt=False)[source]
+

Fast implementation of the K-Means algorithm

+

The standard K-Means algorithm is as follows +(cf. https://en.wikipedia.org/wiki/K-means_clustering):

+
  1. Choose centers somehow. Traditionally, this is done by just selecting npatch random points from the full set, but we do this more smartly in kmeans_initialize_centers.

  2. For each point, measure the distance to each current patch center, and assign it to the patch that has the closest center.

  3. Update all the centers to be the centroid of the points assigned to each patch.

  4. Repeat steps 2 and 3 until the rms shift in the centers is less than some tolerance or the maximum number of iterations is reached.

  5. Assign the corresponding patch label to each point (kmeans_assign_patches).
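These steps map directly onto the three lower-level methods. A minimal sketch of running them by hand (run_kmeans normally chains these for you):

>>> centers = field.kmeans_initialize_centers(npatch=20, init='tree')
>>> field.kmeans_refine_centers(centers)    # iterates steps 2 and 3; modifies centers in place
>>> patches = field.kmeans_assign_patches(centers)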

In TreeCorr, we use the tree structure to massively increase the speed of steps 2 and 3. +For a given cell, we know both its center and its size, so we can quickly check whether +all the points in the cell are closer to one center than another. This lets us quickly +cull centers from consideration as we traverse the tree. Once we get to a cell where only +one center can be closest for any of the points in it, we stop traversing and assign the +whole cell to that patch.

+

Further, it is also fast to update the new centroid, since the sum of all the positions +for a cell is just N times the cell’s centroid.

+

As a result, this algorithm typically takes a fraction of a second for ~a million points. Indeed, most of the time spent in the full kmeans calculation is in building the tree in the first place, rather than actually running the kmeans code. With the alternate algorithm (alt=True), the run is only slightly slower, owing to the need to calculate the sizes at each step.

+
+
Parameters
+
    +
  • centers (array) – An array of center coordinates. (modified by this function) +Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or +spherical geometries. In the latter case, the centers represent +(x,y,z) coordinates on the unit sphere.

  • +
  • max_iter (int) – How many iterations at most to run. (default: 200)

  • +
  • tol (float) – Tolerance in the rms centroid shift to consider as converged +as a fraction of the total field size. (default: 1.e-5)

  • +
  • alt (bool) – Use the alternate assignment algorithm to minimize the standard +deviation of the inertia rather than the total inertia (aka WCSS). +(default: False)

  • +
+
+
+
+ +
+
+property nTopLevelNodes
+

The number of top-level nodes.

+
+ +
+
+run_kmeans(npatch, *, max_iter=200, tol=1e-05, init='tree', alt=False, rng=None)[source]
+

Use k-means algorithm to set patch labels for a field.

+

The k-means algorithm (cf. https://en.wikipedia.org/wiki/K-means_clustering) identifies +a center position for each patch. Each point is then assigned to the patch whose center +is closest. The centers are then updated to be the mean position of all the points +assigned to the patch. This process is repeated until the center locations have converged.

+

The process tends to converge relatively quickly. The convergence criterion we use +is a tolerance on the rms shift in the centroid positions as a fraction of the overall +size of the whole field. This is settable as tol (default 1.e-5). You can also +set the maximum number of iterations to allow as max_iter (default 200).

+

The upshot of the k-means process is to minimize the total within-cluster sum of squares +(WCSS), also known as the “inertia” of each patch. This tends to produce patches with +more or less similar inertia, which make them useful for jackknife or other sampling +estimates of the errors in the correlation functions.

+

More specifically, if the points \(j\) have vector positions \(\vec x_j\), +and we define patches \(S_i\) to comprise disjoint subsets of the \(j\) +values, then the inertia \(I_i\) of each patch is defined as:

+
+\[I_i = \sum_{j \in S_i} \left| \vec x_j - \vec \mu_i \right|^2,\]
+

where \(\vec \mu_i\) is the center of each patch:

+
+\[\vec \mu_i \equiv \frac{\sum_{j \in S_i} \vec x_j}{N_i},\]
+

and \(N_i\) is the number of points assigned to patch \(S_i\). +The k-means algorithm finds a solution that is a local minimum in the total inertia, +\(\sum_i I_i\).

+

In addition to the normal k-means algorithm, we also offer an alternate algorithm, which can produce slightly better patches for the purpose of patch-based covariance estimation. The ideal patch definition for such use would be to minimize the standard deviation (std) of the inertia of each patch, not the total (or mean) inertia. It turns out that it is difficult to devise an algorithm that literally does this, since it has a tendency to become unstable and not converge.

+

However, adding a penalty term to the patch assignment step of the normal k-means +algorithm turns out to work reasonably well. The penalty term we use is \(f I_i\), +where \(f\) is a scaling constant (see below). When doing the assignment step we assign +each point \(j\) to the patch \(i\) that gives the minimum penalized distance

+
+\[d_{ij}^{\prime\;\! 2} = \left| \vec x_j - \mu_i \right|^2 + f I_i.\]
+

The penalty term means that patches with less inertia get more points on the next +iteration, and vice versa, which tends to equalize the inertia values somewhat. +The resulting patches have significantly lower std inertia, but typically only slightly +higher total inertia.

+

For the scaling constant, \(f\), we chose

+
+\[f = \frac{3}{\langle N_i\rangle},\]
+

three times the inverse of the mean number of points in each patch.

+

The \(1/\langle N_i\rangle\) factor makes the two terms of comparable magnitude +near the edges of the patches, so patches still get most of the points near their previous +centers, even if they already have larger than average inertia, but some of the points in +the outskirts of the patch might switch to a nearby patch with smaller inertia. The +factor of 3 is purely empirical, and was found to give good results in terms of std +inertia on some test data (the DES SV field).

+

The alternate algorithm is available by specifying alt=True. Despite it typically +giving better patch centers than the standard algorithm, we don’t make it the default, +because it may be possible for the iteration to become unstable, leading to some patches +with no points in them. (This happened in our tests when the arbitrary factor in the +scaling constant was 5 instead of 3, but I could not prove that 3 would always avoid this +failure mode.) If this happens for you, your best bet is probably to switch to the +standard algorithm, which can never suffer from this problem.

+
+
Parameters
+
    +
  • npatch (int) – How many patches to generate

  • +
  • max_iter (int) – How many iterations at most to run. (default: 200)

  • +
  • tol (float) – Tolerance in the rms centroid shift to consider as converged +as a fraction of the total field size. (default: 1.e-5)

  • +
  • init (str) –

    Initialization method. Options are:

    +
      +
    • ’tree’ (default) = Use the normal tree structure of the +field, traversing down to a level where there are npatch +cells, and use the centroids of these cells as the initial +centers. This is almost always the best choice.

    • +
    • ’random’ = Use npatch random points as the initial centers.

    • +
    • ’kmeans++’ = Use the k-means++ algorithm. +cf. https://en.wikipedia.org/wiki/K-means%2B%2B

    • +
    +

  • +
  • alt (bool) – Use the alternate assignment algorithm to minimize the standard +deviation of the inertia rather than the total inertia (aka WCSS). +(default: False)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for random +number generation. (default: None)

  • +
+
+
Returns
+

Tuple containing

+
+
    +
  • patches (array): An array of patch labels, all integers from 0..npatch-1. +Size is self.ntot.

  • +
  • centers (array): An array of center coordinates used to make the patches. +Shape is (npatch, 2) for flat geometries or (npatch, 3) for 3d or +spherical geometries. In the latter case, the centers represent +(x,y,z) coordinates on the unit sphere.

  • +
+
+
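A minimal usage sketch (normally one just passes npatch to treecorr.Catalog, which runs this for you; the patch argument to Catalog is assumed here as the way to reuse the labels):

>>> field = cat.getNField()
>>> patches, centers = field.run_kmeans(npatch=20)
>>> cat2 = treecorr.Catalog(x=cat.x, y=cat.y, patch=patches)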

+
+
+
+ +
+ +
+
+class treecorr.NField(cat, *, min_size=0, max_size=None, split_method='mean', brute=False, min_top=None, max_top=10, coords=None, rng=None, logger=None)[source]
+

This class stores the positions and number of objects in a tree structure from which it is +efficient to compute correlation functions.

+

An NField is typically created from a Catalog object using

+
>>> nfield = cat.getNField(min_size=min_size, max_size=max_size)
+
+
+
+
Parameters
+
    +
  • cat (Catalog) – The catalog from which to make the field.

  • +
  • min_size (float) – The minimum radius cell required (usually min_sep). (default: 0)

  • +
  • max_size (float) – The maximum radius cell required (usually max_sep). (default: None)

  • +
  • split_method (str) – Which split method to use (‘mean’, ‘median’, ‘middle’, or ‘random’). +(default: ‘mean’)

  • +
  • brute (bool) – Whether to force traversal to the leaves for this field. +(default: False)

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the field. +(default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the field. +(default: 10)

  • +
  • coords (str) – The kind of coordinate system to use. (default: cat.coords)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for random +number generation. (default: None)

  • +
  • logger (Logger) – A logger file if desired. (default: None)

  • +
+
+
+
+ +
+
+class treecorr.GField(cat, *, min_size=0, max_size=None, split_method='mean', brute=False, min_top=None, max_top=10, coords=None, rng=None, logger=None)[source]
+

This class stores the values of a spinor field (gamma in the weak lensing context) in a +tree structure from which it is efficient to compute correlation functions.

+

A GField is typically created from a Catalog object using

+
>>> gfield = cat.getGField(min_size=min_size, max_size=max_size)
+
+
+
+
Parameters
+
    +
  • cat (Catalog) – The catalog from which to make the field.

  • +
  • min_size (float) – The minimum radius cell required (usually min_sep). (default: 0)

  • +
  • max_size (float) – The maximum radius cell required (usually max_sep). (default: None)

  • +
  • split_method (str) – Which split method to use (‘mean’, ‘median’, ‘middle’, or ‘random’). +(default: ‘mean’)

  • +
  • brute (bool) – Whether to force traversal to the leaves for this field. +(default: False)

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the field. +(default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the field. +(default: 10)

  • +
  • coords (str) – The kind of coordinate system to use. (default: cat.coords)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for random +number generation. (default: None)

  • +
  • logger (Logger) – A logger file if desired. (default: None)

  • +
+
+
+
+ +
+
+class treecorr.KField(cat, *, min_size=0, max_size=None, split_method='mean', brute=False, min_top=None, max_top=10, coords=None, rng=None, logger=None)[source]
+

This class stores the values of a scalar field (kappa in the weak lensing context) in a +tree structure from which it is efficient to compute correlation functions.

+

A KField is typically created from a Catalog object using

+
>>> kfield = cat.getKField(min_size=min_size, max_size=max_size)
+
+
+
+
Parameters
+
    +
  • cat (Catalog) – The catalog from which to make the field.

  • +
  • min_size (float) – The minimum radius cell required (usually min_sep). (default: 0)

  • +
  • max_size (float) – The maximum radius cell required (usually max_sep). (default: None)

  • +
  • split_method (str) – Which split method to use (‘mean’, ‘median’, ‘middle’, or ‘random’). +(default: ‘mean’)

  • +
  • brute (bool) – Whether to force traversal to the leaves for this field. +(default: False)

  • +
  • min_top (int) – The minimum number of top layers to use when setting up the field. +(default: \(\max(3, \log_2(N_{\rm cpu}))\))

  • +
  • max_top (int) – The maximum number of top layers to use when setting up the field. +(default: 10)

  • +
  • coords (str) – The kind of coordinate system to use. (default: cat.coords)

  • +
  • rng (RandomState) – If desired, a numpy.random.RandomState instance to use for random +number generation. (default: None)

  • +
  • logger (Logger) – A logger file if desired. (default: None)

  • +
+
+
+
+ +
+
+class treecorr.SimpleField[source]
+

A SimpleField is like a Field, but only stores the leaves as a list, skipping all the +tree stuff.

+

Again, this is an abstract base class, which cannot be instantiated. You should +make one of the concrete subclasses:

+
+
    +
  • NSimpleField describes a field of objects to be counted only.

  • +
  • KSimpleField describes a field of points sampling a scalar field.

  • +
  • GSimpleField describes a field of points sampling a spinor field.

  • +
+
+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+ +
+
+class treecorr.NSimpleField(cat, *, logger=None)[source]
+

This class stores the positions as a list, skipping all the tree stuff.

+

An NSimpleField is typically created from a Catalog object using

+
>>> nfield = cat.getNSimpleField()
+
+
+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat (Catalog) – The catalog from which to make the field.

  • +
  • logger (Logger) – A logger file if desired. (default: None)

  • +
+
+
+
+ +
+
+class treecorr.GSimpleField(cat, *, logger=None)[source]
+

This class stores the shear field as a list, skipping all the tree stuff.

+

A GSimpleField is typically created from a Catalog object using

+
>>> gfield = cat.getGSimpleField()
+
+
+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat (Catalog) – The catalog from which to make the field.

  • +
  • logger (Logger) – A logger file if desired. (default: None)

  • +
+
+
+
+ +
+
+class treecorr.KSimpleField(cat, *, logger=None)[source]
+

This class stores the kappa field as a list, skipping all the tree stuff.

+

A KSimpleField is typically created from a Catalog object using

+
>>> kfield = cat.getKSimpleField()
+
+
+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat (Catalog) – The catalog from which to make the field.

  • +
  • logger (Logger) – A logger file if desired. (default: None)


GGCorrelation: Shear-shear correlations

+
+
+class treecorr.GGCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr2.BinnedCorr2

+

This class handles the calculation and storage of a 2-point shear-shear correlation +function.

+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
+
+
+

In addition, the following attributes are numpy arrays of length (nbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • meanr – The (weighted) mean value of r for the pairs in each bin. +If there are no pairs in a bin, then exp(logr) will be used instead.

  • +
  • meanlogr – The (weighted) mean value of log(r) for the pairs in each bin. +If there are no pairs in a bin, then logr will be used instead.

  • +
  • xip – The correlation function, \(\xi_+(r)\).

  • +
  • xim – The correlation function, \(\xi_-(r)\).

  • +
  • xip_im – The imaginary part of \(\xi_+(r)\).

  • +
  • xim_im – The imaginary part of \(\xi_-(r)\).

  • +
  • varxip – An estimate of the variance of \(\xi_+(r)\)

  • +
  • varxim – An estimate of the variance of \(\xi_-(r)\)

  • +
  • weight – The total weight in each bin.

  • +
  • npairs – The number of pairs going into each bin (including pairs where one or +both objects have w=0).

  • +
  • cov – An estimate of the full covariance matrix for the data vector with +\(\xi_+\) first and then \(\xi_-\).

  • +
+
+
+
+

Note

+

The default method for estimating the variance and covariance attributes (varxip, +varxim, and cov) is ‘shot’, which only includes the shape noise propagated into +the final correlation. This does not include sample variance, so it is always an +underestimate of the actual variance. To get better estimates, you need to set +var_method to something else and use patches in the input catalog(s). +cf. Covariance Estimates.

+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_auto and/or +process_cross, then the units will not be applied to meanr or meanlogr until +the finalize function is called.

+
+

The typical usage pattern is as follows:

+
>>> gg = treecorr.GGCorrelation(config)
>>> gg.process(cat)         # For auto-correlation.
>>> gg.process(cat1,cat2)   # For cross-correlation.
>>> gg.write(file_name)     # Write out to a file.
>>> xip = gg.xip            # Or access the correlation function directly.
+
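If you instead separate out the steps as in the note above, a minimal sketch looks like this (treecorr.calcVarG is assumed here as the helper computing the per-component shear variance of a catalog):

>>> varg = treecorr.calcVarG(cat)
>>> gg = treecorr.GGCorrelation(config)
>>> gg.process_auto(cat)
>>> gg.finalize(varg, varg)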
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. +This dict is allowed to have addition entries besides those listed +in BinnedCorr2, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr2 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two GGCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second GGCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both GGCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize GGCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+calculateGamSq(*, R=None, eb=False)[source]
+

Calculate the tophat shear variance from the correlation function.

+
+\[ \begin{align}\begin{aligned}\begin{split}\langle \gamma^2 \rangle(R) &= \int_0^{2R} \frac{r dr}{R^2} S_+(s) \xi_+(r) \\ +\langle \gamma^2 \rangle_E(R) &= \int_0^{2R} \frac{r dr}{2 R^2} +\left[ S_+\left(\frac{r}{R}\right) \xi_+(r) + +S_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\ +\langle \gamma^2 \rangle_B(R) &= \int_0^{2R} \frac{r dr}{2 R^2} +\left[ S_+\left(\frac{r}{R}\right) \xi_+(r) - +S_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\\end{split}\\\begin{split}S_+(s) &= \frac{1}{\pi} \left(4 \arccos(s/2) - s \sqrt{4-s^2} \right) \\ +S_-(s) &= \begin{cases} +s<=2, & \frac{1}{\pi s^4} \left(s \sqrt{4-s^2} (6-s^2) - 8(3-s^2) \arcsin(s/2)\right)\\ +s>=2, & \frac{1}{s^4} \left(4(s^2-3)\right) +\end{cases}\end{split}\end{aligned}\end{align} \]
+

cf. Schneider, et al (2002): A&A, 389, 729

+

The default behavior is not to compute the E/B versions. They are calculated if +eb is set to True.

+
+

Note

+

This function is only implemented for Log binning.

+
+
+
Parameters
+
    +
  • R (array) – The R values at which to calculate the shear variance. +(default: None, which means use self.rnom)

  • +
  • eb (bool) – Whether to include the E/B decomposition as well as the total +\(\langle \gamma^2\rangle\). (default: False)

  • +
+
+
Returns
+

Tuple containing

+
+
    +
  • gamsq = array of \(\langle \gamma^2 \rangle(R)\)

  • +
  • vargamsq = array of the variance estimate of gamsq

  • +
  • gamsq_e (Only if eb is True) = array of \(\langle \gamma^2 \rangle_E(R)\)

  • +
  • gamsq_b (Only if eb is True) = array of \(\langle \gamma^2 \rangle_B(R)\)

  • +
  • vargamsq_e (Only if eb is True) = array of the variance estimate of +gamsq_e or gamsq_b

  • +
+
+
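A usage sketch following the documented return values:

>>> gamsq, vargamsq = gg.calculateGamSq()
>>> gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_e = gg.calculateGamSq(eb=True)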

+
+
+
+ +
+
+calculateMapSq(*, R=None, m2_uform=None)[source]
+

Calculate the aperture mass statistics from the correlation function.

+
+\[\begin{split}\langle M_{ap}^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2} +\left [ T_+\left(\frac{r}{R}\right) \xi_+(r) + +T_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\ +\langle M_\times^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2} +\left[ T_+\left(\frac{r}{R}\right) \xi_+(r) - +T_-\left(\frac{r}{R}\right) \xi_-(r) \right]\end{split}\]
+

The m2_uform parameter sets which definition of the aperture mass to use. +The default is to use ‘Crittenden’.

+

If m2_uform is ‘Crittenden’:

+
+\[\begin{split}U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\ +Q(r) &= \frac{1}{4\pi} r^2 \exp(-r^2/2) \\ +T_+(s) &= \frac{s^4 - 16s^2 + 32}{128} \exp(-s^2/4) \\ +T_-(s) &= \frac{s^4}{128} \exp(-s^2/4) \\ +rmax &= \infty\end{split}\]
+

cf. Crittenden, et al (2002): ApJ, 568, 20

+

If m2_uform is ‘Schneider’:

+
+\[\begin{split}U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\ +Q(r) &= \frac{6}{\pi} r^2 (1-r^2) \\ +T_+(s) &= \frac{12}{5\pi} (2-15s^2) \arccos(s/2) \\ +&\qquad + \frac{1}{100\pi} s \sqrt{4-s^2} (120 + 2320s^2 - 754s^4 + 132s^6 - 9s^8) \\ +T_-(s) &= \frac{3}{70\pi} s^3 (4-s^2)^{7/2} \\ +rmax &= 2R\end{split}\]
+

cf. Schneider, et al (2002): A&A, 389, 729

+
+

Note

+

This function is only implemented for Log binning.

+
+
+
Parameters
+
    +
  • R (array) – The R values at which to calculate the aperture mass statistics. +(default: None, which means use self.rnom)

  • +
  • m2_uform (str) – Which form to use for the aperture mass, as described above. +(default: ‘Crittenden’; this value can also be given in the +constructor in the config dict.)

  • +
+
+
Returns
+

Tuple containing

+
+
    +
  • mapsq = array of \(\langle M_{ap}^2 \rangle(R)\)

  • +
  • mapsq_im = the imaginary part of mapsq, which is an estimate of +\(\langle M_{ap} M_\times \rangle(R)\)

  • +
  • mxsq = array of \(\langle M_\times^2 \rangle(R)\)

  • +
  • mxsq_im = the imaginary part of mxsq, which is an estimate of +\(\langle M_{ap} M_\times \rangle(R)\)

  • +
  • varmapsq = array of the variance estimate of either mapsq or mxsq

  • +
+
+
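A usage sketch following the documented return values:

>>> mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(m2_uform='Crittenden')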

+
+
+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(varg1, varg2)[source]
+

Finalize the calculation of the correlation function.

+

The process_auto and process_cross commands accumulate values in each bin, +so they can be called multiple times if appropriate. Afterwards, this command +finishes the calculation by dividing each column by the total weight.

+
+
Parameters
+
    +
  • varg1 (float) – The shear variance per component for the first field.

  • +
  • varg2 (float) – The shear variance per component for the second field.

  • +
+
+
+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

In this case, this is the concatenation of self.xip and self.xim (raveled if necessary).

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

This is the weight array corresponding to getStat. In this case, the weight is +duplicated to account for both xip and xim returned as part of getStat().

+
+ +
+
+process(cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the correlation function.

+
    +
  • If only 1 argument is given, then compute an auto-correlation function.

  • +
  • If 2 arguments are given, then compute a cross-correlation function.

  • +
+

Both arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first G field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second G field, if any. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr2.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_auto(cat, *, metric=None, num_threads=None)[source]
+

Process a single catalog, accumulating the auto-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat (Catalog) – The catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_pairwise(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation, only using +the corresponding pairs of objects in each catalog.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See process for +details. (default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The GGCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

The output file will include the following columns:

Column      Description
----------  ------------------------------------------------------------------
r_nom       The nominal center of the bin in r
meanr       The mean value \(\langle r \rangle\) of pairs that fell into each bin
meanlogr    The mean value \(\langle \log(r) \rangle\) of pairs that fell into each bin
xip         The real part of the \(\xi_+\) correlation function
xim         The real part of the \(\xi_-\) correlation function
xip_im      The imag part of the \(\xi_+\) correlation function
xim_im      The imag part of the \(\xi_-\) correlation function
sigma_xip   The sqrt of the variance estimate of \(\xi_+\)
sigma_xim   The sqrt of the variance estimate of \(\xi_-\)
weight      The total weight contributing to each bin
npairs      The total number of pairs in each bin
+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+
+writeMapSq(file_name, *, R=None, m2_uform=None, file_type=None, precision=None)[source]
+

Write the aperture mass statistics based on the correlation function to the +file, file_name.

+

See calculateMapSq for an explanation of the m2_uform parameter.

+

The output file will include the following columns:

Column    Description
--------  --------------------------------------------------------------------
R         The aperture radius
Mapsq     The real part of \(\langle M_{ap}^2\rangle\) (cf. calculateMapSq)
Mxsq      The real part of \(\langle M_\times^2\rangle\)
MMxa      The imag part of \(\langle M_{ap}^2\rangle\): an estimator of \(\langle M_{ap} M_\times\rangle\)
MMxb      The imag part of \(\langle M_\times^2\rangle\): an estimator of \(\langle M_{ap} M_\times\rangle\)
sig_map   The sqrt of the variance estimate of \(\langle M_{ap}^2\rangle\)
Gamsq     The tophat shear variance \(\langle \gamma^2\rangle\) (cf. calculateGamSq)
sig_gam   The sqrt of the variance estimate of \(\langle \gamma^2\rangle\)
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • R (array) – The R values at which to calculate the statistics. +(default: None, which means use self.rnom)

  • +
  • m2_uform (str) – Which form to use for the aperture mass. (default: ‘Crittenden’; +this value can also be given in the constructor in the config dict.)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/ggg.html b/docs/_build/html/ggg.html new file mode 100644 index 00000000..045c5040 --- /dev/null +++ b/docs/_build/html/ggg.html @@ -0,0 +1,1005 @@ + + + + + + GGGCorrelation: Shear-shear-shear correlations — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

GGGCorrelation: Shear-shear-shear correlations

+
+
+class treecorr.GGGCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr3.BinnedCorr3

+

This class handles the calculation and storage of a 3-point shear-shear-shear correlation +function.

+

We use the “natural components” of the shear 3-point function described by Schneider & +Lombardi (2003) [Astron.Astrophys. 397 (2003) 809-818]. In this paradigm, the shears +are projected relative to some point defined by the geometry of the triangle. They +give several reasonable choices for this point. We choose the triangle’s centroid as the +“most natural” point, as many simple shear fields have purely real \(\Gamma_0\) using +this definition. It is also a fairly simple point to calculate in the code compared to +some of the other options they offer, so projections relative to it are fairly efficient.

+

There are 4 complex-valued 3-point shear correlation functions defined for triples of shear values projected relative to the line joining the location of the shear to the centroid of the triangle:

+
+\[\begin{split}\Gamma_0 &= \langle \gamma(\mathbf{x1}) \gamma(\mathbf{x2}) \gamma(\mathbf{x3}) \rangle \\ +\Gamma_1 &= \langle \gamma(\mathbf{x1})^* \gamma(\mathbf{x2}) \gamma(\mathbf{x3}) \rangle \\ +\Gamma_2 &= \langle \gamma(\mathbf{x1}) \gamma(\mathbf{x2})^* \gamma(\mathbf{x3}) \rangle \\ +\Gamma_3 &= \langle \gamma(\mathbf{x1}) \gamma(\mathbf{x2}) \gamma(\mathbf{x3})^* \rangle \\\end{split}\]
+

where \(\mathbf{x1}, \mathbf{x2}, \mathbf{x3}\) are the corners of the triangle opposite sides d1, d2, d3 respectively, where d1 > d2 > d3, and \({}^*\) indicates complex conjugation.

+

See the doc string of BinnedCorr3 for a description of how the triangles +are binned.

+

This class only holds one set of these \(\Gamma\) functions, which means that it is only +directly applicable for computing auto-correlations. To describe a cross-correlation of one +shear field with another, you need three sets of these functions. To describe a three-way +cross-correlation of three different shear fields, you need six. These use cases are +enabled by the class GGGCrossCorrelation which holds six instances of this class to keep +track of all the various triangles. See that class for more details.

+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr where r = d2

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
  • nubins – The number of bins in u where u = d3/d2

  • +
  • ubin_size – The size of the bins in u

  • +
  • min_u – The minimum u being considered

  • +
  • max_u – The maximum u being considered

  • +
  • nvbins – The number of bins in v where v = ±(d1-d2)/d3

  • +
  • vbin_size – The size of the bins in v

  • +
  • min_v – The minimum v being considered

  • +
  • max_v – The maximum v being considered

  • +
  • logr1d – The nominal centers of the nbins bins in log(r).

  • +
  • u1d – The nominal centers of the nubins bins in u.

  • +
  • v1d – The nominal centers of the nvbins bins in v.

  • +
+
+
+

In addition, the following attributes are numpy arrays whose shape is (nbins, nubins, nvbins):

+
+
Attributes
+
    +
  • logr – The nominal center of each bin in log(r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • u – The nominal center of each bin in u.

  • +
  • v – The nominal center of each bin in v.

  • +
  • meand1 – The (weighted) mean value of d1 for the triangles in each bin.

  • +
  • meanlogd1 – The mean value of log(d1) for the triangles in each bin.

  • +
  • meand2 – The (weighted) mean value of d2 (aka r) for the triangles in each bin.

  • +
  • meanlogd2 – The mean value of log(d2) for the triangles in each bin.

  • +
  • meand3 – The (weighted) mean value of d3 for the triangles in each bin.

  • meanlogd3 – The mean value of log(d3) for the triangles in each bin.

  • +
  • meanu – The mean value of u for the triangles in each bin.

  • +
  • meanv – The mean value of v for the triangles in each bin.

  • +
  • gam0 – The 0th “natural” correlation function, \(\Gamma_0(r,u,v)\).

  • +
  • gam1 – The 1st “natural” correlation function, \(\Gamma_1(r,u,v)\).

  • +
  • gam2 – The 2nd “natural” correlation function, \(\Gamma_2(r,u,v)\).

  • +
  • gam3 – The 3rd “natural” correlation function, \(\Gamma_3(r,u,v)\).

  • +
  • vargam0 – The variance of \(\Gamma_0\), only including the shot noise +propagated into the final correlation. This (and the related values for +1,2,3) does not include sample variance, so it is always an underestimate +of the actual variance.

  • +
  • vargam1 – The variance of \(\Gamma_1\).

  • +
  • vargam2 – The variance of \(\Gamma_2\).

  • +
  • vargam3 – The variance of \(\Gamma_3\).

  • +
  • weight – The total weight in each bin.

  • +
  • ntri – The number of triangles going into each bin (including those where one or +more objects have w=0).

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_auto and/or +process_cross, then the units will not be applied to meanr or meanlogr until +the finalize function is called.

+
+

The typical usage pattern is as follows:

+
>>> ggg = treecorr.GGGCorrelation(config)
>>> ggg.process(cat)              # For auto-correlation.
>>> ggg.process(cat1,cat2,cat3)   # For cross-correlation.
>>> ggg.write(file_name)          # Write out to a file.
>>> gam0 = ggg.gam0               # To access gamma values directly (similarly gam1, etc.)
>>> gam0r = ggg.gam0r             # You can also access real and imag parts separately.
>>> gam0i = ggg.gam0i
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. +This dict is allowed to have addition entries besides those listed +in BinnedCorr3, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr3 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two GGGCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second GGGCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both GGGCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize GGGCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+calculateMap3(*, R=None, k2=1, k3=1)[source]
+

Calculate the skewness of the aperture mass from the correlation function.

+

The equations for this come from Jarvis, Bernstein & Jain (2004, MNRAS, 352). +See their section 3, especially equations 51 and 52 for the \(T_i\) functions, +equations 60 and 61 for the calculation of \(\langle \cal M^3 \rangle\) and +\(\langle \cal M^2 M^* \rangle\), and equations 55-58 for how to convert +these to the return values.

+

If k2 or k3 != 1, then this routine calculates the generalization of the skewness +proposed by Schneider, Kilbinger & Lombardi (2005, A&A, 431): +\(\langle M_{ap}^3(R, k_2 R, k_3 R)\rangle\) and related values.

+

If k2 = k3 = 1 (the default), then there are only 4 combinations of Map and Mx +that are relevant:

+
  • map3 = \(\langle M_{ap}^3(R)\rangle\)

  • map2mx = \(\langle M_{ap}^2(R) M_\times(R)\rangle\)

  • mapmx2 = \(\langle M_{ap}(R) M_\times^2(R)\rangle\)

  • mx3 = \(\langle M_\times^3(R)\rangle\)
+

However, if k2 or k3 != 1, then there are 8 combinations:

+
    +
  • map3 = \(\langle M_{ap}(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mapmapmx = \(\langle M_{ap}(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • mapmxmap = \(\langle M_{ap}(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mxmapmap = \(\langle M_\times(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mxmxmap = \(\langle M_\times(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mxmapmx = \(\langle M_\times(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • mapmxmx = \(\langle M_{ap}(R) M_\times(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • mx3 = \(\langle M_\times(R) M_\times(k_2 R) M_\times(k_3 R)\rangle\)

  • +
+

To accommodate this full generality, we always return all 8 values, along with the +estimated variance (which is equal for each), even when k2 = k3 = 1.

+
+

Note

+

The formulae for the m2_uform = ‘Schneider’ definition of the aperture mass, +described in the documentation of calculateMapSq, are not known, so that is not an +option here. The calculations here use the definition that corresponds to +m2_uform = ‘Crittenden’.

+
+
+
Parameters
+
    +
  • R (array) – The R values at which to calculate the aperture mass statistics. +(default: None, which means use self.rnom1d)

  • +
  • k2 (float) – If given, the ratio R2/R1 in the SKL formulae. (default: 1)

  • +
  • k3 (float) – If given, the ratio R3/R1 in the SKL formulae. (default: 1)

  • +
+
+
Returns
+

    +
  • map3 = array of \(\langle M_{ap}(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mapmapmx = array of \(\langle M_{ap}(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • mapmxmap = array of \(\langle M_{ap}(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mxmapmap = array of \(\langle M_\times(R) M_{ap}(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mxmxmap = array of \(\langle M_\times(R) M_\times(k_2 R) M_{ap}(k_3 R)\rangle\)

  • +
  • mxmapmx = array of \(\langle M_\times(R) M_{ap}(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • mapmxmx = array of \(\langle M_{ap}(R) M_\times(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • mx3 = array of \(\langle M_\times(R) M_\times(k_2 R) M_\times(k_3 R)\rangle\)

  • +
  • varmap3 = array of variance estimates of the above values

  • +
+

+
+
Return type
+

Tuple containing

+
+
+
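A usage sketch following the documented return order (map3 first, varmap3 last):

>>> results = ggg.calculateMap3(R=R)
>>> map3, mx3, varmap3 = results[0], results[7], results[8]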
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(varg1, varg2, varg3)[source]
+

Finalize the calculation of the correlation function.

+

The process_auto and process_cross commands accumulate values in each bin, +so they can be called multiple times if appropriate. Afterwards, this command +finishes the calculation by dividing by the total weight.

+
+
Parameters
+
    +
  • varg1 (float) – The shear variance for the first field.

  • +
  • varg2 (float) – The shear variance for the second field.

  • +
  • varg3 (float) – The shear variance for the third field.

  • +
+
+
+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

In this case, the concatenation of gam0.ravel(), gam1.ravel(), gam2.ravel(), gam3.ravel().

+
+

Note

+

This is a complex array, unlike most other statistics. +The computed covariance matrix will be complex, although since it is Hermitian the +diagonal is real, so the resulting vargam0, etc. will all be real arrays.

+
+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

In this case, 4 copies of self.weight.ravel().

+
+ +
+
+process(cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the 3pt correlation function.

+
    +
  • If only 1 argument is given, then compute an auto-correlation function.

  • +
  • If 2 arguments are given, then compute a cross-correlation function with the +first catalog taking one corner of the triangles, and the second taking two corners.

  • +
  • If 3 arguments are given, then compute a three-way cross-correlation function.

  • +
+

All arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+

Note

+

For a correlation of multiple catalogs, it typically matters which corner of the +triangle comes from which catalog, which is not kept track of by this function. +The final accumulation will have d1 > d2 > d3 regardless of which input catalog +appears at each corner. The class which keeps track of which catalog appears +in each position in the triangle is GGGCrossCorrelation.

+
+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first G field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second G field. +(default: None)

  • +
  • cat3 (Catalog) – A catalog or list of catalogs for the third G field. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr3.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_auto(cat, *, metric=None, num_threads=None)[source]
+

Process a single catalog, accumulating the auto-correlation.

+

This accumulates the auto-correlation for the given catalog. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+
Parameters
+
    +
  • cat (Catalog) – The catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, cat3, *, metric=None, num_threads=None)[source]
+

Process a set of three catalogs, accumulating the 3pt cross-correlation.

+

This accumulates the cross-correlation for the given catalogs as part of a larger +auto-correlation calculation. E.g. when splitting up a large catalog into patches, +this is appropriate to use for the cross correlation between different patches +as part of the complete auto-correlation of the full catalog.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • cat3 (Catalog) – The third catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross12(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle comes from the first catalog, and two come from the second.

+

This accumulates the cross-correlation for the given catalogs as part of a larger +auto-correlation calculation. E.g. when splitting up a large catalog into patches, +this is appropriate to use for the cross correlation between different patches +as part of the complete auto-correlation of the full catalog.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process. (1 point in each triangle will come +from this catalog.)

  • +
  • cat2 (Catalog) – The second catalog to process. (2 points in each triangle will come +from this catalog.)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The GGGCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

As described in the doc string for GGGCorrelation, we use the “natural components” of +the shear 3-point function described by Schneider & Lombardi (2003) using the triangle +centroid as the projection point. There are 4 complex-valued natural components, so there +are 8 columns in the output file.

+

The output file will include the following columns:

  • r_nom – The nominal center of the bin in r = d2, where d1 > d2 > d3

  • u_nom – The nominal center of the bin in u = d3/d2

  • v_nom – The nominal center of the bin in v = +-(d1-d2)/d3

  • meand1 – The mean value \(\langle d1\rangle\) of triangles that fell into each bin

  • meanlogd1 – The mean value \(\langle \log(d1)\rangle\) of triangles that fell into each bin

  • meand2 – The mean value \(\langle d2\rangle\) of triangles that fell into each bin

  • meanlogd2 – The mean value \(\langle \log(d2)\rangle\) of triangles that fell into each bin

  • meand3 – The mean value \(\langle d3\rangle\) of triangles that fell into each bin

  • meanlogd3 – The mean value \(\langle \log(d3)\rangle\) of triangles that fell into each bin

  • meanu – The mean value \(\langle u\rangle\) of triangles that fell into each bin

  • meanv – The mean value \(\langle v\rangle\) of triangles that fell into each bin

  • gam0r – The real part of the estimator of \(\Gamma_0(r,u,v)\)

  • gam0i – The imag part of the estimator of \(\Gamma_0(r,u,v)\)

  • gam1r – The real part of the estimator of \(\Gamma_1(r,u,v)\)

  • gam1i – The imag part of the estimator of \(\Gamma_1(r,u,v)\)

  • gam2r – The real part of the estimator of \(\Gamma_2(r,u,v)\)

  • gam2i – The imag part of the estimator of \(\Gamma_2(r,u,v)\)

  • gam3r – The real part of the estimator of \(\Gamma_3(r,u,v)\)

  • gam3i – The imag part of the estimator of \(\Gamma_3(r,u,v)\)

  • sigma_gam0 – The sqrt of the variance estimate of \(\Gamma_0\)

  • sigma_gam1 – The sqrt of the variance estimate of \(\Gamma_1\)

  • sigma_gam2 – The sqrt of the variance estimate of \(\Gamma_2\)

  • sigma_gam3 – The sqrt of the variance estimate of \(\Gamma_3\)

  • weight – The total weight of triangles contributing to each bin

  • ntri – The number of triangles contributing to each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+
+writeMap3(file_name, *, R=None, file_type=None, precision=None)[source]
+

Write the aperture mass skewness based on the correlation function to the +file, file_name.

+

The output file will include the following columns:

  • R – The aperture radius

  • Map3 – An estimate of \(\langle M_{ap}^3\rangle(R)\) (cf. calculateMap3)

  • Map2Mx – An estimate of \(\langle M_{ap}^2 M_\times\rangle(R)\)

  • MapMx2 – An estimate of \(\langle M_{ap} M_\times^2\rangle(R)\)

  • Mx3 – An estimate of \(\langle M_\times^3\rangle(R)\)

  • sig_map – The sqrt of the variance estimate of each of these

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • R (array) – The R values at which to calculate the statistics. +(default: None, which means use self.rnom)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
+
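
A brief usage sketch (the file name and radii are hypothetical; ggg is assumed to be a GGGCorrelation that has already been processed, and np is numpy):

>>> ggg.writeMap3('ggg_map3.fits', R=np.array([2., 5., 10., 20.]))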
+
+
+ +
+ +
+
+class treecorr.GGGCrossCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr3.BinnedCorr3

+

This class handles the calculation of a 3-point shear-shear-shear cross-correlation function.

+

For 3-point cross correlations, it matters which of the two or three fields falls on each corner of the triangle. E.g. is field 1 on the corner opposite d1 (the longest side of the triangle) or is it field 2 (or 3) there? This is in contrast to the 2-point correlation, where the symmetry of the situation means that it doesn't matter which point is identified with each field. This makes it significantly more complicated to keep track of all the relevant information for a 3-point cross correlation function.

+

The GGGCorrelation class holds a single set of \(\Gamma\) functions describing all +possible triangles, parameterized according to their relative side lengths ordered as +d1 > d2 > d3.

+

For a cross-correlation of two fields: G1 - G1 - G2 (i.e. the G1 field is at two of the +corners and G2 is at one corner), then we need three sets of these \(\Gamma\) functions +to capture all of the triangles, since the G2 points may be opposite d1 or d2 or d3. +For a cross-correlation of three fields: G1 - G2 - G3, we need six sets, to account for +all of the possible permutations relative to the triangle sides.

+

Therefore, this class holds 6 instances of GGGCorrelation, which in turn hold the +information about triangles in each of the relevant configurations. We name these:

+
+
Attributes
+
    +
  • g1g2g3 – Triangles where G1 is opposite d1, G2 is opposite d2, G3 is opposite d3.

  • +
  • g1g3g2 – Triangles where G1 is opposite d1, G3 is opposite d2, G2 is opposite d3.

  • +
  • g2g1g3 – Triangles where G2 is opposite d1, G1 is opposite d2, G3 is opposite d3.

  • +
  • g2g3g1 – Triangles where G2 is opposite d1, G3 is opposite d2, G1 is opposite d3.

  • +
  • g3g1g2 – Triangles where G3 is opposite d1, G1 is opposite d2, G2 is opposite d3.

  • +
  • g3g2g1 – Triangles where G3 is opposite d1, G2 is opposite d2, G1 is opposite d3.

  • +
+
+
+

If for instance G2 and G3 are the same field, then e.g. g1g2g3 and g1g3g2 will have +the same values.
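
As an illustrative sketch of accessing these (the catalogs and binning choices here are hypothetical; the attribute names are the ones listed above, and each one is a regular GGGCorrelation):

>>> gggc = treecorr.GGGCrossCorrelation(min_sep=1., max_sep=100., nbins=10)
>>> gggc.process(cat1, cat2, cat3)
>>> g123 = gggc.g1g2g3    # Triangles with G1 opposite d1, G2 opposite d2, G3 opposite d3
>>> gam0 = g123.gam0      # Its natural components can then be accessed as usual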

+

Objects of this class also hold the following attributes, which are identical in each of the above GGGCorrelation instances.

+
+
Attributes
+
    +
  • nbins – The number of bins in logr where r = d2

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
  • nubins – The number of bins in u where u = d3/d2

  • +
  • ubin_size – The size of the bins in u

  • +
  • min_u – The minimum u being considered

  • +
  • max_u – The maximum u being considered

  • +
  • nvbins – The number of bins in v where v = +-(d1-d2)/d3

  • +
  • vbin_size – The size of the bins in v

  • +
  • min_v – The minimum v being considered

  • +
  • max_v – The maximum v being considered

  • +
  • logr1d – The nominal centers of the nbins bins in log(r).

  • +
  • u1d – The nominal centers of the nubins bins in u.

  • +
  • v1d – The nominal centers of the nvbins bins in v.

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_cross directly, +then the units will not be applied to meanr or meanlogr until the finalize +function is called.

+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr3, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr3 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two GGGCrossCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second GGGCrossCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both GGGCrossCorrelation objects should not have had +finalize called yet. Then, after adding them together, you should call finalize +on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize GGGCrossCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(varg1, varg2, varg3)[source]
+

Finalize the calculation of the correlation function.

+

The process_cross command accumulates values in each bin, so it can be called multiple times if appropriate. Afterwards, this command finishes the calculation by dividing by the total weight.

+
+
Parameters
+
    +
  • varg1 (float) – The shear variance for the first field that was correlated.

  • +
  • varg2 (float) – The shear variance for the second field that was correlated.

  • +
  • varg3 (float) – The shear variance for the third field that was correlated.

  • +
+
+
+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

In this case, the concatenation of getStat() for each combination in the following order: g1g2g3, g1g3g2, g2g1g3, g2g3g1, g3g1g2, g3g2g1.

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

In this case, the concatenation of getWeight() for each combination in the following +order: g1g2g3, g1g3g2, g2g1g3, g2g3g1, g3g1g2, g3g2g1.

+
+ +
+
+property nonzero
+

Return whether there are any values accumulated yet. (i.e. ntri > 0)

+
+ +
+
+process(cat1, cat2, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3.

+
    +
  • If 2 arguments are given, then compute a cross-correlation function with the +first catalog taking one corner of the triangles, and the second taking two corners.

  • +
  • If 3 arguments are given, then compute a three-way cross-correlation function.

  • +
+

All arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first G field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second G field.

  • +
  • cat3 (Catalog) – A catalog or list of catalogs for the third G field. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr3.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, cat3, *, metric=None, num_threads=None)[source]
+

Process a set of three catalogs, accumulating the 3pt cross-correlation.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • cat3 (Catalog) – The third catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross12(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle comes from the first catalog, and two come from the second.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+

Note

+

This only adds to the attributes g1g2g3, g2g1g3, g2g3g1, not the ones where +3 comes before 2. When running this via the regular process method, it will +combine them at the end to make sure g1g2g3 == g1g3g2, etc. for a complete +calculation of the 1-2 cross-correlation.

+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process. (1 point in each triangle will come +from this catalog.)

  • +
  • cat2 (Catalog) – The second catalog to process. (2 points in each triangle will come +from this catalog.)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The GGGCrossCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the cross-correlation functions to the file, file_name.

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
diff --git a/docs/_build/html/guide.html b/docs/_build/html/guide.html
new file mode 100644
index 00000000..89a4d91e
--- /dev/null
+++ b/docs/_build/html/guide.html
@@ -0,0 +1,373 @@

Getting Started Guide

+
+

Jupyter Tutorial

+

This page covers many of the same points as the Jupyter notebook tutorial available in the TreeCorr repo. You may find it useful to work through that as well as, or instead of, reading this guide.

+
+
+

Shear-shear auto-correlation

+

Let’s start with how to calculate a shear-shear two-point auto-correlation. +It’s not necessarily the simplest choice of correlation, but this specific +calculation was the original reason I wrote TreeCorr, so it’s close to my heart. +The basic pattern is as follows:

+
cat = treecorr.Catalog(file_name, config)
+gg = treecorr.GGCorrelation(config)
+gg.process(cat)
+gg.write(out_file_name)
+
+
+

Here file_name is the name of some input file, which has the shear and position data of your galaxies. config is a dictionary with all the configuration parameters about how to load the data and define the binning. We'll expand that out shortly. Finally, out_file_name is some output file to write the results.
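
For instance, such a config dict might look something like this sketch (the column names are hypothetical; these particular keys are the ones discussed in the sections below):

config = dict(ra_col='RA', dec_col='DEC', ra_units='deg', dec_units='deg',
              g1_col='E1', g2_col='E2',
              min_sep=1., max_sep=100., nbins=10, sep_units='arcmin')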

+

You can do a cross-correlation between two sets of galaxies very similarly:

+
cat1 = treecorr.Catalog(file_name1, config1)
+cat2 = treecorr.Catalog(file_name2, config2)
+gg.process(cat1, cat2)
+
+
+

If you would rather not write the results to an output file, but maybe plot them up or do some +further calculation with them, you can access the resulting fields directly as numpy arrays:

+
xip = gg.xip            # The real part of xi+
+xim = gg.xim            # The real part of xi-
+logr = gg.logr          # The nominal center of each bin
+meanlogr = gg.meanlogr  # The mean <log(r)> within the bins
+varxi = gg.varxi        # The variance of each xi+ or xi- value
+                        # taking into account shape noise only
+
+
+

See the doc string for GGCorrelation for other available attributes.
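
For instance, a quick look at the results might go something like this (a sketch using matplotlib, which is not required by TreeCorr; note that varxi includes shape noise only, as mentioned above):

import numpy
import matplotlib.pyplot as plt

r = numpy.exp(gg.meanlogr)     # The mean separation in each bin
sig = numpy.sqrt(gg.varxi)     # Shot-noise error bars
plt.errorbar(r, gg.xip, yerr=sig, label=r'$\xi_+$')
plt.errorbar(r, gg.xim, yerr=sig, label=r'$\xi_-$')
plt.xscale('log')
plt.legend()
plt.show()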

+
+
+

Other Two-point Correlation Classes

+

The other kinds of correlations each have their own class:

  • NNCorrelation – count-count correlations

  • KKCorrelation – kappa-kappa correlations

  • NGCorrelation – count-shear correlations

  • NKCorrelation – count-kappa correlations

  • KGCorrelation – kappa-shear correlations

You should see their doc strings for details, but they all work similarly. +For the last three, there is no auto-correlation option, of course, just the cross-correlation.

+

The other main difference between these correlation classes and GG is that there is only a single correlation function, so it is called xi rather than xip and xim.

+

Also, NN does not have any kind of xi attribute. You need to perform an additional +calculation involving random catalogs for that. +See Using random catalogs below for more details.

+
+
+

Loading a Catalog

+

OK, now let’s get into some of the details about how to load data into a Catalog.

+

To specify the names of the columns in the input file, as well as other details about +how to interpret the columns, you can either use a config dict, as we did above, +or specify keyword arguments. Either way is fine, although to be honest, the keywords +are probably more typical, so we’ll use that from here on.

+

For a shear catalog, you need to specify the position of each galaxy and the +shear values, g1 and g2. You do this by stating which column in the input catalog +corresponds to each value you need. For example:

+
cat = treecorr.Catalog(file_name='input_cat.fits',
+                       x_col='X_IMAGE', y_col='Y_IMAGE', g1_col='E1', g2_col='E2')
+
+
+

For FITS files, you specify the columns by name, which correspond to the column name +in the FITS table. For ASCII input files, you specify the column number instead:

+
cat = treecorr.Catalog(file_name='input_cat.dat',
+                       x_col=2, y_col=3, g1_col=5, g2_col=6)
+
+
+

where the first column is numbered 1, not 0.

+

When the positions are given as right ascension and declination on the celestial +sphere, rather than x and y on a flat projection (like an image), you also need +to specify what units the angles use:

+
cat = treecorr.Catalog(file_name='input_cat.fits',
+                       ra_col='RA', dec_col='DEC', g1_col='E1', g2_col='E2',
+                       ra_units='hours', dec_units='degrees')
+
+
+

For the catalog of the N part of a calculation, you can skip the g1_col and g2_col. +Those only need positions. For a K correlation, you should specify k_col instead:

+
cat = treecorr.Catalog(file_name='input_cat.fits',
+                       ra_col='RA', dec_col='DEC', k_col='KAPPA',
+                       ra_units='hours', dec_units='degrees')
+
+
+

See the documentation for Catalog for more options, such as how to flip the sign of +g1 or g2 (unfortunately not everyone follows the same conventions), use weights, +skip objects with specific flags, and more.

+
+
+

Building a Catalog from numpy arrays

+

If the provided tools for reading in the data from an input file are insufficient, or if +the data you want to use are being generated in Python natively, so there is no file +to read, then you can instead build the Catalog directly from numpy arrays:

+
x = numpy.array(x_values)    # These might be the output of
+y = numpy.array(y_values)    # some calculation...
+g1 = numpy.array(g1_values)
+g2 = numpy.array(g2_values)
+
+cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
+
+
+

You always need to include either x and y or ra and dec. +Which other columns you need depends on what kind of correlation function you want to calculate +from the data. For GG, you need g1 and g2, but for K correlations, you would use +k instead.

+

You can optionally provide a weight column as well with w if desired. This will then perform a weighted correlation using those weights.
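
For example (the weights here are hypothetical inverse-variance weights, based on a per-object shape measurement uncertainty sigma_e):

w = 1. / sigma_e**2
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, w=w)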

+

Again, see the doc string for Catalog for more information.

+
+
+

Defining the binning

+

For the default bin_type, "Log", the correlation function is binned in equally spaced bins in \(\log(r)\), where \(r\) represents the separation between two points being correlated.

+

Typically you would specify the minimum and +maximum separation you want accumulated as min_sep and max_sep respectively, +along with nbins to specify how many bins to use:

+
gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=10)
+
+
+
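
Alternatively, you can give bin_size rather than nbins, and TreeCorr will work out how many bins are needed to span the range (a sketch; for the default "Log" binning, bin_size is the bin width in log(r)):

gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., bin_size=0.1)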

When the positions are given as (ra, dec), then the separations are also angles, +so you need to specify what units to use. These do not have to be the same units +as you used for either ra or dec:

+
gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=10, sep_units='arcmin')
+
+
+

Most correlation functions of interest in astronomy are roughly power laws, so log +binning puts similar signal-to-noise in each bin, making it often a good choice. +However, for some use cases, linear binning is more appropriate. This is possible +using the bin_type parameter:

+
gg = treecorr.GGCorrelation(min_sep=10., max_sep=15., nbins=5, bin_type='Linear')
+
+
+

See Binning for more details about this option and the "TwoD" binning, +as well as some other options related to binning.

+

Finally, the default way of calculating separations is a normal Euclidean metric. +However, TreeCorr implements a number of other metrics as well, which are useful +in various situations. See Metrics for details.

+
+
+

Three-point Correlation Classes

+

TreeCorr can also do three-point correlations, to measure how the product of three fields +depends on the size and shape of the triangle connecting three points. +So far, we have only implemented the auto-correlation three-point functions:

  • NNNCorrelation – count-count-count correlations

  • KKKCorrelation – kappa-kappa-kappa correlations

  • GGGCorrelation – shear-shear-shear correlations

These classes are significantly more complicated than the two-point ones, +since they have to deal with the geometry of the triangles being binned. +See their doc strings for more details.

+
+
+

Using random catalogs

+

For the NN and NNN correlations, the raw calculation is not sufficient to produce the real correlation function. You also need to account for the survey geometry (edges, mask, etc.) by running the same calculation with a random catalog (or several) that has a uniform density, but the same geometry:

+
data = treecorr.Catalog(data_file, config)
+rand = treecorr.Catalog(rand_file, config)
+dd = treecorr.NNCorrelation(config)
+dr = treecorr.NNCorrelation(config)
+rr = treecorr.NNCorrelation(config)
+dd.process(data)
+dr.process(data,rand)
+rr.process(rand)
+xi, varxi = dd.calculateXi(rr,dr)
+
+
+

This calculates xi = (DD-2DR+RR)/RR for each bin. This is the Landy-Szalay estimator, +which is the most widely used estimator for count-count correlation functions. However, +if you want to use a simpler estimator xi = (DD-RR)/RR, then you can omit the dr parameter. +The simpler estimator is slightly biased though, so this is not recommended.

+

After calling calculateXi, the dd object above will have xi +and varxi attributes, which store the results of this calculation.

+

The NG and NK classes also have a calculateXi method to allow +for the use of compensated estimators in those cases as well. +Calling this function updates the xi attribute from the uncompensated value to the +compensated value. +These correlations do not suffer as much from masking effects, +so the compensation is not as necessary. However, it does produce a slightly better estimate +of the correlation function if you are able to use a random catalog.

+

Furthermore, the process functions can take lists of Catalogs if desired, +in which case it will +do all the possible combinations. This is especially relevant for doing randoms, +since the statistics get better if you generate several randoms and do all the correlations to beat down the noise:

+
rand_list = [ treecorr.Catalog(f,config) for f in rand_files ]
+dr.process(data, rand_list)
+rr.process(rand_list)
+
+
+

The corresponding three-point NNN calculation is even more complicated, since there are 8 total combinations that need to be computed: zeta = (DDD-DDR-DRD-RDD+DRR+RDR+RRD-RRR)/RRR. Because of the triangle geometry, the different orderings of data and random points (e.g. DDR, DRD, and RDD) are not equivalent, so all 8 need to be computed. See the docstring for calculateZeta for more details.
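
A hedged sketch of how this can look in code (the names ddd, drr, rdd, rrr are just variable names; check the calculateZeta docstring for the exact signature in your version):

ddd = treecorr.NNNCorrelation(config)
drr = treecorr.NNNCorrelation(config)
rdd = treecorr.NNNCorrelation(config)
rrr = treecorr.NNNCorrelation(config)
ddd.process(data)
drr.process(data, rand)   # 1 data point and 2 random points per triangle
rdd.process(rand, data)   # 1 random point and 2 data points per triangle
rrr.process(rand)
zeta, varzeta = ddd.calculateZeta(rrr, drr=drr, rdd=rdd)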

+
+
+

Manually accumulating the correlation function

+

For even more control over the calculation, you can break up the steps in the +process functions. There are typically three steps:

+
  1. Calculate the shear variance or kappa variance as needed (i.e. for anything but NN correlations).

  2. Accumulate the correlations into the bins for each auto-correlation and cross-correlation desired.

  3. Finalize the calculation.
+

If you have several pairs of catalogs that you want to accumulate into a single correlation +function, you could write the following:

+
lens_cats = [ treecorr.Catalog(f,config) for f in lens_files ]
+source_cats = [ treecorr.Catalog(f,config) for f in source_files ]
+ng = treecorr.NGCorrelation(config)
+varg = treecorr.calculateVarG(source_cats)
+for c1, c2 in zip(lens_cats, source_cats):
+    ng.process_cross(c1,c2)
+ng.finalize(varg)
+
+
+

In addition to process_cross, classes that allow auto-correlations have a process_auto method for manually processing auto-correlations. See the doc strings for these methods for more information.
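
For instance, a sketch of a full auto-correlation accumulated manually from a list of catalogs (cat_files is a hypothetical list of file names; this mirrors the NG example above, but adds the process_auto calls):

cats = [ treecorr.Catalog(f,config) for f in cat_files ]
gg = treecorr.GGCorrelation(config)
varg = treecorr.calculateVarG(cats)
for i, c1 in enumerate(cats):
    gg.process_auto(c1)              # Pairs within this catalog
    for c2 in cats[i+1:]:
        gg.process_cross(c1, c2)     # Pairs spanning two catalogs
gg.finalize(varg, varg)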

+

Breaking up the calculation manually like this is probably not often necessary anymore. It used to be useful for dividing a calculation among several machines, which would each save their results to disk. These results could then be reassembled and finalized once all the machines were done.

+

However, this work mode is now incorporated directly into TreeCorr via the use of "patches". See Patches for details about how to automatically divide up your input catalog into patches and to farm the calculation out to multiple machines using MPI.
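
A minimal sketch of the MPI mode (this assumes the catalog was built with patches, e.g. npatch > 1; see Patches for the full story):

from mpi4py import MPI

comm = MPI.COMM_WORLD
gg.process(cat, comm=comm, low_mem=True)
if comm.rank == 0:          # Only rank 0 holds the final computation
    gg.write(out_file_name)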

+
+
+ + +
+
+ +
+
+
+
diff --git a/docs/_build/html/history.html b/docs/_build/html/history.html
new file mode 100644
index 00000000..a82eb21a
--- /dev/null
+++ b/docs/_build/html/history.html
@@ -0,0 +1,120 @@
Previous History — TreeCorr 4.3.0 documentation

diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html
new file mode 100644
index 00000000..3e1958e4
--- /dev/null
+++ b/docs/_build/html/index.html
@@ -0,0 +1,245 @@

TreeCorr Documentation

+
+ +
+ +
+ + +
+
+ +
+
+
+
diff --git a/docs/_build/html/kg.html b/docs/_build/html/kg.html
new file mode 100644
index 00000000..f9de1fc0
--- /dev/null
+++ b/docs/_build/html/kg.html
@@ -0,0 +1,436 @@

KGCorrelation: Kappa-shear correlations

+
+
+class treecorr.KGCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr2.BinnedCorr2

+

This class handles the calculation and storage of a 2-point kappa-shear correlation +function.

+
+

Note

+

While we use the term kappa (\(\kappa\)) here and the letter K in various places, +in fact any scalar field will work here. For example, you can use this to compute +correlations of some survey property, such as seeing, with shear, where “kappa” would +really be the measured property, e.g. the observed sizes of the stars.
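
E.g. a sketch of such a use, with star sizes standing in for kappa (the column arrays here are hypothetical):

>>> star_cat = treecorr.Catalog(ra=ra, dec=dec, k=star_size,
...                             ra_units='deg', dec_units='deg')
>>> kg = treecorr.KGCorrelation(config)
>>> kg.process(star_cat, shear_cat)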

+
+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
+
+
+

In addition, the following attributes are numpy arrays of length (nbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • meanr – The (weighted) mean value of r for the pairs in each bin. +If there are no pairs in a bin, then exp(logr) will be used instead.

  • +
  • meanlogr – The (weighted) mean value of log(r) for the pairs in each bin. +If there are no pairs in a bin, then logr will be used instead.

  • +
  • xi – The correlation function, \(\xi(r) = \langle \kappa\, \gamma_T\rangle\).

  • +
  • xi_im – The imaginary part of \(\xi(r)\).

  • +
  • varxi – An estimate of the variance of \(\xi\)

  • +
  • weight – The total weight in each bin.

  • +
  • npairs – The number of pairs going into each bin (including pairs where one or +both objects have w=0).

  • +
  • cov – An estimate of the full covariance matrix.

  • +
+
+
+
+

Note

+

The default method for estimating the variance and covariance attributes (varxi, +and cov) is ‘shot’, which only includes the shape noise propagated into the final +correlation. This does not include sample variance, so it is always an underestimate of +the actual variance. To get better estimates, you need to set var_method to something +else and use patches in the input catalog(s). cf. Covariance Estimates.

+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_cross, +then the units will not be applied to meanr or meanlogr until the finalize +function is called.

+
+

The typical usage pattern is as follows:

+
>>> kg = treecorr.KGCorrelation(config)
+>>> kg.process(cat1,cat2)   # Calculate the cross-correlation
+>>> kg.write(file_name)     # Write out to a file.
+>>> xi = kg.xi              # Or access the correlation function directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr2, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr2 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two KGCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second KGCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both KGCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize KGCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(vark, varg)[source]
+

Finalize the calculation of the correlation function.

+

The process_cross command accumulates values in each bin, so it can be called +multiple times if appropriate. Afterwards, this command finishes the calculation +by dividing each column by the total weight.

+
+
Parameters
+
    +
  • vark (float) – The kappa variance for the first field.

  • +
  • varg (float) – The shear variance per component for the second field.

  • +
+
+
+
+ +
+
+process(cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the correlation function.

+

Both arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the K field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the G field.

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr2.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_pairwise(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation, only using +the corresponding pairs of objects in each catalog.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The KGCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

The output file will include the following columns:

  • r_nom – The nominal center of the bin in r

  • meanr – The mean value \(\langle r\rangle\) of pairs that fell into each bin

  • meanlogr – The mean value \(\langle \log(r)\rangle\) of pairs that fell into each bin

  • kgamT – The real part of the correlation function, \(\langle \kappa\, \gamma_T\rangle\)

  • kgamX – The imag part of the correlation function, \(\langle \kappa\, \gamma_\times\rangle\)

  • sigma – The sqrt of the variance estimate of both of these

  • weight – The total weight contributing to each bin

  • npairs – The total number of pairs in each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
diff --git a/docs/_build/html/kk.html b/docs/_build/html/kk.html
new file mode 100644
index 00000000..27c7fe51
--- /dev/null
+++ b/docs/_build/html/kk.html
@@ -0,0 +1,459 @@

KKCorrelation: Kappa-kappa correlations

+
+
+class treecorr.KKCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr2.BinnedCorr2

+

This class handles the calculation and storage of a 2-point kappa-kappa correlation +function.

+
+

Note

+

While we use the term kappa (\(\kappa\)) here and the letter K in various places, +in fact any scalar field will work here. For example, you can use this to compute +correlations of the CMB temperature fluctuations, where “kappa” would really be +\(\Delta T\).

+
+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
+
+
+

In addition, the following attributes are numpy arrays of length (nbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • meanr – The (weighted) mean value of r for the pairs in each bin. +If there are no pairs in a bin, then exp(logr) will be used instead.

  • +
  • meanlogr – The (weighted) mean value of log(r) for the pairs in each bin. +If there are no pairs in a bin, then logr will be used instead.

  • +
  • xi – The correlation function, \(\xi(r)\)

  • +
  • varxi – An estimate of the variance of \(\xi\)

  • +
  • weight – The total weight in each bin.

  • +
  • npairs – The number of pairs going into each bin (including pairs where one or +both objects have w=0).

  • +
  • cov – An estimate of the full covariance matrix.

  • +
+
+
+
+

Note

+

The default method for estimating the variance and covariance attributes (varxi, +and cov) is ‘shot’, which only includes the shot noise propagated into the final +correlation. This does not include sample variance, so it is always an underestimate of +the actual variance. To get better estimates, you need to set var_method to something +else and use patches in the input catalog(s). cf. Covariance Estimates.

+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_auto and/or +process_cross, then the units will not be applied to meanr or meanlogr until +the finalize function is called.

+
+

The typical usage pattern is as follows:

+
>>> kk = treecorr.KKCorrelation(config)
+>>> kk.process(cat)         # For auto-correlation.
+>>> kk.process(cat1,cat2)   # For cross-correlation.
+>>> kk.write(file_name)     # Write out to a file.
+>>> xi = kk.xi              # Or access the correlation function directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr2, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr2 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two KKCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second KKCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both KKCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize KKCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(vark1, vark2)[source]
+

Finalize the calculation of the correlation function.

+

The process_auto and process_cross commands accumulate values in each bin, +so they can be called multiple times if appropriate. Afterwards, this command +finishes the calculation by dividing each column by the total weight.

+
+
Parameters
+
    +
  • vark1 (float) – The kappa variance for the first field.

  • +
  • vark2 (float) – The kappa variance for the second field.

  • +
+
+
+
+ +
+
+process(cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the correlation function.

+
    +
  • If only 1 argument is given, then compute an auto-correlation function.

  • +
  • If 2 arguments are given, then compute a cross-correlation function.

  • +
+

Both arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first K field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second K field, if any. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr2.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_auto(cat, *, metric=None, num_threads=None)[source]
+

Process a single catalog, accumulating the auto-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat (Catalog) – The catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_pairwise(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation, only using +the corresponding pairs of objects in each catalog.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The KKCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

The output file will include the following columns:

  • r_nom – The nominal center of the bin in r

  • meanr – The mean value \(\langle r \rangle\) of pairs that fell into each bin

  • meanlogr – The mean value \(\langle \log(r) \rangle\) of pairs that fell into each bin

  • xi – The estimate of the correlation function xi(r)

  • sigma_xi – The sqrt of the variance estimate of xi(r)

  • weight – The total weight contributing to each bin

  • npairs – The total number of pairs in each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
diff --git a/docs/_build/html/kkk.html b/docs/_build/html/kkk.html
new file mode 100644
index 00000000..57945252
--- /dev/null
+++ b/docs/_build/html/kkk.html
@@ -0,0 +1,802 @@

KKKCorrelation: Kappa-kappa-kappa correlations

+
+
+class treecorr.KKKCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr3.BinnedCorr3

+

This class handles the calculation and storage of a 3-point kappa-kappa-kappa correlation +function.

+
+

Note

+

While we use the term kappa (\(\kappa\)) here and the letter K in various places, +in fact any scalar field will work here. For example, you can use this to compute +correlations of the CMB temperature fluctuations, where “kappa” would really be +\(\Delta T\).

+
+

See the doc string of BinnedCorr3 for a description of how the triangles are binned.

+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr where r = d2

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
  • nubins – The number of bins in u where u = d3/d2

  • +
  • ubin_size – The size of the bins in u

  • +
  • min_u – The minimum u being considered

  • +
  • max_u – The maximum u being considered

  • +
  • nvbins – The number of bins in v where v = +-(d1-d2)/d3

  • +
  • vbin_size – The size of the bins in v

  • +
  • min_v – The minimum v being considered

  • +
  • max_v – The maximum v being considered

  • +
  • logr1d – The nominal centers of the nbins bins in log(r).

  • +
  • u1d – The nominal centers of the nubins bins in u.

  • +
  • v1d – The nominal centers of the nvbins bins in v.

  • +
+
+
+

In addition, the following attributes are numpy arrays whose shape is (nbins, nubins, nvbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • u – The nominal center of the bin in u.

  • +
  • v – The nominal center of the bin in v.

  • +
  • meand1 – The (weighted) mean value of d1 for the triangles in each bin.

  • +
  • meanlogd1 – The mean value of log(d1) for the triangles in each bin.

  • +
  • meand2 – The (weighted) mean value of d2 (aka r) for the triangles in each bin.

  • +
  • meanlogd2 – The mean value of log(d2) for the triangles in each bin.

  • +
  • meand3 – The (weighted) mean value of d3 for the triangles in each bin.

  • +
  • meanlogd3 – The mean value of log(d3) for the triangles in each bin.

  • +
  • meanu – The mean value of u for the triangles in each bin.

  • +
  • meanv – The mean value of v for the triangles in each bin.

  • +
  • zeta – The correlation function, \(\zeta(r,u,v)\).

  • +
  • varzeta – The variance of \(\zeta\), only including the shot noise propagated into +the final correlation. This does not include sample variance, so it is always +an underestimate of the actual variance.

  • +
  • weight – The total weight in each bin.

  • +
  • ntri – The number of triangles going into each bin (including those where one or +more objects have w=0).

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_auto and/or +process_cross, then the units will not be applied to meanr or meanlogr until +the finalize function is called.

+
+

The typical usage pattern is as follows:

+
>>> kkk = treecorr.KKKCorrelation(config)
+>>> kkk.process(cat)              # For auto-correlation.
+>>> kkk.process(cat1,cat2,cat3)   # For cross-correlation.
+>>> kkk.write(file_name)          # Write out to a file.
+>>> zeta = kkk.zeta               # To access zeta directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr3, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr3 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two KKKCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second KKKCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both KKKCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize KKKCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(vark1, vark2, vark3)[source]
+

Finalize the calculation of the correlation function.

+

The process_auto and process_cross commands accumulate values in each bin, +so they can be called multiple times if appropriate. Afterwards, this command +finishes the calculation by dividing by the total weight.

+
+
Parameters
+
    +
  • vark1 (float) – The kappa variance for the first field.

  • +
  • vark2 (float) – The kappa variance for the second field.

  • +
  • vark3 (float) – The kappa variance for the third field.

  • +
+
+
+
+ +
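For example, a minimal sketch of this split-up workflow (the catalog cat and the binning are illustrative; treecorr.calculateVarK is assumed here as the way to estimate the kappa variance of the input):

>>> kkk = treecorr.KKKCorrelation(min_sep=1., max_sep=100., nbins=10)
>>> kkk.process_auto(cat)               # Accumulate weighted sums; can be repeated.
>>> vark = treecorr.calculateVarK(cat)  # Weighted variance of the kappa values.
>>> kkk.finalize(vark, vark, vark)      # Divide by the total weight to finish.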
+
+process(cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the 3pt correlation function.

+
    +
  • If only 1 argument is given, then compute an auto-correlation function.

  • +
  • If 2 arguments are given, then compute a cross-correlation function with the +first catalog taking one corner of the triangles, and the second taking two corners.

  • +
  • If 3 arguments are given, then compute a three-way cross-correlation function.

  • +
+

All arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+

Note

+

For a correlation of multiple catalogs, it typically matters which corner of the +triangle comes from which catalog, which is not kept track of by this function. +The final accumulation will have d1 > d2 > d3 regardless of which input catalog +appears at each corner. The class which keeps track of which catalog appears +in each position in the triangle is KKKCrossCorrelation.

+
+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first K field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second K field. +(default: None)

  • +
  • cat3 (Catalog) – A catalog or list of catalogs for the third K field. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr3.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_auto(cat, *, metric=None, num_threads=None)[source]
+

Process a single catalog, accumulating the auto-correlation.

+

This accumulates the auto-correlation for the given catalog. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+
Parameters
+
    +
  • cat (Catalog) – The catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, cat3, *, metric=None, num_threads=None)[source]
+

Process a set of three catalogs, accumulating the 3pt cross-correlation.

+

This accumulates the cross-correlation for the given catalogs as part of a larger +auto-correlation calculation. E.g. when splitting up a large catalog into patches, +this is appropriate to use for the cross correlation between different patches +as part of the complete auto-correlation of the full catalog.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • cat3 (Catalog) – The third catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross12(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle comes from the first catalog, and two come from the second.

+

This accumulates the cross-correlation for the given catalogs as part of a larger +auto-correlation calculation. E.g. when splitting up a large catalog into patches, +this is appropriate to use for the cross correlation between different patches +as part of the complete auto-correlation of the full catalog.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process. (1 point in each triangle will come +from this catalog.)

  • +
  • cat2 (Catalog) – The second catalog to process. (2 points in each triangle will come +from this catalog.)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The KKKCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

The output file will include the following columns:

Column       Description
r_nom        The nominal center of the bin in r = d2 where d1 > d2 > d3
u_nom        The nominal center of the bin in u = d3/d2
v_nom        The nominal center of the bin in v = +-(d1-d2)/d3
meand1       The mean value \(\langle d1\rangle\) of triangles that fell into each bin
meanlogd1    The mean value \(\langle \log(d1)\rangle\) of triangles that fell into each bin
meand2       The mean value \(\langle d2\rangle\) of triangles that fell into each bin
meanlogd2    The mean value \(\langle \log(d2)\rangle\) of triangles that fell into each bin
meand3       The mean value \(\langle d3\rangle\) of triangles that fell into each bin
meanlogd3    The mean value \(\langle \log(d3)\rangle\) of triangles that fell into each bin
meanu        The mean value \(\langle u\rangle\) of triangles that fell into each bin
meanv        The mean value \(\langle v\rangle\) of triangles that fell into each bin
zeta         The estimator of \(\zeta(r,u,v)\)
sigma_zeta   The sqrt of the variance estimate of \(\zeta\)
weight       The total weight of triangles contributing to each bin
ntri         The number of triangles contributing to each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
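Since read does not check the configuration, a typical round trip repeats the binning explicitly. A sketch (the file name and binning are illustrative):

>>> kkk.write('kkk.fits')    # FITS output preserves the full information.
>>> kkk2 = treecorr.KKKCorrelation(min_sep=1., max_sep=100., nbins=10)
>>> kkk2.read('kkk.fits')    # Must use the same min_sep, max_sep, etc.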
+
+class treecorr.KKKCrossCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr3.BinnedCorr3

+

This class handles the calculation of a 3-point kappa-kappa-kappa cross-correlation function.

+

For 3-point cross correlations, it matters which of the two or three fields falls on each corner of the triangle. E.g. is field 1 on the corner opposite d1 (the longest side of the triangle) or is it field 2 (or 3) there? This is in contrast to the 2-point correlation, where the symmetry of the situation means that it doesn’t matter which point is identified with each field. This makes it significantly more complicated to keep track of all the relevant information for a 3-point cross-correlation function.

+

The KKKCorrelation class holds a single \(\zeta\) function describing all possible triangles, parameterized according to their relative side lengths ordered as d1 > d2 > d3.

+

For a cross-correlation of two fields, K1 - K1 - K2 (i.e. the K1 field is at two of the corners and K2 is at one corner), we need three of these \(\zeta\) functions to capture all of the triangles, since the K2 point may be opposite d1, d2, or d3. For a cross-correlation of three fields, K1 - K2 - K3, we need six sets, to account for all of the possible permutations relative to the triangle sides.

+

Therefore, this class holds 6 instances of KKKCorrelation, which in turn hold the +information about triangles in each of the relevant configurations. We name these:

+
+
Attributes
+
    +
  • k1k2k3 – Triangles where K1 is opposite d1, K2 is opposite d2, K3 is opposite d3.

  • +
  • k1k3k2 – Triangles where K1 is opposite d1, K3 is opposite d2, K2 is opposite d3.

  • +
  • k2k1k3 – Triangles where K2 is opposite d1, K1 is opposite d2, K3 is opposite d3.

  • +
  • k2k3k1 – Triangles where K2 is opposite d1, K3 is opposite d2, K1 is opposite d3.

  • +
  • k3k1k2 – Triangles where K3 is opposite d1, K1 is opposite d2, K2 is opposite d3.

  • +
  • k3k2k1 – Triangles where K3 is opposite d1, K2 is opposite d2, K1 is opposite d3.

  • +
+
+
+

If for instance K2 and K3 are the same field, then e.g. k1k2k3 and k1k3k2 will have +the same values.

+

Objects of this class also hold the following attributes, which are identical in each of the above KKKCorrelation instances.

+
+
Attributes
+
    +
  • nbins – The number of bins in logr where r = d2

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
  • nubins – The number of bins in u where u = d3/d2

  • +
  • ubin_size – The size of the bins in u

  • +
  • min_u – The minimum u being considered

  • +
  • max_u – The maximum u being considered

  • +
  • nvbins – The number of bins in v where v = +-(d1-d2)/d3

  • +
  • vbin_size – The size of the bins in v

  • +
  • min_v – The minimum v being considered

  • +
  • max_v – The maximum v being considered

  • +
  • logr1d – The nominal centers of the nbins bins in log(r).

  • +
  • u1d – The nominal centers of the nubins bins in u.

  • +
  • v1d – The nominal centers of the nvbins bins in v.

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_cross directly, +then the units will not be applied to meanr or meanlogr until the finalize +function is called.

+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr3, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr3 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two KKKCrossCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second KKKCrossCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both KKKCrossCorrelation objects should not have had +finalize called yet. Then, after adding them together, you should call finalize +on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize KKKCrossCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(vark1, vark2, vark3)[source]
+

Finalize the calculation of the correlation function.

+

The process_cross command accumulates values in each bin, so it can be called multiple times if appropriate. Afterwards, this command finishes the calculation by dividing by the total weight.

+
+
Parameters
+
    +
  • vark1 (float) – The kappa variance for the first field that was correlated.

  • +
  • vark2 (float) – The kappa variance for the second field that was correlated.

  • +
  • vark3 (float) – The kappa variance for the third field that was correlated.

  • +
+
+
+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

In this case, the concatenation of zeta.ravel() for each combination in the following +order: k1k2k3, k1k3k2, k2k1k3, k2k3k1, k3k1k2, k3k2k1.

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

In this case, the concatenation of getWeight() for each combination in the following +order: k1k2k3, k1k3k2, k2k1k3, k2k3k1, k3k1k2, k3k2k1.

+
+ +
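So the stacked statistic vector is six times the length of a single zeta array. A sketch, assuming catalogs cat1 and cat2 have already been set up:

>>> kkkc = treecorr.KKKCrossCorrelation(config)
>>> kkkc.process(cat1, cat2)    # the 1-2 cross-correlation
>>> len(kkkc.getStat()) == 6 * kkkc.k1k2k3.zeta.size
True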
+
+property nonzero
+

Return whether any values have been accumulated yet. (i.e. ntri > 0)

+
+ +
+
+process(cat1, cat2, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3.

+
    +
  • If 2 arguments are given, then compute a cross-correlation function with the +first catalog taking one corner of the triangles, and the second taking two corners.

  • +
  • If 3 arguments are given, then compute a three-way cross-correlation function.

  • +
+

All arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first K field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second K field.

  • +
  • cat3 (Catalog) – A catalog or list of catalogs for the third K field. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr3.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, cat3, *, metric=None, num_threads=None)[source]
+

Process a set of three catalogs, accumulating the 3pt cross-correlation.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • cat3 (Catalog) – The third catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross12(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle comes from the first catalog, and two come from the second.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+

Note

+

This only adds to the attributes k1k2k3, k2k1k3, k2k3k1, not the ones where +3 comes before 2. When running this via the regular process method, it will +combine them at the end to make sure k1k2k3 == k1k3k2, etc. for a complete +calculation of the 1-2 cross-correlation.

+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process. (1 point in each triangle will come +from this catalog.)

  • +
  • cat2 (Catalog) – The second catalog to process. (2 points in each triangle will come +from this catalog.)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The KKKCrossCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the cross-correlation functions to the file, file_name.

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/docs/_build/html/metric.html b/docs/_build/html/metric.html
new file mode 100644
index 00000000..50650fab
--- /dev/null
+++ b/docs/_build/html/metric.html
@@ -0,0 +1,278 @@

Metrics

+

The correlation functions need to know how to calculate distances between the points, +that is, the metric defining the space.

+

In most cases, you will probably want to use the default Metric, called “Euclidean”, +which just uses the normal Euclidean distance between two points. However, there are a few +other options, which are useful for various applications.

+

Both BinnedCorr2 and BinnedCorr3 take an optional +metric parameter, which should be one of the following string values:

+
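For example, the metric may be given either in the constructor or in the process call. A sketch (the binning values are illustrative, and cat is assumed to be a spherical (ra,dec) catalog):

>>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., nbins=20,
...                             sep_units='arcmin')
>>> gg.process(cat, metric='Arc')   # great circle rather than chord distances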
+

“Euclidean”

+

This is the default metric, and is the only current option for 2-dimensional flat correlations, +i.e. when the coordinates are given by (x,y), rather than either (x,y,z), (ra,dec), or (ra,dec,r).

+

For 2-dimensional coordinate systems, the distance is defined as

+

\(d_{\rm Euclidean} = \sqrt{(x_2-x_1)^2 + (y_2-y_1)^2}\)

+

For 3-dimensional coordinate systems, the distance is defined as

+

\(d_{\rm Euclidean} = \sqrt{(x_2-x_1)^2 + (y_2-y_1)^2 + (z_2-z_1)^2}\)

+

For spherical coordinates with distances, (ra,dec,r), the coordinates are first +converted to Cartesian coordinates and the above formula is used.

+

For spherical coordinates without distances, (ra, dec), the coordinates are placed on the +unit sphere and the above formula is used. This means that all distances are really chord +distances across the sphere, not great circle distances. For small angles, this is a small +correction, but as the angles get large, the difference between the great circle distance and +the chord distance becomes significant. The conversion formula is

+

\(d_{\rm GC} = 2 \arcsin(d_{\rm Euclidean} / 2)\)

+

TreeCorr applies this formula at the end as part of the finalize +function, so the meanr and meanlogr attributes +will be in terms of great circle distances. However, they will not necessarily be spaced +precisely uniformly in log(r), since the original bin spacing will have been set up in terms +of the chord distances.

+
+
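As a quick numpy check of the chord/great-circle conversion (illustrative only, not part of TreeCorr):

>>> import numpy as np
>>> theta = np.deg2rad(60.)           # a true great circle separation
>>> d_chord = 2 * np.sin(theta / 2)   # the chord distance on the unit sphere
>>> np.isclose(2 * np.arcsin(d_chord / 2), theta)
True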
+

“Arc”

+

This metric is only valid for spherical coordinates (ra,dec).

+

The distance is defined as

+

\(d_{\rm Arc} = 2 \arcsin(d_{\rm Euclidean} / 2)\)

+

where \(d_{\rm Euclidean}\) is the above “Euclidean” chord distance.

+

This metric is significantly slower than the “Euclidean” metric, since it requires trigonometric +functions for every pair calculation along the way, rather than just at the end. +In most cases, this extra care is unnecessary, but it provides a means to check if the +chord calculations are in any way problematic for your particular use case.

+

Also, unlike the “Euclidean” version, the bin spacing will be uniform in log(r) using the +actual great circle distances, rather than being based on the chord distances.

+
+
+

“Rperp” or “FisherRperp”

+

This metric is only valid for 3-dimensional coordinates (ra,dec,r) or (x,y,z).

+

The distance in this metric is defined as

+

\(d_{\rm Rperp} = \sqrt{d_{\rm Euclidean}^2 - r_\parallel^2}\)

+

where \(r_\parallel\) follows the definition in Fisher et al, 1994 (MNRAS, 267, 927). Namely, if \(p_1\) and \(p_2\) are the vector positions from Earth for the two points, and

+

\(L \equiv \frac{p_1 + p_2}{2}\)

+

then

+

\(r_\parallel = \frac{(p_2 - p_1) \cdot L}{|L|}\)

+

That is, it breaks up the full 3-d distance into perpendicular and parallel components: +\(d_{\rm 3d}^2 = r_\bot^2 + r_\parallel^2\), +and it identifies the metric separation as just the perpendicular component, \(r_\bot\).

+

Note that this decomposition is really only valid for objects with a relatively small angular +separation, \(\theta\), on the sky, so the two radial vectors are nearly parallel. +In this limit, the formula for \(d\) reduces to

+

\(d_{\rm Rperp} \approx \left(\frac{2 r_1 r_2}{r_1+r_2}\right) \theta\)

+
+

Warning

+

Prior to version 4.0, the “Rperp” name meant what is now called “OldRperp”. The difference can be significant for some use cases, so if consistency across versions is important to you, you should either switch to using “OldRperp” or investigate whether the change to “FisherRperp” is important for your particular science case.

+
+
+
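These definitions translate directly into numpy. A sketch with made-up positions (not part of the documented API):

>>> import numpy as np
>>> p1 = np.array([10., 0., 100.])   # vector positions from Earth
>>> p2 = np.array([-10., 0., 110.])
>>> L = (p1 + p2) / 2.
>>> r_par = np.dot(p2 - p1, L) / np.linalg.norm(L)
>>> d_euc = np.linalg.norm(p2 - p1)
>>> d_rperp = np.sqrt(d_euc**2 - r_par**2)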
+

“OldRperp”

+

This metric is only valid for 3-dimensional coordinates (ra,dec,r) or (x,y,z).

+

This is the version of the Rperp metric that TreeCorr used in versions 3.x. +In version 4.0, we switched the definition of \(r_\parallel\) to the one +used by Fisher et al, 1994 (MNRAS, 267, 927). The difference turns out to be +non-trivial in some realistic use cases, so we preserve the ability to use the +old version with this metric.

+

Specifically, if \(r_1\) and \(r_2\) are the two distances from Earth, then this metric uses \(r_\parallel \equiv r_2-r_1\).

+

The distance is then defined as

+

\(d_{\rm OldRperp} = \sqrt{d_{\rm Euclidean}^2 - r_\parallel^2}\)

+

That is, it breaks up the full 3-d distance into perpendicular and parallel components: +\(d_{\rm 3d}^2 = r_\bot^2 + r_\parallel^2\), +and it identifies the metric separation as just the perpendicular component, \(r_\bot\).

+

Note that this decomposition is really only valid for objects with a relatively small angular +separation, \(\theta\), on the sky, so the two radial vectors are nearly parallel. +In this limit, the formula for \(d\) reduces to

+

\(d_{\rm OldRperp} \approx \left(\sqrt{r_1 r_2}\right) \theta\)

+
+
+

“Rlens”

+

This metric is only valid when the first catalog uses 3-dimensional coordinates +(ra,dec,r) or (x,y,z). The second catalog may take either 3-d coordinates or spherical +coordinates (ra,dec).

+

The distance is defined as

+

\(d_{\rm Rlens} = r_1 \sin(\theta)\)

+

where \(\theta\) is the opening angle between the two objects and \(r_1\) is the +radial distance to the object in the first catalog. +In other words, this is the distance from the first object (nominally the “lens”) to the +line of sight to the second object (nominally the “source”). This is commonly referred to +as the impact parameter of the light path from the source as it passes the lens.

+

Since the basic metric does not use the radial distance to the source galaxies (\(r_2\)), it is not required. You may just provide (ra,dec) coordinates for the sources. However, if you want to use the min_rpar or max_rpar options (see Restrictions on the Line of Sight Separation below), then the source coordinates need to include r.

+
+
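A hedged usage sketch for this metric (the arrays and binning here are placeholders):

>>> lenses = treecorr.Catalog(ra=lens_ra, dec=lens_dec, r=lens_r,
...                           ra_units='deg', dec_units='deg')
>>> sources = treecorr.Catalog(ra=src_ra, dec=src_dec, g1=g1, g2=g2,
...                            ra_units='deg', dec_units='deg')
>>> ng = treecorr.NGCorrelation(min_sep=0.05, max_sep=10., nbins=20)
>>> ng.process(lenses, sources, metric='Rlens')  # separations in the units of r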
+

“Periodic”

+

This metric is equivalent to the Euclidean metric for either 2-d or 3-d coordinate systems, +except that the space is given periodic boundaries, and the distance between two +points is taken to be the smallest distance in the periodically repeating space. +It is invalid for Spherical coordinates.

+

When constructing the correlation object, you need to set period if the period is the +same in each direction. Or if you want different periods in each direction, you can +set xperiod, yperiod, and (if 3-d) zperiod individually. +We call these periods \(L_x\), \(L_y\), and \(L_z\) below.

+

The distance is defined as

+
+\[\begin{split}dx &= \min \left(|x_2 - x_1|, L_x - |x_2-x_1| \right) \\ +dy &= \min \left(|y_2 - y_1|, L_y - |y_2-y_1| \right) \\ +dz &= \min \left(|z_2 - z_1|, L_z - |z_2-z_1| \right)\end{split}\]
+
+\[d_{\rm Periodic} = \sqrt{dx^2 + dy^2 + dz^2}\]
+

Of course, for 2-dimensional coordinate systems, \(dz = 0\).

+

This metric is particularly relevant for data generated from N-body simulations, which often use periodic boundary conditions.

+
+
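For example, for a cubic box of side 100 in the same units as the positions (a sketch, assuming position arrays x, y, z):

>>> cat = treecorr.Catalog(x=x, y=y, z=z)
>>> dd = treecorr.NNCorrelation(min_sep=0.1, max_sep=25., nbins=20,
...                             period=100.)  # keep max_sep well below the period
>>> dd.process(cat, metric='Periodic')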
+

Restrictions on the Line of Sight Separation

+

There are two additional parameters that are tightly connected to the metric space: +min_rpar and max_rpar. +These set the minimum and maximum values of \(r_\parallel\) for pairs to be included in the +correlations.

+

This is most typically relevant for the Rperp or Rlens metrics, but we now (as of version 4.2) +allow these parameters for any metric.

+

The two different Rperp conventions (FisherRperp and OldRperp) have different definitions of +\(r_\parallel\) as described above, which are used in the definition of the metric distances. +These are the same \(r_\parallel\) definitions that are used for the min and max values +if min_rpar and/or max_rpar are given. +For all other metrics, we use the FisherRperp definition for \(r_\parallel\) if needed +for this purpose.

+

The sign of \(r_\parallel\) is defined such that positive values mean +the object from the second catalog is farther away. Thus, if the first catalog represents +lenses and the second catalog represents lensed source galaxies, then setting +min_rpar = 0 will restrict the sources to being in the background of each lens. +Contrariwise, setting max_rpar = 0 will restrict to pairs where the object in the first +catalog is behind the object in the second catalog.

+

Another common use case is to restrict to pairs that are near each other in line of sight distance. +Setting min_rpar = -50, max_rpar = 50 will restrict the pairs to only those that are +separated by no more than 50 Mpc (say, assuming the catalog distances are given in Mpc) along +the radial direction.

+
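For instance, to keep only sources behind each lens (a sketch; both catalogs are assumed to include radial distances):

>>> ng = treecorr.NGCorrelation(min_sep=0.5, max_sep=50., nbins=15,
...                             min_rpar=0.)  # positive r_par: source behind lens
>>> ng.process(lenses, sources, metric='Rperp')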
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/docs/_build/html/ng.html b/docs/_build/html/ng.html
new file mode 100644
index 00000000..5014b08f
--- /dev/null
+++ b/docs/_build/html/ng.html
@@ -0,0 +1,670 @@

NGCorrelation: Count-shear correlations

+
+
+class treecorr.NGCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr2.BinnedCorr2

+

This class handles the calculation and storage of a 2-point count-shear correlation +function. This is the tangential shear profile around lenses, commonly referred to as +galaxy-galaxy lensing.

+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
+
+
+

In addition, the following attributes are numpy arrays of length (nbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • meanr – The (weighted) mean value of r for the pairs in each bin. +If there are no pairs in a bin, then exp(logr) will be used instead.

  • +
  • meanlogr – The (weighted) mean value of log(r) for the pairs in each bin. +If there are no pairs in a bin, then logr will be used instead.

  • +
  • xi – The correlation function, \(\xi(r) = \langle \gamma_T\rangle\).

  • +
  • xi_im – The imaginary part of \(\xi(r)\).

  • +
  • varxi – An estimate of the variance of \(\xi\)

  • +
  • weight – The total weight in each bin.

  • +
  • npairs – The number of pairs going into each bin (including pairs where one or +both objects have w=0).

  • +
  • cov – An estimate of the full covariance matrix.

  • +
  • raw_xi – The raw value of xi, uncorrected by an RG calculation. cf. calculateXi

  • +
  • raw_xi_im – The raw value of xi_im, uncorrected by an RG calculation. cf. calculateXi

  • +
  • raw_varxi – The raw value of varxi, uncorrected by an RG calculation. cf. calculateXi

  • +
+
+
+
+

Note

+

The default method for estimating the variance and covariance attributes (varxi, +and cov) is ‘shot’, which only includes the shape noise propagated into +the final correlation. This does not include sample variance, so it is always an +underestimate of the actual variance. To get better estimates, you need to set +var_method to something else and use patches in the input catalog(s). +cf. Covariance Estimates.

+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_cross, +then the units will not be applied to meanr or meanlogr until the finalize +function is called.

+
+

The typical usage pattern is as follows:

+
>>> ng = treecorr.NGCorrelation(config)
+>>> ng.process(cat1,cat2)   # Compute the cross-correlation.
+>>> ng.write(file_name)     # Write out to a file.
+>>> xi = ng.xi              # Or access the correlation function directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr2, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr2 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two NGCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second NGCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both NGCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize NGCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+calculateNMap(*, R=None, rg=None, m2_uform=None)[source]
+

Calculate the aperture mass statistics from the correlation function.

+
+\[\begin{split}\langle N M_{ap} \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{R^2} +T_\times\left(\frac{r}{R}\right) \Re\xi(r) \\ +\langle N M_{\times} \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{R^2} +T_\times\left(\frac{r}{R}\right) \Im\xi(r)\end{split}\]
+

The m2_uform parameter sets which definition of the aperture mass to use. +The default is to use ‘Crittenden’.

+

If m2_uform is ‘Crittenden’:

+
+\[\begin{split}U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\ +T_\times(s) &= \frac{s^2}{128} (12-s^2) \exp(-s^2/4)\end{split}\]
+

cf. Crittenden, et al (2002): ApJ, 568, 20

+

If m2_uform is ‘Schneider’:

+
+\[\begin{split}U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\ +T_\times(s) &= \frac{18}{\pi} s^2 \arccos(s/2) \\ +&\qquad - \frac{3}{40\pi} s^3 \sqrt{4-s^2} (196 - 74s^2 + 14s^4 - s^6)\end{split}\]
+

cf. Schneider, et al (2002): A&A, 389, 729

+

In neither case is this formula in the above papers, but the derivation is similar +to the derivations of \(T_+\) and \(T_-\) in Schneider et al. (2002).

+
+
Parameters
+
    +
  • R (array) – The R values at which to calculate the aperture mass statistics. +(default: None, which means use self.rnom)

  • +
  • rg (NGCorrelation) – The cross-correlation using random locations as the lenses +(RG), if desired. (default: None)

  • +
  • m2_uform (str) – Which form to use for the aperture mass, as described above. +(default: ‘Crittenden’; this value can also be given in the +constructor in the config dict.)

  • +
+
+
Returns
+

Tuple containing

+
+
    +
  • nmap = array of \(\langle N M_{ap} \rangle(R)\)

  • +
  • nmx = array of \(\langle N M_{\times} \rangle(R)\)

  • +
  • varnmap = array of variance estimates of the above values

  • +
+
+

+
+
+
+ +
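For example (the aperture radii here are illustrative, and ng is assumed to have been processed already):

>>> import numpy as np
>>> R = np.logspace(0., 1.5, 10)   # aperture radii in the separation units
>>> nmap, nmx, varnmap = ng.calculateNMap(R=R)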
+
+calculateXi(*, rg=None)[source]
+

Calculate the correlation function, possibly given another correlation function that uses random points for the foreground objects.

+
    +
  • If rg is None, the simple correlation function \(\langle \gamma_T\rangle\) is +returned.

  • +
  • If rg is not None, then a compensated calculation is done: +\(\langle \gamma_T\rangle = (DG - RG)\), where DG represents the mean shear +around the lenses and RG represents the mean shear around random points.

  • +
+

After calling this function, the attributes xi, xi_im, varxi, and cov will correspond to the compensated values (if rg is provided). The raw, uncompensated values are available as raw_xi, raw_xi_im, and raw_varxi.

+
+
Parameters
+

rg (NGCorrelation) – The cross-correlation using random locations as the lenses +(RG), if desired. (default: None)

+
+
Returns
+

Tuple containing

+
+
    +
  • xi = array of the real part of \(\xi(R)\)

  • +
  • xi_im = array of the imaginary part of \(\xi(R)\)

  • +
  • varxi = array of the variance estimates of the above values

  • +
+
+

+
+
+
+ +
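A sketch of the compensated calculation (rand_cat is assumed to be a catalog of random points matching the lens selection):

>>> rg = treecorr.NGCorrelation(min_sep=0.5, max_sep=50., nbins=15)  # same binning as ng
>>> rg.process(rand_cat, source_cat)    # randoms in place of the lenses
>>> xi, xi_im, varxi = ng.calculateXi(rg=rg)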
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(varg)[source]
+

Finalize the calculation of the correlation function.

+

The process_cross command accumulates values in each bin, so it can be called +multiple times if appropriate. Afterwards, this command finishes the calculation +by dividing each column by the total weight.

+
+
Parameters
+

varg (float) – The shear variance per component for the second field.

+
+
+
+ +
+
+process(cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the correlation function.

+

Both arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the N field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the G field.

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr2.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_pairwise(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation, only using +the corresponding pairs of objects in each catalog.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The NGCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, rg=None, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+
    +
  • If rg is None, the simple correlation function \(\langle \gamma_T\rangle\) is used.

  • +
  • If rg is not None, then a compensated calculation is done: +\(\langle \gamma_T\rangle = (DG - RG)\), where DG represents the mean shear +around the lenses and RG represents the mean shear around random points.

  • +
+

The output file will include the following columns:

Column       Description
r_nom        The nominal center of the bin in r
meanr        The mean value \(\langle r \rangle\) of pairs that fell into each bin
meanlogr     The mean value \(\langle \log(r) \rangle\) of pairs that fell into each bin
gamT         The real part of the mean tangential shear, \(\langle \gamma_T \rangle(r)\)
gamX         The imag part of the mean tangential shear, \(\langle \gamma_\times \rangle(r)\)
sigma        The sqrt of the variance estimate of either of these
weight       The total weight contributing to each bin
npairs       The total number of pairs in each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • rg (NGCorrelation) – The cross-correlation using random locations as the lenses +(RG), if desired. (default: None)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+
+writeNMap(file_name, *, R=None, rg=None, m2_uform=None, file_type=None, precision=None)[source]
+

Write the cross correlation of the foreground galaxy counts with the aperture mass +based on the correlation function to the file, file_name.

+

If rg is provided, the compensated calculation will be used for \(\xi\).

+

See calculateNMap for an explanation of the m2_uform parameter.

+

The output file will include the following columns:

Column       Description
R            The radius of the aperture.
NMap         An estimate of \(\langle N_{ap} M_{ap} \rangle(R)\)
NMx          An estimate of \(\langle N_{ap} M_\times \rangle(R)\)
sig_nmap     The sqrt of the variance estimate of either of these

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • R (array) – The R values at which to calculate the aperture mass statistics. +(default: None, which means use self.rnom)

  • +
  • rg (NGCorrelation) – The cross-correlation using random locations as the lenses +(RG), if desired. (default: None)

  • +
  • m2_uform (str) – Which form to use for the aperture mass. (default: ‘Crittenden’; +this value can also be given in the constructor in the config dict.)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+writeNorm(file_name, *, gg, dd, rr, R=None, dr=None, rg=None, m2_uform=None, file_type=None, precision=None)[source]
+

Write the normalized aperture mass cross-correlation to the file, file_name.

+

The combination \(\langle N M_{ap}\rangle^2 / \langle M_{ap}^2\rangle \langle N_{ap}^2\rangle\) is related to \(r\), the galaxy-mass correlation coefficient. Similarly, \(\langle N_{ap}^2\rangle / \langle M_{ap}^2\rangle\) is related to \(b\), the galaxy bias parameter. cf. Hoekstra et al, 2002: http://adsabs.harvard.edu/abs/2002ApJ...577..604H

+

This function computes these combinations and outputs them to a file.

+
    +
  • if rg is provided, the compensated calculation will be used for +\(\langle N_{ap} M_{ap} \rangle\).

  • +
  • if dr is provided, the compensated calculation will be used for +\(\langle N_{ap}^2 \rangle\).

  • +
+

See calculateNMap for an explanation of the m2_uform parameter.

+

The output file will include the following columns:

Column       Description
R            The radius of the aperture
NMap         An estimate of \(\langle N_{ap} M_{ap} \rangle(R)\)
NMx          An estimate of \(\langle N_{ap} M_\times \rangle(R)\)
sig_nmap     The sqrt of the variance estimate of either of these
Napsq        An estimate of \(\langle N_{ap}^2 \rangle(R)\)
sig_napsq    The sqrt of the variance estimate of \(\langle N_{ap}^2 \rangle\)
Mapsq        An estimate of \(\langle M_{ap}^2 \rangle(R)\)
sig_mapsq    The sqrt of the variance estimate of \(\langle M_{ap}^2 \rangle\)
NMap_norm    The ratio \(\langle N_{ap} M_{ap} \rangle^2 / \langle N_{ap}^2 \rangle \langle M_{ap}^2 \rangle\)
sig_norm     The sqrt of the variance estimate of this ratio
Nsq_Mapsq    The ratio \(\langle N_{ap}^2 \rangle / \langle M_{ap}^2 \rangle\)
sig_nn_mm    The sqrt of the variance estimate of this ratio

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • gg (GGCorrelation) – The auto-correlation of the shear field

  • +
  • dd (NNCorrelation) – The auto-correlation of the lens counts (DD)

  • +
  • rr (NNCorrelation) – The auto-correlation of the random field (RR)

  • +
  • R (array) – The R values at which to calculate the aperture mass statistics. +(default: None, which means use self.rnom)

  • +
  • dr (NNCorrelation) – The cross-correlation of the data with randoms (DR), if +desired, in which case the Landy-Szalay estimator will be +calculated. (default: None)

  • +
  • rd (NNCorrelation) – The cross-correlation of the randoms with data (RD), if +desired. (default: None, which means use rd=dr)

  • +
  • rg (NGCorrelation) – The cross-correlation using random locations as the lenses +(RG), if desired. (default: None)

  • +
  • m2_uform (str) – Which form to use for the aperture mass. (default: ‘Crittenden’; +this value can also be given in the constructor in the config dict.)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
+
+
+
+ +
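Putting the pieces together, a hedged sketch (each correlation object is assumed to have been processed with matching binning; the catalogs are placeholders):

>>> gg = treecorr.GGCorrelation(config)
>>> gg.process(sources)
>>> dd = treecorr.NNCorrelation(config)
>>> dd.process(lenses)
>>> rr = treecorr.NNCorrelation(config)
>>> rr.process(randoms)
>>> ng.writeNorm('norm.out', gg=gg, dd=dd, rr=rr)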
+ +
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/docs/_build/html/nk.html b/docs/_build/html/nk.html
new file mode 100644
index 00000000..72486875
--- /dev/null
+++ b/docs/_build/html/nk.html
@@ -0,0 +1,473 @@

NKCorrelation: Count-kappa correlations

+
+
+class treecorr.NKCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr2.BinnedCorr2

+

This class handles the calculation and storage of a 2-point count-kappa correlation +function.

+
+

Note

+

While we use the term kappa (\(\kappa\)) here and the letter K in various places, +in fact any scalar field will work here. For example, you can use this to compute +correlations of non-shear quantities, e.g. the sizes or concentrations of galaxies, around +a set of lenses, where “kappa” would be the measurements of these quantities.

+
+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
+
+
+

In addition, the following attributes are numpy arrays of length (nbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • meanr – The (weighted) mean value of r for the pairs in each bin. +If there are no pairs in a bin, then exp(logr) will be used instead.

  • +
  • meanlogr – The (weighted) mean value of log(r) for the pairs in each bin. +If there are no pairs in a bin, then logr will be used instead.

  • +
  • xi – The correlation function, \(\xi(r) = \langle \kappa\rangle\).

  • +
  • varxi – An estimate of the variance of \(\xi\)

  • +
  • weight – The total weight in each bin.

  • +
  • npairs – The number of pairs going into each bin (including pairs where one or +both objects have w=0).

  • +
  • cov – An estimate of the full covariance matrix.

  • +
  • raw_xi – The raw value of xi, uncorrected by an RK calculation. cf. calculateXi

  • +
  • raw_varxi – The raw value of varxi, uncorrected by an RK calculation. cf. calculateXi

  • +
+
+
+
+

Note

+

The default method for estimating the variance and covariance attributes (varxi, +and cov) is ‘shot’, which only includes the shape noise propagated into +the final correlation. This does not include sample variance, so it is always an +underestimate of the actual variance. To get better estimates, you need to set +var_method to something else and use patches in the input catalog(s). +cf. Covariance Estimates.

+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_cross, +then the units will not be applied to meanr or meanlogr until the finalize +function is called.

+
+

The typical usage pattern is as follows:

+
>>> nk = treecorr.NKCorrelation(config)
+>>> nk.process(cat1,cat2)   # Compute the cross-correlation function.
+>>> nk.write(file_name)     # Write out to a file.
+>>> xi = nk.xi              # Or access the correlation function directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr2, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr2 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two NKCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second NKCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both NKCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize NKCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+calculateXi(*, rk=None)[source]
+

Calculate the correlation function possibly given another correlation function +that uses random points for the foreground objects.

+
    +
  • If rk is None, the simple correlation function \(\langle \kappa \rangle\) is +returned.

  • +
  • If rk is not None, then a compensated calculation is done: +\(\langle \kappa \rangle = (DK - RK)\), where DK represents the mean kappa +around the lenses and RK represents the mean kappa around random points.

  • +
+

After calling this function, the attributes xi, varxi and cov will correspond to the compensated values (if rk is provided). The raw, uncompensated values are available as raw_xi and raw_varxi.
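For instance, a compensated measurement might be assembled as follows. (This is a sketch; lens_cat, kappa_cat, and rand_cat are placeholder catalogs.)

>>> nk = treecorr.NKCorrelation(config)
>>> nk.process(lens_cat, kappa_cat)     # DK: mean kappa around the lenses
>>> rk = treecorr.NKCorrelation(config)
>>> rk.process(rand_cat, kappa_cat)     # RK: mean kappa around random points
>>> xi, varxi = nk.calculateXi(rk=rk)   # Compensated estimate, DK - RK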

+
+
Parameters
+

rk (NKCorrelation) – The cross-correlation using random locations as the lenses (RK), +if desired. (default: None)

+
+
Returns
+

Tuple containing

+
+
    +
  • xi = array of \(\xi(r)\)

  • +
  • varxi = array of variance estimates of \(\xi(r)\)

  • +
+
+

+
+
+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize(vark)[source]
+

Finalize the calculation of the correlation function.

+

The process_cross command accumulates values in each bin, so it can be called +multiple times if appropriate. Afterwards, this command finishes the calculation +by dividing each column by the total weight.

+
+
Parameters
+

vark – The kappa variance for the second field.

+
+
+
+ +
+
+process(cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the correlation function.

+

Both arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the N field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the K field.

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between processes. If used, the rank=0 process will have the final computation. This only works if using patches. (default: None) See the sketch after this parameter list.

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr2.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
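As a sketch of the comm option, the pattern is roughly as follows. This assumes hypothetical input files, patch-based catalogs, and a script launched with mpiexec; it is not a definitive recipe.

>>> from mpi4py import MPI
>>> comm = MPI.COMM_WORLD
>>> cat1 = treecorr.Catalog('lenses.fits', ra_col='RA', dec_col='DEC',
...                         ra_units='deg', dec_units='deg', npatch=32)
>>> cat2 = treecorr.Catalog('kappa.fits', ra_col='RA', dec_col='DEC',
...                         k_col='KAPPA', ra_units='deg', dec_units='deg',
...                         patch_centers=cat1.patch_centers)
>>> nk = treecorr.NKCorrelation(min_sep=1., max_sep=100., nbins=10,
...                             sep_units='arcmin')
>>> nk.process(cat1, cat2, comm=comm, low_mem=True)
>>> if comm.Get_rank() == 0:
...     nk.write('nk.fits')   # Only rank 0 holds the final result.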
+
+ +
+
+process_cross(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_pairwise(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation, only using +the corresponding pairs of objects in each catalog.

+

This accumulates the weighted sums into the bins, but does not finalize +the calculation by dividing by the total weight at the end. After +calling this function as often as desired, the finalize command will +finish the calculation.

+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The NKCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, rk=None, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+
    +
  • If rk is None, the simple correlation function \(\langle \kappa \rangle(R)\) is +used.

  • +
  • If rk is not None, then a compensated calculation is done: +\(\langle \kappa \rangle = (DK - RK)\), where DK represents the mean kappa +around the lenses and RK represents the mean kappa around random points.

  • +
+

The output file will include the following columns:

+ ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Column

Description

r_nom

The nominal center of the bin in r

meanr

The mean value \(\langle r\rangle\) of pairs that +fell into each bin

meanlogr

The mean value \(\langle \log(r)\rangle\) of pairs +that fell into each bin

kappa

The mean value \(\langle \kappa\rangle(r)\)

sigma

The sqrt of the variance estimate of +\(\langle \kappa\rangle\)

weight

The total weight contributing to each bin

npairs

The total number of pairs in each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • rk (NKCorrelation) – The cross-correlation using random locations as the lenses (RK), +if desired. (default: None)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/nn.html b/docs/_build/html/nn.html new file mode 100644 index 00000000..a7798385 --- /dev/null +++ b/docs/_build/html/nn.html @@ -0,0 +1,584 @@ + + + + + + NNCorrelation: Count-count correlations — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

NNCorrelation: Count-count correlations

+
+
+class treecorr.NNCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr2.BinnedCorr2

+

This class handles the calculation and storage of a 2-point count-count correlation +function. i.e. the regular density correlation function.

+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • nbins – The number of bins in logr

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
+
+
+

In addition, the following attributes are numpy arrays of length (nbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • meanr – The (weighted) mean value of r for the pairs in each bin. +If there are no pairs in a bin, then exp(logr) will be used instead.

  • +
  • meanlogr – The mean value of log(r) for the pairs in each bin. +If there are no pairs in a bin, then logr will be used instead.

  • +
  • weight – The total weight in each bin.

  • +
  • npairs – The number of pairs going into each bin (including pairs where one or +both objects have w=0).

  • +
  • tot – The total number of pairs processed, which is used to normalize +the randoms if they have a different number of pairs.

  • +
+
+
+

If calculateXi has been called, then the following will also be available:

+
+
Attributes
+
    +
  • xi – The correlation function, \(\xi(r)\)

  • +
  • varxi – An estimate of the variance of \(\xi\)

  • +
  • cov – An estimate of the full covariance matrix.

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_auto and/or +process_cross, then the units will not be applied to meanr or meanlogr until +the finalize function is called.

+
+

The typical usage pattern is as follows:

+
>>> nn = treecorr.NNCorrelation(config)
+>>> nn.process(cat)         # For auto-correlation.
+>>> nn.process(cat1,cat2)   # For cross-correlation.
+>>> rr.process...           # Likewise for random-random correlations
+>>> dr.process...           # If desired, also do data-random correlations
+>>> rd.process...           # For cross-correlations, also do the reverse.
+>>> nn.write(file_name,rr=rr,dr=dr,rd=rd)         # Write out to a file.
+>>> xi,varxi = nn.calculateXi(rr=rr,dr=dr,rd=rd)  # Or get correlation function directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr2, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr2 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two NNCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second NNCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both NNCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize NNCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+calculateNapSq(*, rr, R=None, dr=None, rd=None, m2_uform=None)[source]
+

Calculate the analog of the aperture mass statistics for counts.

+
+\[\begin{split}\langle N_{ap}^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2} +\left [ T_+\left(\frac{r}{R}\right) \xi(r) \right] \\\end{split}\]
+

The m2_uform parameter sets which definition of the aperture mass to use. +The default is to use ‘Crittenden’.

+

If m2_uform is ‘Crittenden’:

+
+\[\begin{split}U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\ +T_+(s) &= \frac{s^4 - 16s^2 + 32}{128} \exp(-s^2/4) \\ +rmax &= \infty\end{split}\]
+

cf. Crittenden, et al (2002): ApJ, 568, 20

+

If m2_uform is ‘Schneider’:

+
+\[\begin{split}U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\ +T_+(s) &= \frac{12}{5\pi} (2-15s^2) \arccos(s/2) \\ +&\qquad + \frac{1}{100\pi} s \sqrt{4-s^2} (120 + 2320s^2 - 754s^4 + 132s^6 - 9s^8) \\ +rmax &= 2R\end{split}\]
+

cf. Schneider, et al (2002): A&A, 389, 729

+

This is used by NGCorrelation.writeNorm. See that function and also +GGCorrelation.calculateMapSq for more details.
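A minimal sketch of calling this method directly, assuming dd and rr are already-processed data and random NNCorrelation objects (placeholder names):

>>> nsq, varnsq = dd.calculateNapSq(rr=rr)   # <Nap^2>(R) at R = dd.rnom
>>> nsq2, varnsq2 = dd.calculateNapSq(rr=rr, m2_uform='Schneider')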

+
+
Parameters
+
    +
  • rr (NNCorrelation) – The auto-correlation of the random field (RR)

  • +
  • R (array) – The R values at which to calculate the aperture mass statistics. +(default: None, which means use self.rnom)

  • +
  • dr (NNCorrelation) – The cross-correlation of the data with randoms (DR), if +desired. (default: None)

  • +
  • rd (NNCorrelation) – The cross-correlation of the randoms with data (RD), if +desired. (default: None, which means use rd=dr)

  • +
  • m2_uform (str) – Which form to use for the aperture mass. (default: ‘Crittenden’; +this value can also be given in the constructor in the config dict.)

  • +
+
+
Returns
+

Tuple containing

+
+
    +
  • nsq = array of \(\langle N_{ap}^2 \rangle(R)\)

  • +
  • varnsq = array of variance estimates of this value

  • +
+
+

+
+
+
+ +
+
+calculateXi(*, rr, dr=None, rd=None)[source]
+

Calculate the correlation function given another correlation function of random +points using the same mask, and possibly cross correlations of the data and random.

+

The rr value is the NNCorrelation function for random points. For a signal that involves a cross correlation, there should be two random cross-correlations: data-random and random-data, given as dr and rd.

+
    +
  • If dr is None, the simple correlation function \(\xi = (DD/RR - 1)\) is used.

  • +
  • if dr is given and rd is None, then \(\xi = (DD - 2DR + RR)/RR\) is used.

  • +
  • If dr and rd are both given, then \(\xi = (DD - DR - RD + RR)/RR\) is used.

  • +
+

where DD is the data NN correlation function, which is the current object.

+
+

Note

+

The default method for estimating the variance is ‘shot’, which only includes the +shot noise propagated into the final correlation. This does not include sample +variance, so it is always an underestimate of the actual variance. To get better +estimates, you need to set var_method to something else and use patches in the +input catalog(s). cf. Covariance Estimates.

+
+

After calling this method, you can use the BinnedCorr2.estimate_cov method or use this +correlation object in the estimate_multi_cov function. Also, the calculated xi and +varxi returned from this function will be available as attributes.
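For example, a short sketch of getting a jackknife covariance for the Landy-Szalay estimator, assuming nn, rr, dr, and rd were all built from catalogs with patches (placeholder names):

>>> import numpy
>>> xi, varxi = nn.calculateXi(rr=rr, dr=dr, rd=rd)
>>> cov = nn.estimate_cov('jackknife')   # Full covariance from the patches
>>> sigma = numpy.sqrt(cov.diagonal())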

+
+
Parameters
+
    +
  • rr (NNCorrelation) – The auto-correlation of the random field (RR)

  • +
  • dr (NNCorrelation) – The cross-correlation of the data with randoms (DR), if +desired, in which case the Landy-Szalay estimator will be +calculated. (default: None)

  • +
  • rd (NNCorrelation) – The cross-correlation of the randoms with data (RD), if +desired. (default: None, which means use rd=dr)

  • +
+
+
Returns
+

    +
  • xi = array of \(\xi(r)\)

  • +
  • varxi = an estimate of the variance of \(\xi(r)\)

  • +
+

+
+
Return type
+

Tuple containing

+
+
+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize()[source]
+

Finalize the calculation of the correlation function.

+

The process_auto and process_cross commands accumulate values in each bin, +so they can be called multiple times if appropriate. Afterwards, this command +finishes the calculation of meanr, meanlogr by dividing by the total weight.

+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

This raises a RuntimeError if calculateXi has not been run yet.

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

This is the weight array corresponding to getStat. In this case, it is the denominator +RR from the calculation done by calculateXi().

+
+ +
+
+process(cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Compute the correlation function.

+
    +
  • If only 1 argument is given, then compute an auto-correlation function.

  • +
  • If 2 arguments are given, then compute a cross-correlation function.

  • +
+

Both arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first N field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second N field, if any. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr2.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_auto(cat, *, metric=None, num_threads=None)[source]
+

Process a single catalog, accumulating the auto-correlation.

+

This accumulates the auto-correlation for the given catalog. After +calling this function as often as desired, the finalize command will +finish the calculation of meanr, meanlogr.

+
+
Parameters
+
    +
  • cat (Catalog) – The catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meanr, meanlogr.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_pairwise(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process a single pair of catalogs, accumulating the cross-correlation, only using +the corresponding pairs of objects in each catalog.

+

This accumulates the sums into the bins, but does not finalize the calculation. +After calling this function as often as desired, the finalize command will +finish the calculation.

+
+

Warning

+
+

Deprecated since version 4.1: This function is deprecated and slated to be removed. +If you have a need for it, please open an issue to describe your use case.

+
+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The NNCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, rr=None, dr=None, rd=None, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

rr is the NNCorrelation function for random points. If dr is None, the simple correlation function \(\xi = (DD - RR)/RR\) is used. If dr is given and rd is None, then \(\xi = (DD - 2DR + RR)/RR\) is used. If dr and rd are both given, then \(\xi = (DD - DR - RD + RR)/RR\) is used.

+

Normally, at least rr should be provided, but if this is also None, then only the +basic accumulated number of pairs are output (along with the separation columns).

+

The output file will include the following columns:

+ ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Column

Description

r_nom

The nominal center of the bin in r

meanr

The mean value \(\langle r\rangle\) of pairs that fell +into each bin

meanlogr

The mean value \(\langle \log(r)\rangle\) of pairs that +fell into each bin

xi

The estimator \(\xi\) (if rr is given, or calculateXi +has been called)

sigma_xi

The sqrt of the variance estimate of xi (if rr is given +or calculateXi has been called)

DD

The total weight of pairs in each bin.

RR

The total weight of RR pairs in each bin (if rr is given)

DR

The total weight of DR pairs in each bin (if dr is given)

RD

The total weight of RD pairs in each bin (if rd is given)

npairs

The total number of pairs in each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • rr (NNCorrelation) – The auto-correlation of the random field (RR)

  • +
  • dr (NNCorrelation) – The cross-correlation of the data with randoms (DR), if +desired. (default: None)

  • +
  • rd (NNCorrelation) – The cross-correlation of the randoms with data (RD), if +desired. (default: None, which means use rd=dr)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). +(default: determine the type automatically from the extension +of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config +dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/nnn.html b/docs/_build/html/nnn.html new file mode 100644 index 00000000..b4d28c9a --- /dev/null +++ b/docs/_build/html/nnn.html @@ -0,0 +1,889 @@ + + + + + + NNNCorrelation: Count-count-count correlations — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

NNNCorrelation: Count-count-count correlations

+
+
+class treecorr.NNNCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr3.BinnedCorr3

+

This class handles the calculation and storage of a 3-point count-count-count correlation function, i.e. the regular density three-point correlation function.

+

See the doc string of BinnedCorr3 for a description of how the triangles are binned.

+

Objects of this class hold the following attributes:

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r) (the natural logarithm of r).

  • +
  • nbins – The number of bins in logr where r = d2

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
  • nubins – The number of bins in u where u = d3/d2

  • +
  • ubin_size – The size of the bins in u

  • +
  • min_u – The minimum u being considered

  • +
  • max_u – The maximum u being considered

  • +
  • nvbins – The number of bins in v where v = +-(d1-d2)/d3

  • +
  • vbin_size – The size of the bins in v

  • +
  • min_v – The minimum v being considered

  • +
  • max_v – The maximum v being considered

  • +
  • logr1d – The nominal centers of the nbins bins in log(r).

  • +
  • u1d – The nominal centers of the nubins bins in u.

  • +
  • v1d – The nominal centers of the nvbins bins in v.

  • +
+
+
+

In addition, the following attributes are numpy arrays whose shape is (nbins, nubins, nvbins):

+
+
Attributes
+
    +
  • logr – The nominal center of the bin in log(r).

  • +
  • rnom – The nominal center of the bin converted to regular distance. +i.e. r = exp(logr).

  • +
  • u – The nominal center of the bin in u.

  • +
  • v – The nominal center of the bin in v.

  • +
  • meand1 – The (weighted) mean value of d1 for the triangles in each bin.

  • +
  • meanlogd1 – The mean value of log(d1) for the triangles in each bin.

  • +
  • meand2 – The (weighted) mean value of d2 (aka r) for the triangles in each bin.

  • +
  • meanlogd2 – The mean value of log(d2) for the triangles in each bin.

  • +
  • meand3 – The (weighted) mean value of d3 for the triangles in each bin.

  • +
  • meanlogd3 – The mean value of log(d3) for the triangles in each bin.

  • +
  • meanu – The mean value of u for the triangles in each bin.

  • +
  • meanv – The mean value of v for the triangles in each bin.

  • +
  • weight – The total weight in each bin.

  • +
  • ntri – The number of triangles going into each bin (including those where one or +more objects have w=0).

  • +
  • tot – The total number of triangles processed, which is used to normalize +the randoms if they have a different number of triangles.

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_auto and/or +process_cross, then the units will not be applied to meanr or meanlogr until +the finalize function is called.

+
+

The typical usage pattern is as follows:

+
>>> nnn = treecorr.NNNCorrelation(config)
+>>> nnn.process(cat)         # For auto-correlation.
+>>> rrr.process(rand)        # Likewise for random-random correlations
+>>> drr.process(cat,rand)    # If desired, also do data-random correlations
+>>> rdd.process(rand,cat)    # Also with two data and one random
+>>> nnn.write(file_name,rrr=rrr,drr=drr,...)  # Write out to a file.
+>>> zeta,varzeta = nnn.calculateZeta(rrr=rrr,drr=drr,rdd=rdd)  # Or get zeta directly.
+
+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr3, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr3 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two NNNCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second NNNCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both NNNCorrelation objects should not have had finalize +called yet. Then, after adding them together, you should call finalize on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize NNNCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+calculateZeta(*, rrr, drr=None, rdd=None)[source]
+

Calculate the 3pt function given another 3pt function of random +points using the same mask, and possibly cross correlations of the data and random.

+

There are two possible formulae that are currently supported.

+
    +
  1. The simplest formula to use is \(\zeta^\prime = (DDD-RRR)/RRR\). +In this case, only rrr needs to be given, the NNNCorrelation of a random field. +However, note that in this case, the return value is not normally called \(\zeta\). +Rather, this is an estimator of

    +
    +\[\zeta^\prime(d_1,d_2,d_3) = \zeta(d_1,d_2,d_3) + \xi(d_1) + \xi(d_2) + \xi(d_3)\]
    +

    where \(\xi\) is the two-point correlation function for each leg of the triangle. +You would typically want to calculate that separately and subtract off the +two-point contributions.

    +
  2. For auto-correlations, a better formula is \(\zeta = (DDD-RDD+DRR-RRR)/RRR\). In this case, RDD is the number of triangles where 1 point comes from the randoms and 2 points are from the data. Similarly, DRR has 1 point from the data and 2 from the randoms. These are what are calculated from calling:

    +
    >>> drr.process(data_cat, rand_cat)
    +>>> rdd.process(rand_cat, data_cat)
    +
    +
    +
    +

    Note

    +

One might think the formula should be \(\zeta = (DDD-3RDD+3DRR-RRR)/RRR\) by analogy with the 2pt Landy-Szalay formula. However, the way these are calculated, the object we are calling RDD already includes triangles where R is in each of the 3 locations. So it is really more like RDD + DRD + DDR. These are not computed separately. Rather the single computation of rdd described above accumulates all three permutations together. So that one object includes everything for the second term. Likewise drr has all the permutations that are relevant for the third term.

    +
    +
+
    +
  • If only rrr is provided, the first formula will be used.

  • +
  • If all of rrr, drr, rdd are provided then the second will be used (see the sketch at the end of this entry).

  • +
+
+
Parameters
+
  • rrr (NNNCorrelation) – The auto-correlation of the random field (RRR)

  • drr (NNNCorrelation) – DRR, if desired. (default: None)

  • rdd (NNNCorrelation) – RDD, if desired. (default: None)
+
+
Returns
+

Tuple containing

+
+
    +
  • zeta = array of \(\zeta(d_1,d_2,d_3)\)

  • +
  • varzeta = array of variance estimates of \(\zeta(d_1,d_2,d_3)\)

  • +
+
+

+
+
+
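Putting the pieces above together, a compensated \(\zeta\) might be computed along these lines (data_cat and rand_cat are placeholder catalogs):

>>> ddd = treecorr.NNNCorrelation(config)
>>> ddd.process(data_cat)
>>> rrr = treecorr.NNNCorrelation(config)
>>> rrr.process(rand_cat)
>>> drr = treecorr.NNNCorrelation(config)
>>> drr.process(data_cat, rand_cat)
>>> rdd = treecorr.NNNCorrelation(config)
>>> rdd.process(rand_cat, data_cat)
>>> zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd)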
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize()[source]
+

Finalize the calculation of meand1, meanlogd1, etc.

+

The process_auto and process_cross commands accumulate values in each bin, +so they can be called multiple times if appropriate. Afterwards, this command +finishes the calculation of meanlogr, meanu, meanv by dividing by the total weight.

+
+ +
+
+getStat()[source]
+

The standard statistic for the current correlation object as a 1-d array.

+

This raises a RuntimeError if calculateZeta has not been run yet.

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

This is the weight array corresponding to getStat. In this case, it is the denominator +RRR from the calculation done by calculateZeta().

+
+ +
+
+process(cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Accumulate the 3pt correlation of the points in the given Catalog(s).

+
    +
  • If only 1 argument is given, then compute an auto-correlation function.

  • +
  • If 2 arguments are given, then compute a cross-correlation function with the +first catalog taking one corner of the triangles, and the second taking two corners.

  • +
  • If 3 arguments are given, then compute a three-way cross-correlation.

  • +
+

All arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+

Note

+

For a correlation of multiple catalogs, it typically matters which corner of the +triangle comes from which catalog, which is not kept track of by this function. +The final accumulation will have d1 > d2 > d3 regardless of which input catalog +appears at each corner. The class which keeps track of which catalog appears +in each position in the triangle is NNNCrossCorrelation.

+
+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first N field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second N field. +(default: None)

  • +
  • cat3 (Catalog) – A catalog or list of catalogs for the third N field. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr3.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_auto(cat, *, metric=None, num_threads=None)[source]
+

Process a single catalog, accumulating the auto-correlation.

+

This accumulates the auto-correlation for the given catalog. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+
Parameters
+
    +
  • cat (Catalog) – The catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, cat3, *, metric=None, num_threads=None)[source]
+

Process a set of three catalogs, accumulating the 3pt cross-correlation.

+

This accumulates the cross-correlation for the given catalogs as part of a larger +auto-correlation calculation. E.g. when splitting up a large catalog into patches, +this is appropriate to use for the cross correlation between different patches +as part of the complete auto-correlation of the full catalog.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • cat3 (Catalog) – The third catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross12(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle comes from the first catalog, and two come from the second.

+

This accumulates the cross-correlation for the given catalogs as part of a larger +auto-correlation calculation. E.g. when splitting up a large catalog into patches, +this is appropriate to use for the cross correlation between different patches +as part of the complete auto-correlation of the full catalog.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process. (1 point in each triangle will come +from this catalog.)

  • +
  • cat2 (Catalog) – The second catalog to process. (2 points in each triangle will come +from this catalog.)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The NNNCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, rrr=None, drr=None, rdd=None, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+

Normally, at least rrr should be provided, but if this is None, then only the +basic accumulated number of triangles are output (along with the columns parametrizing +the size and shape of the triangles).

+

If at least rrr is given, then it will output an estimate of the final 3pt correlation +function, \(\zeta\). There are two possible formulae that are currently supported.

+
    +
  1. The simplest formula to use is \(\zeta^\prime = (DDD-RRR)/RRR\). +In this case, only rrr needs to be given, the NNNCorrelation of a random field. +However, note that in this case, the return value is not what is normally called +\(\zeta\). Rather, this is an estimator of

    +
    +\[\zeta^\prime(d_1,d_2,d_3) = \zeta(d_1,d_2,d_3) + \xi(d_1) + \xi(d_2) + \xi(d_3)\]
    +

    where \(\xi\) is the two-point correlation function for each leg of the triangle. +You would typically want to calculate that separately and subtract off the +two-point contributions.

    +
  2. For auto-correlations, a better formula is \(\zeta = (DDD-RDD+DRR-RRR)/RRR\). In this case, RDD is the number of triangles where 1 point comes from the randoms and 2 points are from the data. Similarly, DRR has 1 point from the data and 2 from the randoms. For this case, all combinations rrr, drr, and rdd must be provided.
  4. +
+

The output file will include the following columns:

+ ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Column

Description

r_nom

The nominal center of the bin in r = d2 where d1 > d2 > d3

u_nom

The nominal center of the bin in u = d3/d2

v_nom

The nominal center of the bin in v = +-(d1-d2)/d3

meand1

The mean value \(\langle d1\rangle\) of triangles that fell +into each bin

meanlogd1

The mean value \(\langle \log(d1)\rangle\) of triangles that +fell into each bin

meand2

The mean value \(\langle d2\rangle\) of triangles that fell +into each bin

meanlogd2

The mean value \(\langle \log(d2)\rangle\) of triangles that +fell into each bin

meand3

The mean value \(\langle d3\rangle\) of triangles that fell +into each bin

meanlogd3

The mean value \(\langle \log(d3)\rangle\) of triangles that +fell into each bin

meanu

The mean value \(\langle u\rangle\) of triangles that fell +into each bin

meanv

The mean value \(\langle v\rangle\) of triangles that fell +into each bin

zeta

The estimator \(\zeta(r,u,v)\) (if rrr is given)

sigma_zeta

The sqrt of the variance estimate of \(\zeta\) +(if rrr is given)

DDD

The total weight of DDD triangles in each bin

RRR

The total weight of RRR triangles in each bin (if rrr is given)

DRR

The total weight of DRR triangles in each bin (if drr is given)

RDD

The total weight of RDD triangles in each bin (if rdd is given)

ntri

The number of triangles contributing to each bin

+

If sep_units was given at construction, then the distances will all be in these units. +Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or +radians (for spherical coordinates).

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • rrr (NNNCorrelation) – The auto-correlation of the random field (RRR)

  • +
  • drr (NNNCorrelation) – DRR if desired. (default: None)

  • +
  • rdd (NNNCorrelation) – RDD if desired. (default: None)

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). +(default: determine the type automatically from the extension +of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config +dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+
+class treecorr.NNNCrossCorrelation(config=None, *, logger=None, **kwargs)[source]
+

Bases: treecorr.binnedcorr3.BinnedCorr3

+

This class handles the calculation of a 3-point count-count-count cross-correlation function.

+

For 3-point cross correlations, it matters which of the two or three fields falls on each corner of the triangle. E.g. is field 1 on the corner opposite d1 (the longest side of the triangle) or is it field 2 (or 3) there? This is in contrast to the 2-point correlation where the symmetry of the situation means that it doesn’t matter which point is identified with each field. This makes it significantly more complicated to keep track of all the relevant information for a 3-point cross correlation function.

+

The NNNCorrelation class holds a single \(\zeta\) function describing all possible triangles, parameterized according to their relative side lengths ordered as d1 > d2 > d3.

+

For a cross-correlation of two fields: N1 - N1 - N2 (i.e. the N1 field is at two of the corners and N2 is at one corner), we need three of these \(\zeta\) functions to capture all of the triangles, since the N2 points may be opposite d1 or d2 or d3. For a cross-correlation of three fields: N1 - N2 - N3, we need six sets to account for all of the possible permutations relative to the triangle sides.

+

Therefore, this class holds 6 instances of NNNCorrelation, which in turn hold the +information about triangles in each of the relevant configurations. We name these:

+
+
Attributes
+
    +
  • n1n2n3 – Triangles where N1 is opposite d1, N2 is opposite d2, N3 is opposite d3.

  • +
  • n1n3n2 – Triangles where N1 is opposite d1, N3 is opposite d2, N2 is opposite d3.

  • +
  • n2n1n3 – Triangles where N2 is opposite d1, N1 is opposite d2, N3 is opposite d3.

  • +
  • n2n3n1 – Triangles where N2 is opposite d1, N3 is opposite d2, N1 is opposite d3.

  • +
  • n3n1n2 – Triangles where N3 is opposite d1, N1 is opposite d2, N2 is opposite d3.

  • +
  • n3n2n1 – Triangles where N3 is opposite d1, N2 is opposite d2, N1 is opposite d3.

  • +
+
+
+

If for instance N2 and N3 are the same field, then e.g. n1n2n3 and n1n3n2 will have +the same values.
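A short sketch of accessing these components after processing (cat1, cat2, cat3 are placeholder catalogs):

>>> nnn = treecorr.NNNCrossCorrelation(config)
>>> nnn.process(cat1, cat2, cat3)
>>> ntri_123 = nnn.n1n2n3.ntri   # N1 opposite d1, N2 opposite d2, N3 opposite d3
>>> ntri_321 = nnn.n3n2n1.ntri   # N3 opposite d1, N2 opposite d2, N1 opposite d3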

+

Objects of this class also hold the following attributes, which are identical in each of the above NNNCorrelation instances.

+
+
Attributes
+
    +
  • nbins – The number of bins in logr where r = d2

  • +
  • bin_size – The size of the bins in logr

  • +
  • min_sep – The minimum separation being considered

  • +
  • max_sep – The maximum separation being considered

  • +
  • nubins – The number of bins in u where u = d3/d2

  • +
  • ubin_size – The size of the bins in u

  • +
  • min_u – The minimum u being considered

  • +
  • max_u – The maximum u being considered

  • +
  • nvbins – The number of bins in v where v = +-(d1-d2)/d3

  • +
  • vbin_size – The size of the bins in v

  • +
  • min_v – The minimum v being considered

  • +
  • max_v – The maximum v being considered

  • +
  • logr1d – The nominal centers of the nbins bins in log(r).

  • +
  • u1d – The nominal centers of the nubins bins in u.

  • +
  • v1d – The nominal centers of the nvbins bins in v.

  • +
+
+
+

If sep_units are given (either in the config dict or as a named kwarg) then the distances +will all be in these units.

+
+

Note

+

If you separate out the steps of the process command and use process_cross directly, +then the units will not be applied to meanr or meanlogr until the finalize +function is called.

+
+
+
Parameters
+
    +
  • config (dict) – A configuration dict that can be used to pass in kwargs if desired. This dict is allowed to have additional entries besides those listed in BinnedCorr3, which are ignored here. (default: None)

  • +
  • logger – If desired, a logger object for logging. (default: None, in which case +one will be built according to the config dict’s verbose level.)

  • +
+
+
Keyword Arguments
+

**kwargs – See the documentation for BinnedCorr3 for the list of allowed keyword +arguments, which may be passed either directly or in the config dict.

+
+
+
+
+__eq__(other)[source]
+

Return whether two NNNCrossCorrelation instances are equal

+
+ +
+
+__iadd__(other)[source]
+

Add a second NNNCrossCorrelation’s data to this one.

+
+

Note

+

For this to make sense, both NNNCrossCorrelation objects should not have had +finalize called yet. Then, after adding them together, you should call finalize +on the sum.

+
+
+ +
+
+__init__(config=None, *, logger=None, **kwargs)[source]
+

Initialize NNNCrossCorrelation. See class doc for details.

+
+ +
+
+__repr__()[source]
+

Return repr(self).

+
+ +
+
+copy()[source]
+

Make a copy

+
+ +
+
+finalize()[source]
+

Finalize the calculation of the correlation function.

+

The process_cross command accumulates values in each bin, so it can be called multiple times if appropriate. Afterwards, this command finishes the calculation by dividing by the total weight.

+
+ +
+
+getWeight()[source]
+

The weight array for the current correlation object as a 1-d array.

+

For NNNCrossCorrelation, this is always just 1. We don’t currently have any ability to automatically handle a random catalog for NNNCrossCorrelations, so we don’t know what the correct weight would be for a given patch or set of patches. This value is only used by the sample method of covariance estimation, so this limitation means that sample covariances may be expected to be less accurate than normal when used with NNNCrossCorrelations.

+
+ +
+
+property nonzero
+

Return whether any values have been accumulated yet. (i.e. ntri > 0)

+
+ +
+
+process(cat1, cat2, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True)[source]
+

Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3.

+
    +
  • If 2 arguments are given, then compute a cross-correlation function with the +first catalog taking one corner of the triangles, and the second taking two corners.

  • +
  • If 3 arguments are given, then compute a three-way cross-correlation function.

  • +
+

All arguments may be lists, in which case all items in the list are used +for that element of the correlation.

+
+
Parameters
+
    +
  • cat1 (Catalog) – A catalog or list of catalogs for the first N field.

  • +
  • cat2 (Catalog) – A catalog or list of catalogs for the second N field.

  • +
  • cat3 (Catalog) – A catalog or list of catalogs for the third N field. +(default: None)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
  • comm (mpi4py.Comm) – If running MPI, an mpi4py Comm object to communicate between +processes. If used, the rank=0 process will have the final +computation. This only works if using patches. (default: None)

  • +
  • low_mem (bool) – Whether to sacrifice a little speed to try to reduce memory usage. +This only works if using patches. (default: False)

  • +
  • initialize (bool) – Whether to begin the calculation with a call to +BinnedCorr3.clear. (default: True)

  • +
  • finalize (bool) – Whether to complete the calculation with a call to finalize. +(default: True)

  • +
+
+
+
+ +
+
+process_cross(cat1, cat2, cat3, *, metric=None, num_threads=None)[source]
+

Process a set of three catalogs, accumulating the 3pt cross-correlation.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process

  • +
  • cat2 (Catalog) – The second catalog to process

  • +
  • cat3 (Catalog) – The third catalog to process

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+process_cross12(cat1, cat2, *, metric=None, num_threads=None)[source]
+

Process two catalogs, accumulating the 3pt cross-correlation, where one of the +points in each triangle come from the first catalog, and two come from the second.

+

This accumulates the cross-correlation for the given catalogs. After +calling this function as often as desired, the finalize command will +finish the calculation of meand1, meanlogd1, etc.

+
+

Note

+

This only adds to the attributes n1n2n3, n2n1n3, n2n3n1, not the ones where +3 comes before 2. When running this via the regular process method, it will +combine them at the end to make sure n1n2n3 == n1n3n2, etc. for a complete +calculation of the 1-2 cross-correlation.

+
+
+
Parameters
+
    +
  • cat1 (Catalog) – The first catalog to process. (1 point in each triangle will come +from this catalog.)

  • +
  • cat2 (Catalog) – The second catalog to process. (2 points in each triangle will come +from this catalog.)

  • +
  • metric (str) – Which metric to use. See Metrics for details. +(default: ‘Euclidean’; this value can also be given in the +constructor in the config dict.)

  • +
  • num_threads (int) – How many OpenMP threads to use during the calculation. +(default: use the number of cpu cores; this value can also be given +in the constructor in the config dict.)

  • +
+
+
+
+ +
+
+read(file_name, *, file_type=None)[source]
+

Read in values from a file.

+

This should be a file that was written by TreeCorr, preferably a FITS file, so there +is no loss of information.

+
+

Warning

+

The NNNCrossCorrelation object should be constructed with the same configuration +parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not +checked by the read function.

+
+
+
Parameters
+
    +
  • file_name (str) – The name of the file to read in.

  • +
  • file_type (str) – The type of file (‘ASCII’ or ‘FITS’). (default: determine the type +automatically from the extension of file_name.)

  • +
+
+
+
+ +
+
+write(file_name, *, file_type=None, precision=None, write_patch_results=False)[source]
+

Write the correlation function to the file, file_name.

+
+
Parameters
+
    +
  • file_name (str) – The name of the file to write to.

  • +
  • file_type (str) – The type of file to write (‘ASCII’ or ‘FITS’). (default: determine +the type automatically from the extension of file_name.)

  • +
  • precision (int) – For ASCII output catalogs, the desired precision. (default: 4; +this value can also be given in the constructor in the config dict.)

  • +
  • write_patch_results (bool) – Whether to write the patch-based results as well. +(default: False)

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_build/html/objects.inv b/docs/_build/html/objects.inv new file mode 100644 index 00000000..0f9efa94 Binary files /dev/null and b/docs/_build/html/objects.inv differ diff --git a/docs/_build/html/overview.html b/docs/_build/html/overview.html new file mode 100644 index 00000000..3b7cb572 --- /dev/null +++ b/docs/_build/html/overview.html @@ -0,0 +1,428 @@ + + + + + + Overview — TreeCorr 4.3.0 documentation + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Overview


TreeCorr is a package for efficiently computing 2-point and 3-point correlation +functions.

+
    +
  • The code is hosted at https://github.com/rmjarvis/TreeCorr

  • +
  • It can compute correlations of regular number counts, weak lensing shears, or scalar quantities such as convergence or CMB temperature fluctuations.

  • +
  • 2-point correlations may be auto-correlations or cross-correlations. This +includes shear-shear, count-shear, count-count, kappa-kappa, etc. (Any +combination of shear, kappa, and counts.)

  • +
  • 3-point correlations currently can only be auto-correlations. This includes +shear-shear-shear, count-count-count, and kappa-kappa-kappa. The cross +varieties are planned to be added in the near future.

  • +
  • Both 2- and 3-point functions can be done with the correct curved-sky +calculation using RA, Dec coordinates, on a Euclidean tangent plane, or in +3D using either (RA,Dec,r) or (x,y,z) positions.

  • +
  • The front end is in Python, which can be used as a Python module or as a +standalone executable using configuration files. (The executable is corr2 +for 2-point and corr3 for 3-point.)

  • +
  • The actual computation of the correlation functions is done in C++ using ball +trees (similar to kd trees), which make the calculation extremely efficient.

  • +
  • When available, OpenMP is used to run in parallel on multi-core machines.

  • +
  • Approximate running time for 2-point shear-shear is ~30 sec * (N/10^6) / core +for a bin size b=0.1 in log(r). It scales as b^(-2). This is the slowest +of the various kinds of 2-point correlations, so others will be a bit faster, +but with the same scaling with N and b.

  • +
  • The running time for 3-point functions is highly variable depending on the range of triangle geometries you are calculating. They are significantly slower than the 2-point functions, but many orders of magnitude faster than brute force algorithms.

  • +
  • If you use TreeCorr in published research, please reference: Jarvis, Bernstein, & Jain, 2004, MNRAS, 352, 338 (I’m working on a new paper about TreeCorr, including some of the improvements I’ve made since then, but this will suffice as a reference for now.)

  • +
  • Record on the Astrophysics Source Code Library: http://ascl.net/1508.007

  • +
  • Developed by Mike Jarvis. Feel free to contact me with questions or comments at mikejarvis17 at gmail. Or post an issue (see below) if you have any problems with the code.

  • +
+

The code is licensed under a FreeBSD license. Essentially, you can use the +code in any way you want, but if you distribute it, you need to include the +file TreeCorr_LICENSE with the distribution. See that file for details.

+
+

Installation

The easiest ways to install TreeCorr are either with pip:

pip install treecorr

or with conda:

conda install -c conda-forge treecorr

If you have previously installed TreeCorr, and want to upgrade to a new released version, you should do:

pip install treecorr --upgrade

or:

conda update -c conda-forge treecorr

Depending on the write permissions of the python distribution for your specific system, you might need to use one of the following variants for pip installation:

sudo pip install treecorr
pip install treecorr --user

The latter installs the Python module into ~/.local/lib/python3.X/site-packages, which is normally already in your PYTHONPATH, but it puts the executables corr2 and corr3 into ~/.local/bin, which is probably not in your PATH. To use these scripts, you should add this directory to your PATH. If you would rather install into a different prefix than ~/.local, you can use:

pip install treecorr --install-option="--prefix=PREFIX"

This would install the executables into PREFIX/bin and the Python module into PREFIX/lib/python3.X/site-packages.


If you would rather download the tarball and install TreeCorr yourself, that is also relatively straightforward:

1. Download TreeCorr

You can download the latest tarball from:

https://github.com/rmjarvis/TreeCorr/releases/

Or you can clone the repository using either of the following:

git clone git@github.com:rmjarvis/TreeCorr.git
git clone https://github.com/rmjarvis/TreeCorr.git

which will start out in the current stable release branch.

Either way, cd into the TreeCorr directory.

2. Install dependencies

All required dependencies should be installed automatically for you by pip or conda, so you should not need to worry about these. But if you are interested, the dependencies are:

  • numpy
  • pyyaml
  • LSSTDESC.Coord
  • cffi

They can all be installed at once by running:

pip install -r requirements.txt

or:

conda install -c conda-forge treecorr --only-deps

The last dependency is the only one that typically could cause any problems, since it in turn depends on a library called libffi. This is a common thing to have installed already on linux machines, so it is likely that you won't have any trouble with it, but if you get errors about "ffi.h" not being found, then you may need to either install it yourself or update your paths to include the directory where ffi.h is found.

See https://cffi.readthedocs.io/en/latest/installation.html for more information about installing cffi, including its libffi dependency.

Note

Three additional modules are not required for basic TreeCorr operations, but are potentially useful:

1. fitsio is required for reading FITS catalogs or writing to FITS output files.
2. pandas will significantly speed up reading from ASCII catalogs.
3. h5py is required for reading HDF5 catalogs.

These are all pip installable:

pip install fitsio
pip install pandas
pip install h5py

But they are not installed with TreeCorr automatically.

3. Install

You can then install TreeCorr from the local distribution. Typically this would be the command:

pip install .

If you don't have write permission in your python distribution, you might need to use:

pip install . --user

In addition to installing the Python module treecorr, this will install the executables corr2 and corr3 in a bin folder somewhere on your system. Look for a line like:

Installing corr2 script to /anaconda3/bin

or similar in the output to see where the scripts are installed. If the directory is not in your path, you will also get a warning message at the end letting you know which directory you should add to your path if you want to run these scripts.

4. Run Tests (optional)

If you want to run the unit tests, you can do the following:

pip install -r test_requirements.txt
cd tests
pytest

Two-point Correlations

This software is able to compute a variety of two-point correlations:

NN
The normal two-point correlation function of number counts (typically galaxy counts).

GG
Two-point shear-shear correlation function.

KK
Nominally the two-point kappa-kappa correlation function, although any scalar quantity can be used as "kappa". In lensing, kappa is the convergence, but this could be used for temperature, size, etc.

NG
Cross-correlation of counts with shear. This is what is often called galaxy-galaxy lensing.

NK
Cross-correlation of counts with kappa. Again, "kappa" here can be any scalar quantity.

KG
Cross-correlation of convergence with shear. Like the NG calculation, but weighting the pairs by the kappa values of the foreground points.

See Two-point Correlation Functions for more details.

Three-point Correlations

This software is not yet able to compute three-point cross-correlations, so the only available three-point correlations are:

NNN
Three-point correlation function of number counts.

GGG
Three-point shear correlation function. We use the "natural components" called Gamma, described by Schneider & Lombardi (2003) (Astron.Astrophys. 397, 809), using the triangle centroid as the reference point.

KKK
Three-point kappa correlation function. Again, "kappa" here can be any scalar quantity.

See Three-point Correlation Functions for more details.

Running corr2 and corr3

The executables corr2 and corr3 each take one required command-line argument, which is the name of a configuration file:

corr2 config_file
corr3 config_file

A sample configuration file for corr2 is provided, called sample.params. See Configuration Parameters for the complete documentation about the allowed parameters.

You can also specify parameters on the command line after the name of the configuration file. e.g.:

corr2 config_file file_name=file1.dat gg_file_name=file1.out
corr2 config_file file_name=file2.dat gg_file_name=file2.out
...

This can be useful when running the program from a script for lots of input files, as in the sketch below.
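
The same kind of loop can also be driven from Python rather than the shell. The following is a minimal sketch, assuming a config file named config_file and input files file1.dat, file2.dat (hypothetical names), using treecorr.read_config and treecorr.corr2, which run the same processing as the executable:

>>> import treecorr
>>> config = treecorr.read_config('config_file')
>>> for n in (1, 2):
...     # Override the input and output file names for each run.
...     config['file_name'] = 'file%d.dat' % n
...     config['gg_file_name'] = 'file%d.out' % n
...     treecorr.corr2(config)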

See Using configuration files for more details.

Using the Python module

The typical usage in python is in three stages, plus an optional fourth:

1. Define one or more Catalogs with the input data to be correlated.
2. Define the correlation function that you want to perform on those data.
3. Run the correlation by calling process.
4. Maybe write the results to a file or use them in some way.

For instance, computing a shear-shear correlation from an input file stored in a fits file would look something like the following:

>>> import treecorr
>>> cat = treecorr.Catalog('cat.fits', ra_col='RA', dec_col='DEC',
...                        ra_units='degrees', dec_units='degrees',
...                        g1_col='GAMMA1', g2_col='GAMMA2')
>>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., bin_size=0.1,
...                             sep_units='arcmin')
>>> gg.process(cat)
>>> xip = gg.xip  # The xi_plus correlation function
>>> xim = gg.xim  # The xi_minus correlation function
>>> gg.write('gg.out')  # Write results to a file

For more details, see our slightly longer Getting Started Guide.

Or for a more involved worked example, see our Jupyter notebook tutorial.

And for the complete details about all aspects of the code, see the Sphinx-generated documentation.

Reporting bugs

If you find a bug running the code, please report it at:

https://github.com/rmjarvis/TreeCorr/issues

Click "New Issue", which will open up a form for you to fill in with the details of the problem you are having.

Requesting features

If you would like to request a new feature, do the same thing. Open a new issue and fill in the details of the feature you would like added to TreeCorr. Or if there is already an issue for your desired feature, please add to the discussion, describing your use case. The more people who say they want a feature, the more likely I am to get around to it sooner rather than later.
Configuration Parameters

This section describes the various configuration parameters for controlling what the corr2 and corr3 scripts (or functions) do:

Parameters about the input file(s)

file_name
(str or list) The file(s) with the data to be correlated.

For an auto-correlation, like cosmic shear, this will be the only file name you need to specify. This parameter is always required, and depending on what kind of correlation you are doing, you may need to specify others below.

Normally, there would only be a single file name here, but sometimes the galaxy data comes in multiple files. To treat them all as though they were a single large catalog, you may specify a list of file names here:

file_name : [ file1.dat, file2.dat, file3.dat ]

If you are specifying this on the command line, you'll need to put quotes around the names, or it won't be parsed correctly:

file_name="[file1.dat,file2.dat,file3.dat]"

file_name2
(str or list) The file(s) to use for the second field for a cross-correlation.

If you want to cross-correlate one file (or set of files) with another, then file_name2 is used to specify the second thing being correlated. e.g. for galaxy-galaxy lensing, file_name should be the catalog of lenses, and file_name2 should be the catalog of source shear values.

file_name3
(str or list) The file(s) to use for the third field for a three-point cross-correlation.

rand_file_name
(str or list) If necessary, a list of random files with the same masking as the file_name catalog.

rand_file_name2
(str or list) If necessary, a list of random files with the same masking as the file_name2 catalog.

rand_file_name3
(str or list) If necessary, a list of random files with the same masking as the file_name3 catalog.

When doing NN and NNN correlations, you need to account for masks and variable depth by providing a file or list of files that correspond to a uniform-density field as observed with the same masking and other observational details. For cross-correlations, you need to provide both of the above values to separately calibrate the first and second fields.

rand_file_name may also be used for NG and NK correlations, but it is not required in those cases.

file_list
(str) A text file with file names in lieu of file_name.

file_list2
(str) A text file with file names in lieu of file_name2.

file_list3
(str) A text file with file names in lieu of file_name3.

rand_file_list
(str) A text file with file names in lieu of rand_file_name.

rand_file_list2
(str) A text file with file names in lieu of rand_file_name2.

rand_file_list3
(str) A text file with file names in lieu of rand_file_name3.

If you have a list of file names, it may be cumbersome to list them all in the file_name (etc.) parameter. It may be easier to do something like ls *.cat > catlist and then use file_list=catlist as the list of file names to use. Of course, it is an error to specify both file_list and file_name (or any of the other corresponding pairs).

file_type
(ASCII, FITS, HDF5, or Parquet) The file type of the input files.

delimiter
(str, default='0') The delimiter between input values in an ASCII catalog.

comment_marker
(str, default='#') The first (non-whitespace) character of comment lines in an input ASCII catalog.

The default file type is normally ASCII. However, if the file name includes ".fit" in it, then a fits binary table is assumed. You can override this behavior using file_type.

Furthermore, you may specify a delimiter for ASCII catalogs if desired. e.g. delimiter=',' for a comma-separated value file. Similarly, comment lines usually begin with '#', but you may specify something different if necessary.

ext
(int/str, default=1 for FITS or root for HDF5) The extension (fits) or group (hdf) to read from.

Normally if you are using a fits file, the binary fits table is taken from the first extension, HDU 1. If you want to read from a different HDU, you can specify which one to use here. For HDF files, the default is to read from the root of the file, but you can also specify group names like "/data/cat1".

first_row
(int, default=1)

last_row
(int, default=-1)

every_nth
(int, default=1)

You can optionally not use all the rows in the input file. You may specify first_row, last_row, or both to limit the rows being used. The rows are numbered starting with 1. If last_row is not positive, it means to use to the end of the file. If every_nth is set, it will skip rows, selecting only 1 out of every n rows. See the sketch below for an example.
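
For instance, a minimal sketch of using these row-selection options from the Python Catalog constructor (the file name cat.dat and the config dict here are hypothetical):

>>> import treecorr
>>> # Skip a header row, stop at row 1000000, and keep every 10th row in between.
>>> cat = treecorr.Catalog('cat.dat', config, first_row=2, last_row=1000000,
...                        every_nth=10)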

npatch
(int, default=1)

How many patches to split the catalog into (using kmeans if no other patch information is provided) for the purpose of jackknife variance or other options that involve running via patches. (default: 1)

Note

If the catalog has ra, dec, r positions, the patches will be made using just ra, dec.

kmeans_init
(str, default='tree')

kmeans_alt
(bool, default=False)

If using kmeans to make patches, these two parameters specify which init method to use and whether to use the alternate kmeans algorithm. cf. Field.run_kmeans

patch_centers
(str)

As an alternative to setting patch by hand or using kmeans, you may instead give patch_centers, either as a file name or an array, from which the patches will be determined.

x_col
(int/str) Which column to use for x.

y_col
(int/str) Which column to use for y.

ra_col
(int/str) Which column to use for ra.

dec_col
(int/str) Which column to use for dec.

For the positions of the objects, you can specify either x,y values, which imply a flat-sky approximation has already been performed (or ignored), or ra,dec values, which are of course positions on the curved sky.

For ASCII files, the columns are specified by number, starting with 1 being the first column (not 0!). For FITS files, the columns are specified by name, not number.

x_units
(str, default=None) The units of x values.

y_units
(str, default=None) The units of y values.

ra_units
(str) The units of ra values.

dec_units
(str) The units of dec values.

All distances on the sky include a "units" parameter to specify in what units the values are given. Options for units are radians, hours, degrees, arcmin, arcsec. For ra, dec the units field is required. But for x,y, you can ignore all the unit issues, in which case the output distances will be in the same units as the input positions.

r_col
(int/str) Which column to use for r.

When using spherical coordinates, ra,dec, you can optionally provide a distance to the object. In this case, the calculation will be done in three dimensional distances rather than angular distances. The distances between objects will be the 3-D Euclidean distance, so you should define your r values appropriately, given whatever cosmology you are assuming.

r_col is invalid in conjunction with x_col, y_col.

z_col
(int/str) Which column to use for z.

Rather than specifying 3-D coordinates as (ra, dec, r), you may instead specify them as (x, y, z).

z_col is invalid in conjunction with ra_col, dec_col.

g1_col
(int/str) Which column to use for g1.

g2_col
(int/str) Which column to use for g2.

If you are doing one of the shear correlation functions (i.e. NG, KG, GG), then you need to specify the shear estimates of the corresponding galaxies. The g1,g2 values are taken to be reduced shear values. They should be unbiased estimators of g1,g2, so they are allowed to exceed |g| = 1. (This is required for some methods to produce unbiased estimates.)

k_col
(int/str) Which column to use for kappa.

If you are doing one of the kappa correlation functions (i.e. NK, KG, KK), then you need to specify the column to use for kappa. While kappa is nominally the lensing convergence, it could really be any scalar quantity, like temperature, size, etc.

patch_col
(int/str) Which column to use for patch.

Use precalculated patch numbers to split the catalog into patches.

w_col
(int/str) Which column to use for the weight (if any).

wpos_col
(int/str) Which column to use for the position weight (if any).

The weight column is optional. If omitted, all weights are taken to be 1.

flag_col
(int/str) Which column to use for the flag (if any).

ignore_flag
(int) What flag(s) should be ignored.

ok_flag
(int) What flag(s) are ok to use.

The code can be set to ignore objects with a particular flag value if desired. Some codes output a flag along with the shear value. Typically any flag != 0 should be ignored, but you can optionally have the code ignore only particular flags, treating the flag value as a bit mask. If ignore_flag is set to something, then objects with (flag & ignore_flag != 0) will be ignored. If ok_flag is set, then objects with (flag & ~ok_flag != 0) will be ignored. The default is equivalent to ok_flag = 0, which ignores any flag != 0.
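
To make the bit-mask semantics concrete, here is a small illustration in plain numpy (not TreeCorr API) of which objects survive a given ignore_flag:

>>> import numpy as np
>>> flag = np.array([0, 1, 2, 3, 4])
>>> ignore_flag = 3              # ignore any object with bit 1 or bit 2 set
>>> use = (flag & ignore_flag) == 0
>>> print(use)                   # [ True False False False  True]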

x_ext
(int/str) Which HDU (fits) or group (HDF) to use for the x_col.

y_ext
(int/str) Which HDU (fits) or group (HDF) to use for the y_col.

z_ext
(int/str) Which HDU (fits) or group (HDF) to use for the z_col.

ra_ext
(int/str) Which HDU (fits) or group (HDF) to use for the ra_col.

dec_ext
(int/str) Which HDU (fits) or group (HDF) to use for the dec_col.

r_ext
(int/str) Which HDU (fits) or group (HDF) to use for the r_col.

g1_ext
(int/str) Which HDU (fits) or group (HDF) to use for the g1_col.

g2_ext
(int/str) Which HDU (fits) or group (HDF) to use for the g2_col.

k_ext
(int/str) Which HDU (fits) or group (HDF) to use for the k_col.

patch_ext
(int/str) Which HDU (fits) or group (HDF) to use for the patch_col.

w_ext
(int/str) Which HDU (fits) or group (HDF) to use for the w_col.

wpos_ext
(int/str) Which HDU (fits) or group (HDF) to use for the wpos_col.

flag_ext
(int/str) Which HDU (fits) or group (HDF) to use for the flag_col.

If you want to use an extension other than the first one, normally you would specify which fits extension or HDF5 group to use with the ext parameter. However, if different columns need to come from different HDUs, then you can override the default (given by ext, or '1' (fits), or '/' (HDF) if there is no ext parameter) for each column separately.

allow_xyz
(bool, default=False)

Whether to allow x,y,z columns in conjunction with ra, dec.

flip_g1
(bool, default=False) Whether to flip the sign of g1.

flip_g2
(bool, default=False) Whether to flip the sign of g2.

Sometimes there are issues with the sign conventions of gamma. If you need to flip the sign of g1 or g2, you may do that with flip_g1 or flip_g2 (or both).

keep_zero_weight
(bool, default=False)

Whether to keep objects with wpos=0 in the catalog (including any objects that indirectly get wpos=0 due to NaN or flags), so they would be included in ntot and also in npairs calculations that use this Catalog, although they do not, of course, contribute to the accumulated weight of pairs.

Note

  • If you are cross-correlating two files with different formats, you may set any of the above items from file_type to flip_g2 as a two-element list (i.e. two values separated by a space). In this case, the first item refers to the file(s) in file_name, and the second item refers to the file(s) in file_name2. (See the sketch below.)
  • You may not mix (x,y) columns with (ra,dec) columns, since the meaning would be ambiguous.
  • If you don't need a particular column for one of the files, you may use 0 to indicate not to read that column. This is true for any format of input catalog.
  • Also, if the given column only applies to one of the two input files (e.g. k_col for an n-kappa cross-correlation), then you may specify just the column name or number for the file to which it does apply.
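
For example, a minimal configuration-file sketch of the two-element form described in the note, with hypothetical file names, cross-correlating a FITS lens catalog (named columns) with an ASCII source catalog (numbered columns):

file_name = lenses.fits
file_name2 = sources.dat
file_type = FITS ASCII
ra_col = RA 1
dec_col = DEC 2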
Parameters about the binned correlation function to be calculated

bin_type
(str, default='Log') Which type of binning should be used.

See Binning for details.

min_sep
(float) The minimum separation to include in the output.

max_sep
(float) The maximum separation to include in the output.

nbins
(int) The number of output bins to use.

bin_size
(float) The size of the output bins in log(sep).

The bins for the histogram may be defined by setting any 3 of the above 4 parameters. The fourth one is automatically calculated from the values of the other three; see the sketch below.

See Binning for details about how these parameters are used for the different choices of bin_type.
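
For the default 'Log' bin_type, the relation among the four parameters is roughly the following (TreeCorr internally adjusts bin_size slightly so an integer number of bins exactly spans the range); the numbers here are arbitrary:

>>> import math
>>> min_sep, max_sep, bin_size = 1., 100., 0.1
>>> # nbins follows from the other three: bins of width bin_size in log(sep).
>>> nbins = int(math.ceil(math.log(max_sep / min_sep) / bin_size))
>>> print(nbins)  # 47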

sep_units
(str, default=None) The units to use for min_sep and max_sep.

sep_units is also the units of R in the output file. For ra, dec values, you should always specify sep_units explicitly to indicate what angular units you want to use for the separations. But if your catalogs use x,y, or if you specify 3-d correlations with r, then the output separations are in the same units as the input positions.

See sep_units for more discussion about this parameter.

bin_slop
(float, default=1) The fraction of a bin width by which it is ok to let the pairs miss the correct bin.

The code normally determines when to stop traversing the tree when all of the distance pairs for the two nodes have a spread in distance that is less than the bin size. i.e. the error in the tree traversal is less than the uncertainty induced by just binning the results into a histogram. This factor can be changed by the parameter bin_slop. It is probably best to keep it at 1, but if you want to make the code more conservative, you can decrease it, in which case the error from using the tree nodes will be less than the error in the histogram binning. (In practice, if you are going to do this, you are probably better off just decreasing the bin_size instead and leaving bin_slop=1.)

See bin_slop for more discussion about this parameter.

brute
(bool/int, default=False) Whether to do the "brute force" algorithm, where the tree traversal always goes to the leaf cells.

In addition to True or False, whose meanings are obvious, you may also set brute to 1 or 2, which means to go to the leaves for cat1 or cat2, respectively, but stop traversing the other catalog according to the normal bin_slop criterion.

See brute for more discussion about this parameter.

min_u
(float) The minimum u=d3/d2 to include for three-point functions.

max_u
(float) The maximum u=d3/d2 to include for three-point functions.

nubins
(int) The number of output bins to use for u.

ubin_size
(float) The size of the output bins for u.

min_v
(float) The minimum positive v=(d1-d2)/d3 to include for three-point functions.

max_v
(float) The maximum positive v=(d1-d2)/d3 to include for three-point functions.

nvbins
(int) The number of output bins to use for positive v. The total number of bins in the v direction will be twice this number.

vbin_size
(float) The size of the output bins for v.
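
Putting the three-point binning parameters together, a sketch of setting up an NNNCorrelation from Python might look like the following (the specific values here are arbitrary illustrations):

>>> import treecorr
>>> nnn = treecorr.NNNCorrelation(min_sep=1., max_sep=100., nbins=20,
...                               min_u=0.2, max_u=0.9, nubins=7,
...                               min_v=0., max_v=0.6, nvbins=6,
...                               sep_units='arcmin')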
metric
(str, default='Euclidean') Which metric to use for distance measurements.

See Metrics for details.

min_rpar
(float) If the metric supports it, the minimum Rparallel to allow for pairs to be included in the correlation function.

max_rpar
(float) If the metric supports it, the maximum Rparallel to allow for pairs to be included in the correlation function.

period
(float) For the 'Periodic' metric, the period to use in all directions.

xperiod
(float) For the 'Periodic' metric, the period to use in the x direction.

yperiod
(float) For the 'Periodic' metric, the period to use in the y direction.

zperiod
(float) For the 'Periodic' metric, the period to use in the z direction.


Parameters about the output file(s)

The kind of correlation function that the code will calculate is based on which output file(s) you specify. It will do the calculation(s) relevant for each output file you set. For each output file, the first line of the output says what the columns are. See the descriptions below for more information about the output columns.

nn_file_name
(str) The output filename for the count-count correlation function.

This is the normal density two-point correlation function.

The output columns are:

  • R_nom = The center of the bin
  • meanR = The mean separation of the points that went into the bin.
  • meanlogR = The mean log(R) of the points that went into the bin.
  • xi = The correlation function.
  • sigma_xi = The 1-sigma error bar for xi.
  • DD, RR = The raw numbers of pairs for the data and randoms
  • DR (if nn_statistic=compensated) = The cross term between data and random.
  • RD (if nn_statistic=compensated for a cross-correlation) = The cross term between random and data, which for a cross-correlation is not equivalent to DR.

nn_statistic
(str, default='compensated') Which statistic to use for xi as the estimator of the NN correlation function.

Options are (D = data catalog, R = random catalog):

  • 'compensated' is the now-standard Landy-Szalay statistic: xi = (DD-2DR+RR)/RR, or for cross-correlations, xi = (DD-DR-RD+RR)/RR
  • 'simple' is the older version: xi = (DD/RR - 1)

A sketch of the compensated calculation via the Python API follows.
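
The following is a minimal sketch of the Landy-Szalay estimator from Python, assuming data_cat and rand_cat are hypothetical Catalogs with the same masking; NNCorrelation.calculateXi combines the pair counts as above:

>>> import treecorr
>>> bins = dict(min_sep=1., max_sep=100., bin_size=0.1, sep_units='arcmin')
>>> dd = treecorr.NNCorrelation(**bins)
>>> rr = treecorr.NNCorrelation(**bins)
>>> dr = treecorr.NNCorrelation(**bins)
>>> dd.process(data_cat)             # DD pair counts
>>> rr.process(rand_cat)             # RR pair counts
>>> dr.process(data_cat, rand_cat)   # DR cross pair counts
>>> xi, varxi = dd.calculateXi(rr=rr, dr=dr)  # (DD - 2DR + RR)/RR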

ng_file_name
(str) The output filename for the count-shear correlation function.

This is the count-shear correlation function, often called galaxy-galaxy lensing.

The output columns are:

  • R_nom = The center of the bin
  • meanR = The mean separation of the points that went into the bin.
  • meanlogR = The mean log(R) of the points that went into the bin.
  • gamT = The mean tangential shear with respect to the point in question.
  • gamX = The shear component 45 degrees from the tangential direction.
  • sigma = The 1-sigma error bar for gamT and gamX.
  • weight = The total weight of the pairs in each bin.
  • npairs = The total number of pairs in each bin.

ng_statistic
(str, default='compensated' if rand_files is given, otherwise 'simple') Which statistic to use for the mean shear as the estimator of the NG correlation function.

Options are:

  • 'compensated' is similar to the Landy-Szalay statistic. Define:
    • NG = Sum(gamma around data points)
    • RG = Sum(gamma around random points), scaled to be equivalent in effective number as the number of pairs in NG.
    • npairs = number of pairs in NG.
    Then this statistic is gamT = (NG-RG)/npairs. (See the sketch below.)
  • 'simple' is the normal version: gamT = NG/npairs
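
In the Python API, the compensated statistic corresponds roughly to the following sketch, where lens_cat, source_cat, and rand_cat are hypothetical Catalogs; NGCorrelation.calculateXi applies the random-point compensation:

>>> import treecorr
>>> bins = dict(min_sep=1., max_sep=100., bin_size=0.1, sep_units='arcmin')
>>> ng = treecorr.NGCorrelation(**bins)
>>> rg = treecorr.NGCorrelation(**bins)
>>> ng.process(lens_cat, source_cat)   # NG: shear around data points
>>> rg.process(rand_cat, source_cat)   # RG: shear around random points
>>> gamT, gamT_im, vargamT = ng.calculateXi(rg=rg)  # compensated estimate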

gg_file_name
(str) The output filename for the shear-shear correlation function.

This is the shear-shear correlation function, used for cosmic shear.

The output columns are:

  • R_nom = The center of the bin
  • meanR = The mean separation of the points that went into the bin.
  • meanlogR = The mean log(R) of the points that went into the bin.
  • xip = <g1 g1 + g2 g2> where g1 and g2 are measured with respect to the line joining the two galaxies.
  • xim = <g1 g1 - g2 g2> where g1 and g2 are measured with respect to the line joining the two galaxies.
  • xip_im = <g2 g1 - g1 g2>. In the formulation of xi+ using complex numbers, this is the imaginary component. It should normally be consistent with zero, especially for an auto-correlation, because if every pair were counted twice to get each galaxy in both positions, then this would come out exactly zero.
  • xim_im = <g2 g1 + g1 g2>. In the formulation of xi- using complex numbers, this is the imaginary component. It should be consistent with zero for parity invariant shear fields.
  • sigma_xi = The 1-sigma error bar for xi+ and xi-.
  • weight = The total weight of the pairs in each bin.
  • npairs = The total number of pairs in each bin.

nk_file_name
(str) The output filename for the count-kappa correlation function.

This is nominally the kappa version of the ng calculation. However, k is really any scalar quantity, so it can be used for temperature, size, etc.

The output columns are:

  • R_nom = The center of the bin
  • meanR = The mean separation of the points that went into the bin.
  • meanlogR = The mean log(R) of the points that went into the bin.
  • kappa = The mean kappa this distance from the foreground points.
  • sigma = The 1-sigma error bar for <kappa>.
  • weight = The total weight of the pairs in each bin.
  • npairs = The total number of pairs in each bin.

nk_statistic
(str, default='compensated' if rand_files is given, otherwise 'simple') Which statistic to use for the mean kappa as the estimator of the NK correlation function.

Options are:

  • 'compensated' is similar to the Landy-Szalay statistic. Define:
    • NK = Sum(kappa around data points)
    • RK = Sum(kappa around random points), scaled to be equivalent in effective number as the number of pairs in NK.
    • npairs = number of pairs in NK.
    Then this statistic is <kappa> = (NK-RK)/npairs
  • 'simple' is the normal version: <kappa> = NK/npairs

kk_file_name
(str) The output filename for the kappa-kappa correlation function.

This is the kappa-kappa correlation function. However, k is really any scalar quantity, so it can be used for temperature, size, etc.

The output columns are:

  • R_nom = The center of the bin
  • meanR = The mean separation of the points that went into the bin.
  • meanlogR = The mean log(R) of the points that went into the bin.
  • xi = The correlation function <k k>
  • sigma_xi = The 1-sigma error bar for xi.
  • weight = The total weight of the pairs in each bin.
  • npairs = The total number of pairs in each bin.

kg_file_name
(str) The output filename for the kappa-shear correlation function.

This is the kappa-shear correlation function. Essentially, this is just galaxy-galaxy lensing, weighting the tangential shears by the foreground kappa values.

The output columns are:

  • R_nom = The center of the bin
  • meanR = The mean separation of the points that went into the bin.
  • meanlogR = The mean log(R) of the points that went into the bin.
  • kgamT = The kappa-weighted mean tangential shear.
  • kgamX = The kappa-weighted shear component 45 degrees from the tangential direction.
  • sigma = The 1-sigma error bar for kgamT and kgamX.
  • weight = The total weight of the pairs in each bin.
  • npairs = The total number of pairs in each bin.

nnn_file_name
(str) The output filename for the count-count-count correlation function.

This is the three-point correlation function of number counts.

The output columns are:

  • R_nom = The center of the bin in R = d2, where d1 > d2 > d3
  • u_nom = The center of the bin in u = d3/d2
  • v_nom = The center of the bin in v = +-(d1-d2)/d3
  • meand1 = The mean value of d1 for the triangles in each bin
  • meanlogd1 = The mean value of log(d1) for the triangles in each bin
  • meand2 = The mean value of d2 for the triangles in each bin
  • meanlogd2 = The mean value of log(d2) for the triangles in each bin
  • meand3 = The mean value of d3 for the triangles in each bin
  • meanlogd3 = The mean value of log(d3) for the triangles in each bin
  • zeta = The correlation function.
  • sigma_zeta = The 1-sigma error bar for zeta.
  • DDD, RRR = The raw numbers of triangles for the data and randoms
  • DDR, DRD, RDD, DRR, RDR, RRD (if nnn_statistic=compensated) = The cross terms between data and random.

nnn_statistic
(str, default='compensated') Which statistic to use for zeta as the estimator of the NNN correlation function.

Options are:

  • 'compensated' is the Szapudi & Szalay (1998) estimator: zeta = (DDD-DDR-DRD-RDD+DRR+RDR+RRD-RRR)/RRR
  • 'simple' is the older version: zeta = (DDD/RRR - 1), although this is not actually an estimator of zeta. Rather, it estimates zeta(d1,d2,d3) + xi(d1) + xi(d2) + xi(d3).

ggg_file_name
(str) The output filename for the shear-shear-shear correlation function.

This is the shear three-point correlation function. We use the "natural components" suggested by Schneider & Lombardi (2003): Gamma_0, Gamma_1, Gamma_2, Gamma_3. All are complex-valued functions of (d1,d2,d3). They offer several options for the projection direction. We choose to use the triangle centroid as the reference point.

The output columns are:

  • R_nom = The center of the bin in R = d2, where d1 > d2 > d3
  • u_nom = The center of the bin in u = d3/d2
  • v_nom = The center of the bin in v = +-(d1-d2)/d3
  • meand1 = The mean value of d1 for the triangles in each bin
  • meanlogd1 = The mean value of log(d1) for the triangles in each bin
  • meand2 = The mean value of d2 for the triangles in each bin
  • meanlogd2 = The mean value of log(d2) for the triangles in each bin
  • meand3 = The mean value of d3 for the triangles in each bin
  • meanlogd3 = The mean value of log(d3) for the triangles in each bin
  • gam0r = The real part of Gamma_0.
  • gam0i = The imag part of Gamma_0.
  • gam1r = The real part of Gamma_1.
  • gam1i = The imag part of Gamma_1.
  • gam2r = The real part of Gamma_2.
  • gam2i = The imag part of Gamma_2.
  • gam3r = The real part of Gamma_3.
  • gam3i = The imag part of Gamma_3.
  • sigma_gam = The 1-sigma error bar for the Gamma values.
  • weight = The total weight of the triangles in each bin.
  • ntri = The total number of triangles in each bin.

kkk_file_name
(str) The output filename for the kappa-kappa-kappa correlation function.

This is the three-point correlation function of a scalar field.

The output columns are:

  • R_nom = The center of the bin in R = d2, where d1 > d2 > d3
  • u_nom = The center of the bin in u = d3/d2
  • v_nom = The center of the bin in v = +-(d1-d2)/d3
  • meand1 = The mean value of d1 for the triangles in each bin
  • meanlogd1 = The mean value of log(d1) for the triangles in each bin
  • meand2 = The mean value of d2 for the triangles in each bin
  • meanlogd2 = The mean value of log(d2) for the triangles in each bin
  • meand3 = The mean value of d3 for the triangles in each bin
  • meanlogd3 = The mean value of log(d3) for the triangles in each bin
  • zeta = The correlation function.
  • sigma_zeta = The 1-sigma error bar for zeta.
  • weight = The total weight of the triangles in each bin.
  • ntri = The total number of triangles in each bin.

precision
(int) The number of digits after the decimal in the output.

All output quantities are printed using scientific notation, so this sets the number of digits output for all values. The default precision is 4. So if you want more (or less) precise values, you can set this to something else.

Derived output quantities

The rest of these output files are calculated based on one or more correlation functions.

m2_file_name
(str) The output filename for the aperture mass statistics.

This file outputs the aperture mass variance and related quantities, derived from the shear-shear correlation function.

The output columns are:

  • R = The radius of the aperture. (Spaced the same way as R_nom is in the correlation function output files.)
  • Mapsq = The E-mode aperture mass variance for each radius R.
  • Mxsq = The B-mode aperture mass variance.
  • MMxa, MMxb = Two semi-independent estimates for the E-B cross term. (Both should be consistent with zero for parity invariant shear fields.)
  • sig_map = The 1-sigma error bar for these values.
  • Gamsq = The variance of the top-hat weighted mean shear in apertures of the given radius R.
  • sig_gam = The 1-sigma error bar for Gamsq.

m2_uform
(str, default='Crittenden') The functional form of the aperture to use.

The form of the aperture mass statistic popularized by Schneider is

U = 9/Pi (1-r^2) (1/3-r^2)
Q = 6/Pi r^2 (1-r^2)

However, in many ways the form used by Crittenden:

U = 1/2Pi (1-r^2/2) exp(-r^2/2)
Q = 1/4Pi r^2 exp(-r^2/2)

is easier to use. For example, the skewness of the aperture mass has a closed-form solution in terms of the 3-point function for the Crittenden form, but no such formula is known for the Schneider form.

The m2_uform parameter allows you to switch between the two forms, at least for 2-point applications. (You will get an error if you try to use 'Schneider' with the m3 output.)
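
From Python, these statistics can be obtained from a GGCorrelation after processing. A minimal sketch (assuming cat is an existing shear Catalog) using GGCorrelation.calculateMapSq, which accepts the same m2_uform choice:

>>> import treecorr
>>> gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., bin_size=0.1,
...                             sep_units='arcmin')
>>> gg.process(cat)
>>> # E-mode and B-mode aperture mass variance with the Crittenden aperture.
>>> mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(
...     m2_uform='Crittenden')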

nm_file_name
(str) The output filename for <N Map> and related values.

This file outputs the correlation of the aperture mass with the aperture-smoothed density field, derived from the count-shear correlation function.

The output columns are:

  • R = The radius of the aperture. (Spaced the same way as R_nom is in the correlation function output files.)
  • NMap = The E-mode aperture mass correlated with the density smoothed with the same aperture profile as the aperture mass statistic uses.
  • NMx = The corresponding B-mode statistic.
  • sig_nmap = The 1-sigma error bar for these values.

norm_file_name
(str) The output filename for <Nap Map>^2/<Nap^2><Map^2> and related values.

This file outputs the <Nap Map> values normalized by <Nap^2><Map^2>. This provides an estimate of the correlation coefficient, r.

The output columns are:

  • R = The radius of the aperture. (Spaced the same way as R_nom is in the correlation function output files.)
  • NMap = The E-mode aperture mass correlated with the density smoothed with the same aperture profile as the aperture mass statistic uses.
  • NMx = The corresponding B-mode statistic.
  • sig_nmap = The 1-sigma error bar for these values.
  • Napsq = The variance of the aperture-weighted galaxy density.
  • sig_napsq = The 1-sigma error bar for <Nap^2>.
  • Mapsq = The aperture mass variance.
  • sig_mapsq = The 1-sigma error bar for <Map^2>.
  • NMap_norm = <Nap Map>^2 / (<Nap^2> <Map^2>)
  • sig_norm = The 1-sigma error bar for this value.
  • Nsq_Mapsq = <Nap^2> / <Map^2>
  • sig_nn_mm = The 1-sigma error bar for this value.

Miscellaneous parameters

verbose
(int, default=1) How verbose the code should be during processing.

  • 0 = no output unless there is an error
  • 1 = output warnings
  • 2 = output progress information
  • 3 = output extra debugging lines

This is overridden by the -v command-line argument for the corr2 executable.

log_file
(str, default=None) Where to write the logging information.

The default is to write lines to the screen, but this option allows you to write them to a file instead. With the corr2 executable, this can also be specified with the -l command-line argument.

output_dots
(bool, default=(verbose>=2)) Whether to output progress dots during the calculation of the correlation function.

split_method
(str, default='mean') Which method to use for splitting cells.

When building the tree, there are three obvious choices for how to split a set of points into two child cells. The direction is always taken to be the coordinate direction with the largest extent. Then, in that direction, you can split at the mean value, the median value, or the "middle" = (xmin+xmax)/2. To select among these, split_method may be given as "mean", "median", or "middle" respectively.

In addition, sometimes it may be useful to inject some randomness into the tree construction to study how much the results depend on the specific splitting used. For that purpose, there is also the option to set split_method = 'random', which will choose a random point in the middle two quartiles of the range.

min_top
(int, default=3) The minimum number of top layers to use when setting up the field.

The OpenMP parallelization happens over the top-level cells, so setting this > 0 ensures that there will be multiple jobs to be run in parallel. For systems with very many cores, it may be helpful to set this larger than the default value of 3.

max_top
(int, default=10) The maximum number of top layers to use when setting up the field.

The top-level cells are the cells where each calculation job starts. There will typically be of order 2^max_top top-level cells.

num_threads
(int, default=0) How many (OpenMP) threads should be used.

The default is to try to determine the number of cpu cores your system has and use that many threads.
Patches

Normally, TreeCorr is used to compute the auto-correlation function of data in a single input Catalog or the cross-correlation of data from two Catalogs. However, there are a number of reasons that it might make sense to divide up a region into several smaller patches for computing the correlation function:

1. To compute a more accurate covariance matrix. There are a number of ways to compute more accurate covariance estimates from the data than the default method. All of them require dividing up the data into patches and doing different things with the patch-to-patch correlations. See Covariance Estimates for details.
2. To save memory. The entire data set might be too large to fit in memory, so you might want to divide it up so less data is required to be in memory at a time. See Reducing Memory Use below.
3. To split the job among multiple machines. TreeCorr does a good job of utilizing many cores on a single machine using OpenMP. However, for very large jobs, you may want to also split the work among more than one node on a cluster. The most effective way to do this is to split the data into patches. See Using MPI below.
4. To run k-means on some data set for non-correlation reasons. TreeCorr happens to have an extremely efficient implementation of the k-means algorithm. So if you want to perform k-means clustering on some data that can be represented in a TreeCorr Catalog (i.e. only 2 or 3 spatial dimensions), then using TreeCorr may be a particularly efficient way to do the clustering. See Running K-Means below.

Below we describe how to split up an input Catalog into patches and a few things you can do with it once you have done so.

Defining Patches on Input

The most straightforward way to define which object goes in which patch is to just tell TreeCorr the patch number for each object explicitly.

If passing in numpy arrays for everything, then just pass in a patch parameter with integer values indicating the patch number.

If reading in data from a file, then set a patch_col to use, which should have these values.

The next simplest way to define the patches is to tell TreeCorr how many patches you want using npatch. TreeCorr will then run the K-Means algorithm to split up the full area into this many patches. See Running K-Means below for more details.

Finally, to make sure multiple catalogs are using the same definition for where patches are on the sky, you would probably want to have a single set of patch centers and have all of your catalogs use that via the patch_centers option. See Using Patch Centers below for details.

Running K-Means

One standard way to split up a set of objects into roughly equal area patches is an algorithm called k-means clustering.

The basic idea of the algorithm is to divide the points \(\vec x_j\) into \(k\) patches, \(S_i\), such that the total "inertia" is minimized. The inertia \(I_i\) of each patch is defined as follows:

\[I_i = \sum_{j \in S_i} \left| \vec x_j - \vec \mu_i \right|^2,\]

where \(\vec \mu_i\) is the center of each patch:

\[\vec \mu_i \equiv \frac{\sum_{j \in S_i} \vec x_j}{N_i},\]

and \(N_i\) is the number of points assigned to patch \(S_i\). The k-means algorithm finds a solution that is a local minimum in the total inertia, \(\sum_i I_i\), or equivalently the mean inertia \(\langle I_i \rangle\) of all the patches.

This definition of inertia is a relatively good proxy for area on the sky that has objects, so this algorithm is a good choice for dividing up a catalog of astronomical objects into fairly uniform patches.

To use the TreeCorr implementation of k-means, simply set the npatch parameter in the Catalog constructor to specify how many patches you want TreeCorr to split the data into, as in the sketch below.
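
For example, a minimal sketch (with hypothetical file and config names) that builds 20 patches and checks how many objects landed in each one via the Catalog.patch array:

>>> import numpy as np
>>> import treecorr
>>> cat = treecorr.Catalog('cat.fits', config, npatch=20)
>>> # Object counts per patch; these should be fairly uniform.
>>> print(np.bincount(cat.patch))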

Note

If the input catalog has (ra, dec, r) positions, then the patches will be made using only the ra,dec location on the sky, not the full 3-D positions. This is usually what you want for making patches over an astronomical survey area. If you really want to make patches according to 3-D clustering of points, then you should input x,y,z values instead.

There are also two additional options which can affect how the k-means algorithm runs:

  • kmeans_init specifies what procedure to use for the initialization of the patches. Options are:
    • 'random' = Choose initial centers randomly from among the input points. This is the traditional k-means initialization algorithm.
    • 'kmeans++' = Use k-means++, an improved algorithm by Arthur and Vassilvitskii with a provable upper bound for how close the final result will be to the global minimum possible total inertia.
    • 'tree' = Use the upper layers of the TreeCorr ball tree to define the initial centers. This is the default, and in practice, it will almost always yield the best final patches. (See Comparison with other implementations below.)
  • kmeans_alt specifies whether to use an alternate iteration algorithm similar to k-means, which often produces somewhat more uniform patches.

    This alternate algorithm specifically targets minimizing the standard deviation of the inertia rather than the mean inertia, so it tends to lead to patches that have a smaller final size variation than the regular k-means algorithm.

    This is not the default algorithm because it is not provably (at least by me) stable. It is possible that the iteration can get into a failure mode where one patch will end up with zero objects. The regular k-means provably cannot fail in this way.

    So if you care especially about having very uniform patch sizes, you might want to try this option, but be careful to inspect the results to make sure they don't look crazy.

See also Field.run_kmeans, which has more information about these options, where these parameters are called simply init and alt respectively.

Comparison with other implementations

Before implementing k-means in TreeCorr, I investigated what other options there were in the Python landscape, and found several existing implementations.

I made a notebook comparing the different algorithms using a random million galaxies from the DES SV (Dark Energy Survey, Science Verification) footprint, chosen because it is a real-life use case that has some ratty edges to deal with, so it seemed like it would provide a reasonable challenge without being crazy.

The ideal patches would be essentially uniform in size according to some measure of the effective area of the patch. To make things simple, I just used the inertia as my proxy for area, since that's the thing that k-means algorithms natively work with.

However, we don't really care about the total inertia being minimized. For most purposes here, we really want the patches to be all close to the same size. So rather than the total inertia, my metric for quality was the rms variation of the inertia (aka the standard deviation).

Fortunately, the process of minimizing the total inertia does tend to select patches with small rms variation as well, but it is worth noting that this is not directly targeted by the normal k-means algorithm. And furthermore, the k-means algorithm almost never finds the true global minimum inertia. The quality of the local minimum depends pretty strongly on the choice of initial centers to seed the iterative part of the algorithm.

Comparing the results of the various k-means implementations, I found that they all tend to be either fairly slow, taking a minute or more for just 1 million objects, or they have very high rms variation in the inertia. I reran each code multiple times using a different random million objects selected from the original catalog (of around 16 million objects).

[Scatter plot of run time vs rms variation in the inertia for the various codes: https://user-images.githubusercontent.com/623887/57647337-ac6bd800-7590-11e9-80bc-900bda3bf66b.png]

Since there was no existing implementation I was particularly happy with, I implemented it myself in TreeCorr. It turns out (not surprisingly) that the ball tree data structure that TreeCorr uses for efficient calculation of correlation functions also enables a very efficient implementation of the k-means iteration step. Furthermore, the quality of the k-means result is pretty dependent on the choice of the initial centers, and using the ball tree for the initialization turns out to produce reliably better results than the initialization methods used by other packages.

The big red dots in the lower left corner are the TreeCorr implementation of the standard k-means clustering algorithm. It typically takes about 1 or 2 seconds to classify these 1 million points into 40 patches, and the rms variation is usually less than any other implementation.

The notebook also includes plots of total inertia, variation in size according to the mean d^2 rather than sum, and variation in the counts. The TreeCorr algorithm tends to be the best k-means implementation according to any of these metrics.

In addition, you can see some slightly smaller orange dots, which have even lower rms variation but take very slightly longer to run. These are the alternate algorithm I mentioned above. This alternate algorithm is similar to k-means, but it penalizes patches with a larger-than-average inertia, so they give up some of their outer points to patches with smaller inertia. In other words, it explicitly targets making the rms variation as small as possible. But in practice, it is not much worse in terms of total inertia either.

The alternate algorithm is available using alt=True in Field.run_kmeans. I left this as a non-default option for two reasons. First, it's not actually the real k-means, so I didn't want to confuse people who just want to use this for regular k-means clustering. But second, I'm not completely sure that it is always stable. There is a free parameter in the penalty function I chose, which I set to 3. Setting it to 4 gave even better results (slightly), but at 5 the algorithm broke down with neighboring patches trading escalating numbers of points between each other until one of them had no points left.

I couldn't convince myself that 4 was actually a magic number and not just the particular value for this data set. So 3 might be safe, or there might be data sets where that also leads to this runaway trading failure mode. I know the regular k-means algorithm can't get into this mode, so it's always safe. Therefore, I think it's better to force the user to intentionally select the alternate algorithm if they really care about having a low rms size variation, with the normal algorithm being the backup if the alternate one fails for them.

Using Patch Centers

If you are doing a cross-correlation, and you want to use patches for computing a jackknife covariance for instance, you cannot just set npatch in both and expect it to work properly. The two catalogs would end up with patches arranged very differently on the sky. Patch 2 for one catalog would not be in the same place as patch 2 in the other one. Thus, the jackknife calculation would be messed up.

Instead, you should define the patches using one of the two (or more) catalogs you want to work with, and then use its patch_centers attribute as the patch_centers parameter when building the other catalog(s):

>>> cat1 = treecorr.Catalog(cat_file1, config1, npatch=N)
>>> cat2 = treecorr.Catalog(cat_file2, config2, patch_centers=cat1.patch_centers)

You can also save the patches to a file using Catalog.write_patch_centers and use that file name as the patch_centers parameter:

>>> cat1 = treecorr.Catalog(cat_file1, config1, npatch=N)
>>> cat1.write_patch_centers(cen_file)
>>> cat2 = treecorr.Catalog(cat_file2, config2, patch_centers=cen_file)

With either method, cat2 will have patches assigned according to which patch center each object is closest to.
+
+
+

Reducing Memory Use

+

One reason you might want to use patches is if the full Catalog doesn’t fit +in memory. (Or possibly by itself it fits, but when performing the correlation function, +the additional memory from building the tree overflows the memory.) +Then you can potentially perform the calculation over patches +with less data loaded into memory at any given time. +The overall procedure for doing this is as follows:

+
    +
  1. First define your patch centers using some smaller Catalog, which +does fit in memory. This could be a catalog over the same survey +geometry, which is intrinsically sparser (say a catalog of red sequence +galaxies or clusters or even stars). Or it could be the large catalog +you want to use, but sampled using the every_nth option to read +in only a fraction of the rows. Run k-means on the smaller catalog +and write the patch_centers to a file, as describe above.

  2. +
  3. Set up a directory somewhere that TreeCorr can use as temporary +space for writing the individual patch files.

  4. +
  5. Define the full Catalog, specifying to use the above centers file for the +patch_centers and the temp directory as save_patch_dir.

  6. +
  7. Make sure not to do anything that requires the catalog be loaded from disk. +TreeCorr will delay doing the actual load until it needs to do so. +Here, we want to make sure it never loads the full data.

  8. +
  9. Run the process function (for whichever correlation +type you need) using the low_mem=True option.

  10. +
+

Here are some worked examples. First, an auto-correlation of a +single large shear catalog:

+
>>> small_cat = treecorr.Catalog(cat_file, config, every_nth=100, npatch=N)
>>> small_cat.write_patch_centers(cen_file)
>>> del small_cat
>>> full_cat = treecorr.Catalog(cat_file, config, patch_centers=cen_file,
...                             save_patch_dir=tmp_dir)
>>> gg = treecorr.GGCorrelation(ggconfig)
>>> gg.process(full_cat, low_mem=True)

Second, a cross-correlation, where the lens catalog is small enough not to be a problem, but the source catalog is too large to hold in memory:

>>> lens_cat = treecorr.Catalog(lens_file, lens_config, npatch=N)
>>> source_cat = treecorr.Catalog(source_file, source_config,
...                               patch_centers=lens_cat.patch_centers,
...                               save_patch_dir=tmp_dir)
>>> ng = treecorr.NGCorrelation(ngconfig)
>>> ng.process(lens_cat, source_cat, low_mem=True)

In both cases, the result should be equivalent to what you would get if you could hold the catalogs fully in memory, but the peak memory will be much lower. The downside is that this usage will generally take somewhat longer, probably something like a factor of 2 for typical scenarios, but this of course depends heavily on the nature of your calculation, how fast your disk I/O is compared to your CPUs, and how many cores you are using.


Note


Technically, the save_patch_dir parameter is not required, but it is recommended. The first time a given patch is loaded, it will find the right rows in the full catalog and load the ones you need. If you give it a directory, then it will write these data to disk, which will make subsequent reads of that patch much faster.


Warning


One caveat with respect to the save_patch_dir parameter is that if there are already files present in the directory with the right names, then it will go ahead and use them, rather than make new patch files. This is usually an efficiency gain, since repeated runs with the same data will already have the right patch files present. However, if you use the same file name and save directory for a different data set, or if you make new patches for the same input file, then TreeCorr won’t notice and will silently use the stale patch files.


To get TreeCorr to make new patch files, you can either manually delete everything in the save directory before starting, or (easier) call:

>>> cat.write_patches()

which will overwrite any existing files that may be there with the same names.


Using MPI


Another use case that is enabled by using patches is to divide up the work of calculating a correlation function over multiple machines with MPI using mpi4py.


For this usage, the process functions take an optional comm parameter. When running in an MPI job, you can pass in comm=MPI.COMM_WORLD, and TreeCorr will divide up the work among however many nodes you are using. The results will be sent back to the rank 0 node and combined to produce the complete answer:

# File name: run_with_mpi.py
from mpi4py import MPI
import treecorr

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Define stuff
fname = ...
centers_file = ...
config = ...
ggconfig = ...

# All machines read the catalog
cat = treecorr.Catalog(fname, config, patch_centers=centers_file)

# All machines define the same correlation object
gg = treecorr.GGCorrelation(ggconfig)

# Pass the comm object to the process function
gg.process(cat, comm=comm)

# rank 0 has the completed result.
if rank == 0:
    # Probably do something more interesting with this now...
    print('xip = ', gg.xip)

You would then run this script using mpiexec, e.g. with 4 processes:

$ mpiexec -n 4 python run_with_mpi.py

The file defining the patch centers should already be written to make sure that each machine is using the same patch definitions. There is some level of randomness in the k-means calculation, so if you use npatch=N, then each machine may end up with different patch definitions, which would definitely mess things up.


If you want to have it all run in a single script, you should have only the rank 0 process define the patches, then send cat.patch_centers to the other ranks, which can build their catalogs using those centers, as in the sketch below. But it’s probably easier to just precompute the centers and save them to a file before starting the MPI run.
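Here is a minimal sketch of that single-script approach (not from the original example), using mpi4py’s bcast to share the rank-0 centers:

# Hypothetical single-script variant: only rank 0 runs k-means.
from mpi4py import MPI
import treecorr

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

fname = ...
config = ...
ggconfig = ...

if rank == 0:
    # Rank 0 alone defines the patches, avoiding the k-means randomness.
    cat0 = treecorr.Catalog(fname, config, npatch=N)
    centers = cat0.patch_centers
else:
    centers = None

# Broadcast the centers so every rank uses identical patch definitions.
centers = comm.bcast(centers, root=0)

cat = treecorr.Catalog(fname, config, patch_centers=centers)
gg = treecorr.GGCorrelation(ggconfig)
gg.process(cat, comm=comm)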


A more complete worked example is available in the TreeCorr devel directory.

Using configuration files


Most of the TreeCorr classes can take a config parameter in lieu of a set of keyword arguments. This is not particularly useful when driving the code from Python; however, it enables running the code from the executable scripts described below.


Specifically, the parameters defined in the configuration file are loaded into a Python dict, which is passed to each of the classes as needed. The advantage of this is that TreeCorr will only use the parameters it actually needs when initializing each object. Any additional parameters (e.g. those that are relevant to a different class) are ignored.
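For instance, here is a minimal sketch of this reuse (assuming a config file that defines both the catalog columns and the binning parameters):

import treecorr
config = treecorr.read_config('config.yaml')
# Catalog picks out e.g. ra_col and dec_col; GGCorrelation picks out
# min_sep, max_sep, nbins, etc.  Each ignores the parameters it doesn't use.
cat = treecorr.Catalog(config['file_name'], config)
gg = treecorr.GGCorrelation(config)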


The corr2 and corr3 executables


Along with the installed Python library, TreeCorr also includes two executable scripts, called corr2 and corr3. Each script takes one required command-line argument, which is the name of a configuration file:

corr2 config.yaml
corr3 config.yaml

A sample configuration file is provided, called sample_config.yaml.


For the complete documentation about the allowed parameters, see the Configuration Parameters page.

YAML is the recommended format for the configuration file, but we also allow JSON files if you prefer, or a legacy format, which is like an .ini file, but without the section headings, consisting of key = value lines. The three formats are normally distinguished by their extensions (.yaml, .json, or .params respectively), but you can also give the file type explicitly with the -f option. E.g.:

corr2 my_config_file.txt -f params

would specify that the configuration file my_config_file.txt uses the legacy “params” format.


You can also specify parameters on the command line after the name of the configuration file, e.g.:

corr2 config.yaml file_name=file1.dat gg_file_name=file1.out
corr2 config.yaml file_name=file2.dat gg_file_name=file2.out
...

This can be useful when running the program from a script for lots of input +files.


The corr2 function from python


The same functionality that you have from the corr2 executable is available in python via the corr2 function:

import treecorr
config = treecorr.read_config(config_file)
config['file_name'] = 'catalog.dat'
config['gg_file_name'] = 'gg.out'
treecorr.corr2(config)
treecorr.corr2(config, logger=None)

Run the full two-point correlation function code based on the parameters in the given config dict.

The function print_corr2_params will output information about the valid parameters that are expected to be in the config dict.

Optionally, a logger parameter may be given, in which case it is used for logging. If not given, the logging will be based on the verbose and log_file parameters.

Parameters
  • config – The configuration dict which defines what to do.
  • logger – If desired, a logger object for logging. (default: None, in which case one will be built according to the config dict’s verbose level.)

The corr3 function from python

treecorr.corr3(config, logger=None)

Run the full three-point correlation function code based on the parameters in the given config dict.

The function print_corr3_params will output information about the valid parameters that are expected to be in the config dict.

Optionally, a logger parameter may be given, in which case it is used for logging. If not given, the logging will be based on the verbose and log_file parameters.

Parameters
  • config – The configuration dict which defines what to do.
  • logger – If desired, a logger object for logging. (default: None, in which case one will be built according to the config dict’s verbose level.)

Utilities related to the configuration dict

treecorr.config.check_config(config, params, aliases=None, logger=None)

Check (and update) a config dict to conform to the given parameter rules. The params dict has an entry for each valid config parameter whose value is a tuple with the following items:

  • type
  • can be a list?
  • default value
  • valid values
  • description (Multiple entries here are allowed for longer strings)

The file corr2.py has a list of parameters for the corr2 program.

Parameters
  • config – The config dict to check.
  • params – A dict of valid parameters with information about each one.
  • aliases – A dict of deprecated parameters that are still aliases for new names. (default: None)
  • logger – If desired, a logger object for logging any warnings here. (default: None)

Returns
  The updated config dict.
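For example, here is a hypothetical params spec following the tuple structure above (the parameter name and values are made up for this sketch):

# (type, can be a list?, default value, valid values, description)
params = {'verbose': (int, False, 1, [0, 1, 2, 3],
                      'How verbose the code should be during processing.')}
config = treecorr.config.check_config({'verbose': 2}, params)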
treecorr.config.convert(value, value_type, key)

Convert the given value to the given type.

The key helps determine what kind of conversion should be performed. Specifically, if ‘unit’ is in the key value, then a unit conversion is done. Otherwise, it just parses the value according to the value_type.

Parameters
  • value – The input value to be converted. Usually a string.
  • value_type – The type to convert to.
  • key – The key for this value. Only used to see if it includes ‘unit’.

Returns
  The converted value.
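For example (a sketch based on the description above; the key names are illustrative):

# Without 'unit' in the key, the value is simply parsed as the given type:
treecorr.config.convert('0.1', float, 'bin_size')   # -> 0.1
# With 'unit' in the key, an angle-unit conversion to radians is done:
treecorr.config.convert('deg', str, 'sep_units')    # -> pi/180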
treecorr.config.get(config, key, value_type=str, default=None)

A helper function to get a key from config, converting to a particular type.

Parameters
  • config – The configuration dict from which to get the key value.
  • key – Which key to get from config.
  • value_type – Which type should the value be converted to. (default: str)
  • default – What value should be used if the key is not in the config dict, or the value corresponding to the key is None. (default: None)

Returns
  The specified value, converted as needed.
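For example (a minimal sketch consistent with the description above):

config = {'min_sep': '1.0'}
treecorr.config.get(config, 'min_sep', float)          # -> 1.0
treecorr.config.get(config, 'nbins', int, default=20)  # -> 20 (key absent)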
treecorr.config.get_from_list(config, key, num, value_type=str, default=None)

A helper function to get a key from config that is allowed to be a list.

Some of the config values are allowed to be lists of values, in which case we take item num from the list. If the value is not a list, then the given value is used for all values of num.

Parameters
  • config – The configuration dict from which to get the key value.
  • key – What key to get from config.
  • num – Which number element to use if the item is a list.
  • value_type – What type should the value be converted to. (default: str)
  • default – What value should be used if the key is not in the config dict, or the value corresponding to the key is None. (default: None)

Returns
  The specified value, converted as needed.
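For example (a sketch; the keys here are just illustrative):

config = {'file_name': ['f1.dat', 'f2.dat'], 'flip_g1': False}
treecorr.config.get_from_list(config, 'file_name', 1)      # -> 'f2.dat'
treecorr.config.get_from_list(config, 'flip_g1', 1, bool)  # -> False (not a list)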
treecorr.config.merge_config(config, kwargs, valid_params, aliases=None)

Merge in the values from kwargs into config.

If either of these is None, then the other one is returned. If they are both dicts, then the values in kwargs take precedence over ones in config if there are any keys that are in both. Also, the kwargs dict will be modified in this case.

Parameters
  • config – The root config (will not be modified)
  • kwargs – A second dict with more or updated values
  • valid_params – A dict of valid parameters that are allowed for this usage. The config dict is allowed to have extra items, but kwargs is not.
  • aliases – An optional dict of aliases. (default: None)

Returns
  The merged dict, including only items that are in valid_params.
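A sketch of the precedence rule (the valid_params spec here is hypothetical; see check_config for its structure):

valid_params = {'nbins': (int, False, None, None, ''),
                'min_sep': (float, False, None, None, '')}
config = {'nbins': 20, 'min_sep': 1.}
merged = treecorr.config.merge_config(config, {'nbins': 30}, valid_params)
# merged['nbins'] == 30, since kwargs take precedence over config.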
treecorr.config.parse(value, value_type, name)

Parse the input value as the given type.

Parameters
  • value – The value to parse.
  • value_type – The type expected for this.
  • name – The name of this value. Only used for error reporting.

Returns
  The parsed value.
treecorr.config.parse_bool(value)

Parse a value as a boolean.

Valid string values for True are: ‘true’, ‘yes’, ‘t’, ‘y’.
Valid string values for False are: ‘false’, ‘no’, ‘f’, ‘n’, ‘none’.
Capitalization is ignored.

If value is a number, it is converted to a bool in the usual way.

Parameters
  value – The value to parse.

Returns
  The value converted to a bool.
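For example:

treecorr.config.parse_bool('Yes')  # -> True  (capitalization ignored)
treecorr.config.parse_bool('n')    # -> False
treecorr.config.parse_bool(0)      # -> False (numbers convert as usual)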
treecorr.config.parse_unit(value)

Parse the input value as a string that should be one of the valid angle units in coord.AngleUnit.valid_names.

The value is allowed to merely start with one of the unit names. So ‘deg’, ‘degree’, ‘degrees’ all convert to ‘deg’, which is the name in coord.AngleUnit.valid_names. The return value in this case would be coord.AngleUnit.from_name(‘deg’).value, which has the value pi/180.

Parameters
  value – The unit as a string value to parse.

Returns
  The given unit in radians.
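For example (using the pi/180 value quoted above):

import math
rad = treecorr.config.parse_unit('degrees')  # same as 'deg'
math.isclose(rad, math.pi / 180)             # -> True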
treecorr.config.parse_variable(config, v)

Parse a configuration variable from a string that should look like ‘key = value’ and write that value to config[key].

Parameters
  • config – The configuration dict to which to write the (key, value) pair.
  • v – A string of the form ‘key = value’.
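For example (a sketch; the key and value are illustrative):

config = {}
treecorr.config.parse_variable(config, 'file_name = cat.fits')
# config now contains an entry for 'file_name'.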
treecorr.config.print_params(params)

Print the information about the valid parameters, given by the given params dict. See check_config for the structure of the params dict.

Parameters
  params – A dict of valid parameters with information about each one.
treecorr.config.read_config(file_name, file_type='auto')

Read a configuration dict from a file.

Parameters
  • file_name – The file name from which the configuration dict should be read.
  • file_type – The type of config file. Options are ‘auto’, ‘yaml’, ‘json’, ‘params’. (default: ‘auto’, which tries to determine the type from the extension)

Returns
  A config dict built from the configuration file.
treecorr.config.setup_logger(verbose, log_file=None)

Parse the integer verbosity level from the command line args into a logging_level string.

Parameters
  • verbose – An integer indicating what verbosity level to use.
  • log_file – If given, a file name to which to write the logging output. If omitted or None, then output to stdout.

Returns
  The logging.Logger object to use.
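For example (a sketch; the assumption that verbose=2 gives info-level output follows typical TreeCorr usage):

logger = treecorr.config.setup_logger(2)  # log to stdout
logger.info('Starting the correlation calculation')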

File Writers

class treecorr.writer.FitsWriter(file_name, *, logger=None)

Writer interface for FITS files.

write(col_names, columns, *, params=None, ext=None)

Write some columns to an output FITS file with the given column names.

If ext is not None, then it is used as the name of the extension for these data.

Parameters
  • col_names – A list of column names for the given columns.
  • columns – A list of numpy arrays with the data to write.
  • params – A dict of extra parameters to write at the top of the output file.
  • ext – Optional ext name for these data. (default: None)
class treecorr.writer.HdfWriter(file_name, *, logger=None)

Writer interface for HDF5 files. Uses h5py to write columns, etc.

write(col_names, columns, *, params=None, ext=None)

Write some columns to an output HDF5 file with the given column names.

If ext is not None, then it is used as the name of the group for these data.

Parameters
  • col_names – A list of column names for the given columns.
  • columns – A list of numpy arrays with the data to write.
  • params – A dict of extra parameters to write at the top of the output file.
  • ext – Optional group name for these data. (default: None)
class treecorr.writer.AsciiWriter(file_name, *, precision=4, logger=None)

Write data to an ASCII (text) file.

write(col_names, columns, *, params=None, ext=None)

Write some columns to an output ASCII file with the given column names.

Parameters
  • col_names – A list of column names for the given columns. These will be written in a header comment line at the top of the output file.
  • columns – A list of numpy arrays with the data to write.
  • params – A dict of extra parameters to write at the top of the output file.
  • ext – Optional ext name for these data. (default: None)
23],studi:21,stuff:[6,22],sub:[1,3,4,6],subclass:6,subsequ:22,subset:[3,6],subtleti:0,subtract:19,success:0,sudo:20,suffer:[6,9],suffic:20,suffici:9,suggest:21,sum:[1,5,6,7,8,12,13,14,16,17,18,19,21,22],sum_:[6,22],sum_i:[5,6,22],sumw:1,support:[11,19,21],suppos:5,sure:[1,5,6,8,14,19,22],surprisingli:22,survei:[9,12,22],symmetr:0,symmetri:[8,14,19],syntax:[1,2],system:[1,3,4,6,11,15,20,21],szalai:[1,9,16,18,19,21],szapudi:21,t_i:8,tabl:[1,9,21],take:[1,3,4,5,6,8,9,14,15,19,20,22,23],taken:[3,4,5,15,21],tangent:20,tangenti:[3,16,21],tarbal:20,target:[6,22],technic:[1,6,22],tell:[5,22],temp:22,temperatur:[3,13,14,20,21],temporari:22,tend:[0,1,5,6,22],tendanc:6,tendenc:6,term:[3,4,5,6,12,13,14,15,17,19,21,22],test:[0,2,6,11],test_requir:20,text:[21,23],than:[0,1,3,4,5,6,9,15,19,20,21,22],thei:[0,1,3,4,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],them:[1,2,3,4,6,7,8,9,12,13,14,16,17,18,19,20,21,22],theoret:0,therefor:[1,3,8,14,19,22],theta:15,thi:[0,1,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],thing:[1,5,19,20,21,22],think:[0,22],third:[1,3,8,14,19,21],those:[1,3,4,6,7,8,9,12,13,14,15,16,17,18,19,20,21,23],though:[9,21],thread:[1,3,4,7,8,12,13,14,16,17,18,19,21],three:[3,6,8,11,14,19,21,23],through:[2,6,9],thu:[0,15,22],tightli:15,time:[1,3,4,5,6,7,8,12,13,14,16,17,18,19,20,22],tmp_dir:22,togeth:[7,8,12,13,14,16,17,18,19],token:1,tol:6,toler:6,tomograph:5,too:[0,1,22],tool:9,top:[0,1,3,4,6,21,23],top_edg:0,tophat:7,tot:[18,19],total:[0,1,3,4,5,6,7,8,9,12,13,14,16,17,18,19,21,22],touch:0,track:[0,1,3,4,5,6,8,14,19],trade:22,tradit:22,tradition:6,transport:0,trapezoid:0,travers:[0,1,6,21],treat:21,tree:[0,1,3,4,6,20,21,22],treecorr:[0,1,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,22,23],treecorr_licens:20,tri:[1,23],triang:8,triangl:[4,8,9,14,19,20,21],trig:1,trigger:1,trigonometr:15,trim:1,trip:2,tripl:8,trivial:15,troubl:20,tupl:[3,6,7,8,16,17,18,19,23],turn:[0,1,6,8,14,15,19,20,22],tutori:[11,20],twice:21,two:[0,1,4,5,6,7,8,11,12,13,14,15,16,17,18,19,21,22,23],twod:[2,3,4,9,11],txcov:5,txt:[20,23],type:[0,1,3,4,5,6,7,8,12,13,14,16,17,18,19,21,22,23],typic:[0,1,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22],u1d:[8,14,19],u_nom:[8,14,19,21],ubin_s:[4,8,14,19,21],ubuntu:2,unari:[3,4],unbias:21,uncertainti:21,uncompens:[9,16,17],uncorrect:[16,17],under:20,underestim:[7,8,12,13,14,16,17,18],understand:0,unfortun:9,uniform:[3,4,9,15,21,22],uniformli:[0,15],unit:[0,1,3,4,6,7,8,9,12,13,14,15,16,17,18,19,20,21,23],unless:[3,4,21],unlik:[1,8,15],unload:1,unnecessari:[1,15],unstabl:6,until:[1,5,6,7,8,12,13,14,16,17,18,19,22],updat:[6,9,20,23],upgrad:20,upper:22,upshot:6,usag:[7,8,12,13,14,16,17,18,19,20,22,23],use:[0,1,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],used:[1,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],useful:[0,1,3,4,6,9,15,20,21,23],user:[5,20,22],uses:[1,3,4,6,15,16,17,21,22,23],using:[0,1,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22],usual:[1,3,4,5,6,21,22,23],util:[11,22],v1d:[8,14,19],v_nom:[8,14,19,21],valid:[0,1,3,4,5,15,23],valid_nam:23,valid_param:23,valu:[0,1,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],value_typ:23,valueerror:1,var_method:[1,3,4,5,7,12,13,16,17,18],varg1:[7,8],varg2:[7,8],varg3:8,varg:[1,9,12,16],vargam0:8,vargam1:8,vargam2:8,vargam3:8,vargamsq:7,vargamsq_:7,variabl:[20,21,23],varianc:[1,3,4,7,8,9,11,12,13,14,16,17,18,19,21],variant:20,variat:[5,22],varieti:[0,20],variou:[1,2,3,4,8,9,12,13,14,15,17,20,21,22],vark1:[13,14],vark2:[13,14],vark3:14,vark:[1,12,17],varmap3:8,varmapsq:7,varnmap:16,varnsq:18,varxi:[5,9,12,13,16,17,18],varxi
m:[5,7],varxip:[5,7],varzeta:[14,19],vassilvitskii:22,vbin_siz:[4,8,14,19,21],vec:[6,22],vector:[1,3,4,5,6,7,15],verbos:[1,3,4,7,8,12,13,14,16,17,18,19,21,23],veri:[0,9,21,22],verif:22,versa:6,version:[1,3,4,6,7,10,11,12,13,15,16,17,18,20,21],via:[1,3,8,9,14,19,21,22,23],vice:6,w_col:[1,21],w_ext:[1,21],w_i:5,wai:[0,1,3,4,5,6,8,9,14,15,19,20,21,22,23],want:[0,1,3,4,5,6,9,15,19,20,21,22],warn:[1,3,4,20,21,23],wasn:1,wcss:6,weak:[3,6,20],weakref:[1,6],weight:[0,1,3,4,5,6,7,8,9,12,13,14,16,17,18,19,20,21],well:[0,1,3,4,6,7,8,9,12,13,14,16,17,18,19,22],went:21,were:[1,3,5,9,21,22],what:[0,1,3,4,5,9,15,19,20,21,22,23],whatev:[0,1,5,21],when:[0,1,3,4,5,6,8,9,14,15,19,20,21,22,23],whenev:[1,3,4],where:[0,1,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22],wherebi:3,wherein:1,whether:[0,1,3,4,6,7,8,12,13,14,15,16,17,18,19,21,22],which:[0,1,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],whichev:[6,22],whitespac:[1,21],who:[0,20,22],whole:6,whose:[0,2,3,6,8,14,19,21,23],wich:23,wide:9,width:[0,3,4,21],wiki:6,wikipedia:6,window:2,wise:2,within:[0,6,9],without:[1,3,4,5,15,22,23],won:[0,1,3,4,20,21,22],word:[1,15,22],work:[1,2,3,4,5,6,7,8,9,12,13,14,16,17,18,19,20,22],world:0,worri:20,wors:22,worth:22,would:[0,1,3,5,6,9,12,13,14,17,19,20,21,22,23],wouldn:1,wpo:[1,21],wpos_col:[1,21],wpos_ext:[1,21],write:[1,2,3,4,7,8,9,12,13,14,16,17,18,19,20,21,22,23],write_patch:1,write_patch_cent:[1,22],write_patch_fil:22,write_patch_result:[2,7,8,12,13,14,16,17,18,19],writemap3:8,writemapsq:7,writenmap:16,writenorm:[16,18],writer:11,written:[1,2,7,8,12,13,14,16,17,18,19,22,23],wrong:0,wrote:9,x_1:15,x_2:15,x_col:[1,9,21],x_ext:[1,21],x_imag:9,x_j:[6,22],x_unit:[1,21],x_valu:9,xi_:7,xi_i:[3,5],xi_im:[3,5,12,16],xi_minu:20,xi_plu:20,xim:[3,5,7,9,20,21],xim_im:[7,21],xip:[3,5,7,9,20,21,22],xip_im:[7,21],xmax:21,xmin:21,xperiod:[3,4,15,21],y_1:15,y_2:15,y_col:[1,9,21],y_ext:[1,21],y_imag:9,y_unit:[1,21],y_valu:9,yaml:23,yes:23,yet:[1,3,4,7,8,12,13,14,16,17,18,19,20],yield:[3,4,22],you:[0,1,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,23],your:[0,1,3,4,5,6,7,9,12,13,15,16,17,18,20,21,22],yourself:[3,20],yperiod:[3,4,15,21],z_1:15,z_2:15,z_col:[1,21],z_ext:[1,21],zero:[0,1,21,22],zeta:[4,8,9,14,19,21],zeta_i:4,zip:9,zperiod:[3,4,15,21]},titles:["Binning","Input Data","Changes from version 4.2 to 4.3","Two-point Correlation Functions","Three-point Correlation Functions","Covariance Estimates","Fields","GGCorrelation: Shear-shear correlations","GGGCorrelation: Shear-shear-shear correlations","Getting Started Guide","Previous History","TreeCorr Documentation","KGCorrelation: Kappa-shear correlations","KKCorrelation: Kappa-kappa correlations","KKKCorrelation: Kappa-kappa-kappa correlations","Metrics","NGCorrelation: Count-shear correlations","NKCorrelation: Count-kappa correlations","NNCorrelation: Count-count correlations","NNNCorrelation: Count-count-count correlations","Overview","Configuration Parameters","Patches","Using configuration 
files"],titleterms:{"class":[1,9],"function":[3,4,9,21,23],"new":2,The:[1,23],Use:22,Using:[9,20,22,23],about:21,accumul:9,api:2,arc:15,arrai:9,auto:9,bin:[0,9,21],bin_slop:0,bootstrap:5,brute:0,bug:[2,20],build:9,calcul:21,catalog:[1,5,9],center:22,chang:2,comparison:22,configur:[21,23],corr2:[20,23],corr3:[20,23],correl:[3,4,7,8,9,12,13,14,16,17,18,19,20,21],count:[16,17,18,19],covari:5,data:1,defin:[9,22],depend:20,deriv:[5,21],dict:23,document:11,download:20,estim:5,euclidean:15,execut:23,featur:[2,20],field:6,file:[1,21,23],fisherrperp:15,fix:2,from:[2,9,23],get:9,ggcorrel:7,gggcorrel:8,guid:9,histori:10,implement:22,improv:2,input:[1,21,22],instal:20,jackknif:5,jupyt:9,kappa:[12,13,14,17],kgcorrel:12,kkcorrel:13,kkkcorrel:14,line:15,linear:0,load:9,log:0,manual:9,marked_bootstrap:5,matrix:5,mean:22,memori:22,method:5,metric:15,miscellan:21,modul:20,mpi:22,ngcorrel:16,nkcorrel:17,nncorrel:18,nnncorrel:19,numpi:9,oldrperp:15,option:[0,20],other:[0,1,9,22,23],output:[0,21],overview:20,paramet:21,patch:22,perform:2,period:15,point:[3,4,9,20],previou:10,python:[20,23],quantiti:[0,5,21],random:[5,9],reader:1,reduc:22,relat:[1,23],report:20,request:20,restrict:15,rlen:15,rperp:15,run:[20,22],sampl:5,sep_unit:0,separ:15,shear:[7,8,9,12,16],shot:5,sight:15,start:9,support:2,system:2,test:20,three:[4,9,20],treecorr:[11,20],tutori:9,two:[3,9,20],twod:0,util:[1,23],varianc:5,version:2,writer:23}}) \ No newline at end of file