diff --git a/.buildinfo b/.buildinfo new file mode 100644 index 0000000000..03d0bcf423 --- /dev/null +++ b/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: ee10fcae1221319d5ff054cc0e656345 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.doctrees/architecture/datamodel.doctree b/.doctrees/architecture/datamodel.doctree new file mode 100644 index 0000000000..926bd3835e Binary files /dev/null and b/.doctrees/architecture/datamodel.doctree differ diff --git a/.doctrees/architecture/overview.doctree b/.doctrees/architecture/overview.doctree new file mode 100644 index 0000000000..d2afd6b476 Binary files /dev/null and b/.doctrees/architecture/overview.doctree differ diff --git a/.doctrees/architecture/queryprocessing.doctree b/.doctrees/architecture/queryprocessing.doctree new file mode 100644 index 0000000000..19265e8de2 Binary files /dev/null and b/.doctrees/architecture/queryprocessing.doctree differ diff --git a/.doctrees/architecture/slicing.doctree b/.doctrees/architecture/slicing.doctree new file mode 100644 index 0000000000..000e9344b5 Binary files /dev/null and b/.doctrees/architecture/slicing.doctree differ diff --git a/.doctrees/clickhouse/death_queries.doctree b/.doctrees/clickhouse/death_queries.doctree new file mode 100644 index 0000000000..ec99451c97 Binary files /dev/null and b/.doctrees/clickhouse/death_queries.doctree differ diff --git a/.doctrees/clickhouse/schema_design.doctree b/.doctrees/clickhouse/schema_design.doctree new file mode 100644 index 0000000000..fcc844873e Binary files /dev/null and b/.doctrees/clickhouse/schema_design.doctree differ diff --git a/.doctrees/clickhouse/supported_versions.doctree b/.doctrees/clickhouse/supported_versions.doctree new file mode 100644 index 0000000000..e889035f5d Binary files /dev/null and b/.doctrees/clickhouse/supported_versions.doctree differ diff --git 
a/.doctrees/clickhouse/topology.doctree b/.doctrees/clickhouse/topology.doctree new file mode 100644 index 0000000000..c129897959 Binary files /dev/null and b/.doctrees/clickhouse/topology.doctree differ diff --git a/.doctrees/configuration/dataset.doctree b/.doctrees/configuration/dataset.doctree new file mode 100644 index 0000000000..58c247cfd5 Binary files /dev/null and b/.doctrees/configuration/dataset.doctree differ diff --git a/.doctrees/configuration/entity.doctree b/.doctrees/configuration/entity.doctree new file mode 100644 index 0000000000..46f5c3a591 Binary files /dev/null and b/.doctrees/configuration/entity.doctree differ diff --git a/.doctrees/configuration/entity_subscription.doctree b/.doctrees/configuration/entity_subscription.doctree new file mode 100644 index 0000000000..3b50b80a55 Binary files /dev/null and b/.doctrees/configuration/entity_subscription.doctree differ diff --git a/.doctrees/configuration/intro.doctree b/.doctrees/configuration/intro.doctree new file mode 100644 index 0000000000..2a4daadd03 Binary files /dev/null and b/.doctrees/configuration/intro.doctree differ diff --git a/.doctrees/configuration/migration_group.doctree b/.doctrees/configuration/migration_group.doctree new file mode 100644 index 0000000000..45d5ca56b4 Binary files /dev/null and b/.doctrees/configuration/migration_group.doctree differ diff --git a/.doctrees/configuration/overview.doctree b/.doctrees/configuration/overview.doctree new file mode 100644 index 0000000000..18029a897f Binary files /dev/null and b/.doctrees/configuration/overview.doctree differ diff --git a/.doctrees/configuration/readable_storage.doctree b/.doctrees/configuration/readable_storage.doctree new file mode 100644 index 0000000000..b302aaf39f Binary files /dev/null and b/.doctrees/configuration/readable_storage.doctree differ diff --git a/.doctrees/configuration/writable_storage.doctree b/.doctrees/configuration/writable_storage.doctree new file mode 100644 index 0000000000..253db2ebf1 
Binary files /dev/null and b/.doctrees/configuration/writable_storage.doctree differ diff --git a/.doctrees/contributing/environment.doctree b/.doctrees/contributing/environment.doctree new file mode 100644 index 0000000000..abf36ea181 Binary files /dev/null and b/.doctrees/contributing/environment.doctree differ diff --git a/.doctrees/environment.pickle b/.doctrees/environment.pickle new file mode 100644 index 0000000000..8bf08a44e1 Binary files /dev/null and b/.doctrees/environment.pickle differ diff --git a/.doctrees/getstarted.doctree b/.doctrees/getstarted.doctree new file mode 100644 index 0000000000..a13c96ce4b Binary files /dev/null and b/.doctrees/getstarted.doctree differ diff --git a/.doctrees/index.doctree b/.doctrees/index.doctree new file mode 100644 index 0000000000..69206fc436 Binary files /dev/null and b/.doctrees/index.doctree differ diff --git a/.doctrees/intro.doctree b/.doctrees/intro.doctree new file mode 100644 index 0000000000..4d3daa027f Binary files /dev/null and b/.doctrees/intro.doctree differ diff --git a/.doctrees/language/snql.doctree b/.doctrees/language/snql.doctree new file mode 100644 index 0000000000..7e2b9bc9db Binary files /dev/null and b/.doctrees/language/snql.doctree differ diff --git a/.doctrees/migrations/modes.doctree b/.doctrees/migrations/modes.doctree new file mode 100644 index 0000000000..4b4951abfe Binary files /dev/null and b/.doctrees/migrations/modes.doctree differ diff --git a/.doctrees/query/overview.doctree b/.doctrees/query/overview.doctree new file mode 100644 index 0000000000..455666a7e9 Binary files /dev/null and b/.doctrees/query/overview.doctree differ diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000000..e69de29bb2 diff --git a/_images/clickhouse_nodes.png b/_images/clickhouse_nodes.png new file mode 100644 index 0000000000..fd8d3c61e4 Binary files /dev/null and b/_images/clickhouse_nodes.png differ diff --git a/_images/compositeprocessing.png b/_images/compositeprocessing.png new 
file mode 100644 index 0000000000..7f79616c6f Binary files /dev/null and b/_images/compositeprocessing.png differ diff --git a/_images/datamodel.png b/_images/datamodel.png new file mode 100644 index 0000000000..56400cce2d Binary files /dev/null and b/_images/datamodel.png differ diff --git a/_images/deployment_legend.png b/_images/deployment_legend.png new file mode 100644 index 0000000000..0959c64345 Binary files /dev/null and b/_images/deployment_legend.png differ diff --git a/_images/errors_transactions_deployment.png b/_images/errors_transactions_deployment.png new file mode 100644 index 0000000000..8ba7f42b14 Binary files /dev/null and b/_images/errors_transactions_deployment.png differ diff --git a/_images/joins.png b/_images/joins.png new file mode 100644 index 0000000000..2b262515d9 Binary files /dev/null and b/_images/joins.png differ diff --git a/_images/multientity.png b/_images/multientity.png new file mode 100644 index 0000000000..9250392c66 Binary files /dev/null and b/_images/multientity.png differ diff --git a/_images/outcomes_deployment.png b/_images/outcomes_deployment.png new file mode 100644 index 0000000000..d40ebd0ffa Binary files /dev/null and b/_images/outcomes_deployment.png differ diff --git a/_images/overview.png b/_images/overview.png new file mode 100644 index 0000000000..cd66812674 Binary files /dev/null and b/_images/overview.png differ diff --git a/_images/queryprocessing.png b/_images/queryprocessing.png new file mode 100644 index 0000000000..a2b25e848d Binary files /dev/null and b/_images/queryprocessing.png differ diff --git a/_images/sessions_deployment.png b/_images/sessions_deployment.png new file mode 100644 index 0000000000..03423b46d9 Binary files /dev/null and b/_images/sessions_deployment.png differ diff --git a/_images/singleentity.png b/_images/singleentity.png new file mode 100644 index 0000000000..f1c47a5e46 Binary files /dev/null and b/_images/singleentity.png differ diff --git a/_images/snubaUI.png 
b/_images/snubaUI.png new file mode 100644 index 0000000000..5873bb0d08 Binary files /dev/null and b/_images/snubaUI.png differ diff --git a/_sources/architecture/datamodel.rst.txt b/_sources/architecture/datamodel.rst.txt new file mode 100644 index 0000000000..d54906546e --- /dev/null +++ b/_sources/architecture/datamodel.rst.txt @@ -0,0 +1,177 @@ +================ +Snuba Data Model +================ + +This section explains how data is organized in Snuba and how user facing +data is mapped to the underlying database (Clickhouse in this case). + +The Snuba data model is divided horizontally into a **logical model** and +a **physical model**. The logical data model is what is visible to the Snuba +clients through the Snuba query language. Elements in this model may or may +not map 1:1 to tables in the database. The physical model, instead, maps 1:1 +to database concepts (like tables and views). + +The reasoning behind this division is that it allows Snuba to expose a +stable interface through the logical data model and perform complex mapping +internally to execute a query on different tables (part of the physical +model) to improve performance in a way that is transparent to the client. + +The rest of this section outlines the concepts that compose the two models +and how they are connected to each other. + +The main concepts, described below are dataset, entity and storage. + +.. image:: /_static/architecture/datamodel.png + +Datasets +======== + +A Dataset is a name space over Snuba data. It provides its own schema and +it is independent from other datasets both in terms of logical model and +physical model. + +Examples of datasets are, discover, outcomes, sessions. There is no +relationship between them. + +A Dataset can be seen as a container for the components that define its +abstract data model and its concrete data model that are described below. 
+ +In terms of query language, every Snuba query targets one and only one +Dataset, and the Dataset can provide extensions to the query language. + +Entities and Entity Types +========================= + +The fundamental block of the logical data model Snuba exposes to the client +is the Entity. In the logical model an entity represents an instance of an +abstract concept (like a transaction or an error). In practice an *Entity* +corresponds to a row in a table in the database. The *Entity Type* is the +class of the Entity (like Error**s** or Transaction**s**). + +The logical data model is composed of a set of *Entity Types* and their +relationships. + +Each *Entity Type* has a schema which is defined by a list of fields with +their associated abstract data types. The schemas of all the *Entity Types* +of a Dataset (there can be several) compose the logical data model that is +visible to the Snuba client and against which Snuba Queries are validated. +No lower level concept is supposed to be exposed. + +Entity Types are unequivocally contained in a Dataset. An Entity Type cannot +be present in multiple Datasets. + +Relationships between Entity Types +---------------------------------- + +Entity Types in a Dataset are logically related. There are two types of +relationships we support: + +- Entity Set Relationship. This mimics foreign keys. This relationship is + meant to allow joins between Entity Types. It only supports one-to-one + and one-to-many relationships at this point in time. +- Inheritance Relationship. This mimics nominal subtyping. A group of Entity + Types can share a parent Entity Type. Subtypes inherit the schema from the + parent type. Semantically the parent Entity Type must represent the union + of all the Entities whose type inherits from it. It also must be possible + to query the parent Entity Type. This cannot be just a logical relationship. 
+ +Entity Type and consistency +--------------------------- + +The Entity Type is the largest unit where Snuba **can** provide some strong +data consistency guarantees. Specifically it is possible to query an Entity +Type expecting Serializable Consistency (please don't use that. Seriously, +if you think you need that, you probably don't). This does not extend to +any query that spans multiple Entity Types where, at best, we will have +eventual consistency. + +This also has an impact on Subscription queries. These can only work on one +Entity Type at a time since, otherwise, they would require consistency between +Entity Types, which we do not support. + +.. ATTENTION:: + To be precise the unit of consistency (depending on the Entity Type) + can be even smaller and depend on how the data ingestion topics + are partitioned (project_id for example), the Entity Type is the + maximum Snuba allows. More details are (ok, will be) provided in + the Ingestion section of this guide. + +Storage +======= + +Storages represent and define the physical data model of a Dataset. Each +Storage is materialized in a physical database concept like a table +or a materialized view. As a consequence each Storage has a schema defined +by fields with their types that reflects the physical schema of the DB +table/view the Storage maps to and it is able to provide all the details to +generate DDL statements to build the tables on the database. + +Storages are able to map the logical concepts in the logical model discussed +above to the physical concept of the database, thus each Storage needs to be +related with an Entity Type. Specifically: + +- Each Entity Type must be backed by at least one Readable Storage (a Storage we + can run queries on), but can be backed by multiple Storages (for example a + pre-aggregate materialized view). Multiple Storages per Entity Type are meant + to allow query optimizations. 
+- Each Entity Type must be backed by one and only one Writable + Storage that is used to ingest data and fill in the database tables. +- Each Storage is backing exclusively one Entity Type. + + + +Examples +======== + +This section provides some examples of how the Snuba data model can represent +some real world models. + +These case studies are not necessarily reflecting the current Sentry production +model nor are they part of the same deployment. They have to be considered as +examples taken in isolation. + +Single Entity Dataset +--------------------- + +This looks like the Outcomes dataset used by Sentry. This actually does not +reflect Outcomes as of April 2020. It is though the design Outcomes should +move towards. + +.. image:: /_static/architecture/singleentity.png + +This Dataset has one Entity Type only which represents an individual Outcome +ingested by the Dataset. Querying raw Outcomes is painfully slow so we have +two Storages. One is the Raw storage that reflects the data we ingest and a +materialized view that computes hourly aggregations that are much more efficient +to query. The Query Planner would pick the storage depending on whether the query +can be executed on the aggregated data or not. + +Multi Entity Type Dataset +------------------------- + +The canonical example of this Dataset is the Discover dataset. + +.. image:: /_static/architecture/multientity.png + +This has three Entity Types. Errors, Transactions and they both inherit from +Events. These form the logical data model, thus querying the Events Entity +Type gives the union of Transactions and Errors but it only allows common +fields between the two to be present in the query. + +The Errors Entity Type is backed by two Storages for performance reasons. +One is the main Errors Storage that is used to ingest data, the other is a +read only view that is putting less load on Clickhouse when querying but +that offers lower consistency guarantees. 
Transactions only have one storage +and there is a Merge Table to serve Events (which is essentially a view over +the union of the two tables). + +Joining Entity types +-------------------- + +This is a simple example of a dataset that includes multiple Entity Types +that can be joined together in a query. + +.. image:: /_static/architecture/joins.png + +GroupedMessage and GroupAssignee can be part of a left join query with Errors. +The rest is similar to what was discussed in the previous examples. diff --git a/_sources/architecture/overview.rst.txt b/_sources/architecture/overview.rst.txt new file mode 100644 index 0000000000..8556b447fd --- /dev/null +++ b/_sources/architecture/overview.rst.txt @@ -0,0 +1,156 @@ +=========================== +Snuba Architecture Overview +=========================== + +Snuba is a time series oriented data store backed by +`Clickhouse `_, which is a columnar storage +distributed database well suited for the kind of queries Snuba serves. + +Data is fully stored in Clickhouse tables and materialized views, +it is ingested through input streams (only Kafka topics today) +and can be queried either through point in time queries or through +streaming queries (subscriptions). + +.. image:: /_static/architecture/overview.png + +Storage +======= + +Clickhouse was chosen as backing storage because it provides a good balance +of the real time performance Snuba needs, its distributed and replicated +nature, its flexibility in terms of storage engines and consistency guarantees. + +Snuba data is stored in Clickhouse tables and Clickhouse materialized views. +Multiple Clickhouse `storage engines `_ +are used depending on the goal of the table. + +Snuba data is organized in multiple Datasets which represent independent +partitions of the data model. More details in the :doc:`/architecture/datamodel` +section. + +Ingestion +========= + +Snuba does not provide an API endpoint to insert rows (except when running +in debug mode). 
Data is loaded from multiple input streams, processed by +a series of consumers and written to Clickhouse tables. + +A consumer consumes one or multiple topics and writes on one or multiple +tables. No table is written onto by multiple consumers as of today. This +allows some consistency guarantees discussed below. + +Data ingestion is most effective in batches (both for Kafka but especially +for Clickhouse). Our consumers support batching and guarantee that one batch +of events taken from Kafka is passed to Clickhouse at least once. By properly +selecting the Clickhouse table engine to deduplicate rows we can achieve +exactly once semantics if we accept eventual consistency. + +Query +===== + +The simplest query system is point in time. Queries are expressed in +the SnQL language (:doc:`/language/snql`) and are sent as HTTP POST calls. +The query engine processes the query (process described in +:doc:`/architecture/queryprocessing`) and transforms it into a ClickHouse +query. + +Streaming queries (done through the Subscription Engine) allow the client +to receive query results in a push way. In this case an HTTP endpoint allows +the client to register a streaming query. Then the Subscription Consumer consumes +the topic that is used to fill the relevant Clickhouse table for updates, +periodically runs the query through the Query Engine and produces the result +on the subscriptions Kafka topic. + +Data Consistency +================ + +Different consistency models coexist in Snuba to provide different guarantees. + +By default Snuba is eventually consistent. When running a query, by default, +there is no guarantee of monotonic reads since Clickhouse is multi-leader +and a query can hit any replica and there is no guarantee the replicas will +be up to date. Also, by default, there is no guarantee Clickhouse will have +reached a consistent state on its own. 
+ +It is possible to achieve strong consistency on specific queries by forcing +Clickhouse to reach consistency before the query is executed (FINAL keyword), +and by forcing queries to hit the specific replica the consumer writes onto. +This essentially uses Clickhouse as if it were a single leader system and it +allows Sequential consistency. + +================================ +Snuba within a Sentry Deployment +================================ + +This section explains the role Snuba plays within a Sentry deployment showing +the main data flows. If you are deploying Snuba stand alone, this won't be +useful for you. + +Legend: + +.. image:: /_static/architecture/deployment_legend.png + +Deployments: + +Errors and transactions: + +.. image:: /_static/architecture/errors_transactions_deployment.png + + +Sessions: + +.. image:: /_static/architecture/sessions_deployment.png + +Outcomes: + +.. image:: /_static/architecture/outcomes_deployment.png + +Errors and Transactions data flow +================================= + +The main section at the top of the diagram illustrates the ingestion process +for the ``Events`` and ``Transactions`` Entities. These two entities serve +most issue/errors related features in Sentry and the whole Performance +product. + +There is only one Kafka topic (``events``) shared between errors and transactions +that feeds this pipeline. This topic contains both error messages and transaction +messages. + +The Errors consumer consumes the ``events`` topic, writes messages in the Clickhouse +``errors`` table. Upon commit it also produces a record on the ``snuba-commit-log`` +topic. + +Alerts on Errors are generated by the Errors Subscription Consumer. This is a synchronized +consumer that consumes both the main ``events`` topic and the ``snuba-commit-log`` topic +so it can proceed in lockstep with the main consumer. + +The synchronized consumer then produces alerts by querying Clickhouse and produces +the result on the result topic. 
+ +An identical but independent pipeline exists for transactions. + +The Errors pipeline has an additional step: writing to the ``replacements`` topic. +Errors mutations (merge/unmerge/reprocessing/etc.) are produced by Sentry on the +``events`` topic. They are then forwarded to the ``replacements`` topic by the +Errors Consumer and executed by the Replacement Consumer. + +The ``events`` topic must be partitioned semantically by Sentry project id to +allow in order processing of the events within a project. This, as of today, is a +requirement for alerts and replacements. + +Sessions and Outcomes +===================== + +``Sessions`` and ``Outcomes`` work in a very similar and simpler way. Specifically +``Sessions`` power Release Health features, while ``Outcomes`` mainly provide +data to the Sentry ``stats`` page. + +Both pipelines have their own Kafka topic, Kafka consumer and they write on their +own table in Clickhouse. + +Change Data Capture pipeline +============================ + +This pipeline is still under construction. It consumes the ``cdc`` topic and fills +two independent tables in Clickhouse. diff --git a/_sources/architecture/queryprocessing.rst.txt b/_sources/architecture/queryprocessing.rst.txt new file mode 100644 index 0000000000..42f3bd44ff --- /dev/null +++ b/_sources/architecture/queryprocessing.rst.txt @@ -0,0 +1,195 @@ +====================== +Snuba Query Processing +====================== + +Snuba has a query processing pipeline that starts with the parsing of the +Snuba query language (legacy and SnQL) into an AST and ends with a SQL query +being executed on Clickhouse. Between these two phases, several passes on +the AST to apply query processing transformations are executed. + +The processing pipeline has two main goals: optimize the query and prevent +queries that would be dangerous for our infrastructure. 
+ +As for the data model, the query processing pipeline is divided into a logical +section where the product related processing is performed and a physical +section which is focused on optimizing the query. + +The logical section contains steps like the validation of the query to +ensure it matches the data model or applying custom functions. The physical +section includes steps like promoting tags and selecting a pre-aggregated +view to serve the query. + +Query Processing Phases +======================= + +This section gives an introduction and some pointers to code and examples +for the phases discussed above. + +.. image:: /_static/architecture/queryprocessing.png + +Legacy and SnQL Parsers +----------------------- + +Snuba supports two languages, the legacy JSON based one and the new one named +SnQL. With the exception of joins and composite queries which are not supported +by the legacy language, the query processing pipeline does not change whether +one or the other language is used. + +They both produce a logical query AST which is represented by +`this data structure `_. + +The legacy parser is `here `_, +while the SnQL parser is `in this module `_. + +Query Validation +---------------- + +This phase ensures the query can be run (most of the time, we do not yet catch +all possible invalid queries). The responsibility of this phase is to return an +HTTP400 in case of an invalid query with a proper useful message to the user. + +This is divided into two sub-phases: general validation and entity specific +validation. + +General validation is composed of a set of checks that are applied to each query +right after the Query is produced by the parser. This happens +`in this function `_. +This includes validations like preventing alias shadowing and function signature +validation. + +Each entity can provide some validation logic as well in the form of required +columns. This happens `in this class `_. 
+This allows the query processing to reject queries that do not have a condition +on project_id or that do not have a time range. + +Logical Query Processors +------------------------ + +Query processors are stateless transformations that receive a Query object (with +its AST) and transform it in place. `This `_ +is the interface to implement for logical processors. In the logical phase each +entity provides the query processors to be applied in sequence. Common use +cases are custom functions like `apdex `_, +or time bucketing like the `time series processor `_. + +Query processors are not supposed to depend on other processors to be executed +before or after and should be independent from each other. + +Storage Selector +---------------- + +As explained in :doc:`/architecture/datamodel`, each Entity can define multiple Storages. +Multiple storages represent multiple tables and materialized views can be defined +for performance reasons as some can respond to some queries faster. + +At the end of the logical processing phase (which is entirely based on the entity) +the storage selector can inspect the query and pick the appropriate storage for +the query. Storage selectors are defined in the entity data model and implement +this `interface `_. +An example is the Errors entity, which has two storages, one is for consistent +queries (they are routed to the same nodes where events are written) and the +other only includes replicas we do not write onto to serve most queries. This +reduces the load on the nodes we write onto. + +Query Translator +---------------- + +Different storages have different schemas (these reflect the schema of a +clickhouse table or view). All of them are generally different from the entity +model, the most notable example being the subscriptable expression used for +tags ``tags[abc]`` that does not exist in clickhouse where accessing a tags +looks like ``tags.values[indexOf(tags.key, 'abc')]`` . 
+ +After a storage has been selected, the query needs to be translated to the physical +query. The translator is a rule based system: rules are defined by the entity (for +each storage) and are applied in sequence. + +Contrary to query processors, translation rules do not have full context +on the query and can only translate an individual expression. This allows us +to compose translation rules easily and reuse them across entities. + +`These `_ +are the translation rules for the transactions entity. + +Physical Query Processors +------------------------- + +Physical query processors work in a very similar way compared to the Logical +query processors. Their interface is very similar and the semantics is the same. +The difference is that they operate on the physical query and, as such, they +are mainly designed for optimizations. For example `this processor `_ +finds equality conditions on tags and replaces them with the equivalent condition +on a tags hashmap (where we have a bloom filter index) making the filtering +operation faster. + +Query Splitter +-------------- + +Some queries can be executed in an optimized way by splitting them into multiple +individual Clickhouse queries and by assembling the results of each one of them. + +Two examples are time splitting and column splitting. Both are `in this file `_. + +Time splitting splits a query (that does not contain aggregations and is properly +sorted) into multiple ones over a variable time range that increases in size +progressively and executes them in sequence stopping as soon as we have enough +results. + +Column splitting splits filtering and column fetching. It executes the filtering +part of the query on a minimal number of columns so Clickhouse loads fewer columns, +then, through a second query, fetches the missing columns only for the rows +filtered by the first query. + +Query Formatter +--------------- + +This component simply formats the query into the Clickhouse query string. 
+ +Composite Query Processing +========================== + +The discussion above is valid only for simple queries; composite ones +(joins and queries that include subqueries) follow a slightly different path. + +The simple query pipeline discussed above would not work on join queries or +on queries that contain subqueries. In order to make that work, each step +would have to take into account joined queries and subqueries, which would +multiply the complexity of the process. + +To solve the issue we transform each join query into a join of multiple +simple subqueries. Each subquery is a simple query that can be processed by +the pipeline described above. This is also the preferred way to run Clickhouse +joins as it allows us to apply filters before the join. + +.. image:: /_static/architecture/compositeprocessing.png + +The Query Processing Pipeline for this type of query is composed of a few +additional steps with respect to what was described above. + +Subquery Generator +------------------ + +This component takes a simple SnQL join query and creates a subquery for each +table in the join. + +Expressions Push Down +--------------------- + +The query generated at the previous step would be a valid join but incredibly +inefficient. This step is basically a join optimizer that pushes down into +subqueries all expressions that can be part of a subquery. This is a needed +step independently from the subquery processing as the Clickhouse join engine +does not do any expression push down and it would be up to Snuba to optimize +the query. + +Simple Query Processing Pipeline +-------------------------------- + +This is the same pipeline discussed above from the logical query validation +to the physical query processors. + +Join Optimizations +------------------ + +At the end of the processing we can apply some optimizations to the overall +composite query like turning a join into a Semi Join. 
diff --git a/_sources/architecture/slicing.rst.txt b/_sources/architecture/slicing.rst.txt new file mode 100644 index 0000000000..59243891a7 --- /dev/null +++ b/_sources/architecture/slicing.rst.txt @@ -0,0 +1,108 @@ +=========================================== +Snuba Data Slicing (under development) +=========================================== + +*This feature is under active development and is subject to change* + +To support a higher volume of data, we are building out support for +datasets and storages that span multiple physical resources +(Kafka clusters, Redis instances, Postgres databases, ClickHouse clusters, +etc.) with the same schema. Across Sentry, data records will +have a logical partition assignment based on the data's organization_id. In Snuba, +we maintain a mapping of logical partitions to physical slices in +``settings.LOGICAL_PARTITION_MAPPING``. + +In a future revision, this ``settings.LOGICAL_PARTITION_MAPPING`` will be +used along with ``settings.SLICED_STORAGE_SETS`` to map queries and incoming +data from consumers to different ClickHouse clusters using a +(StorageSetKey, slice_id) pairing that exists in configuration. + +=========================== +Configuring a slice +=========================== + +Mapping logical partitions : physical slices +---------------------------------------------- +To add a slice to a storage set's logical:physical mapping, or repartition, +increment the slice count in ``settings.SLICED_STORAGE_SETS`` for the relevant +storage set. Change the mapping of the relevant storage set's +logical partitions in ``settings.LOGICAL_PARTITION_MAPPING``. +Every logical partition **must** be assigned to a slice and the +valid values of slices are in the range of ``[0,settings.SLICED_STORAGE_SETS[storage_set])``. + +Defining ClickHouse clusters in a sliced environment +---------------------------------------------------- + +Given a storage set, there can be three different cases: + +1. The storage set is not sliced +2. 
The storage set is sliced and no mega-cluster is needed +3. The storage set is sliced and a mega-cluster is needed + +A mega-cluster is needed when there may be partial data residing on different sliced +ClickHouse clusters. This could happen, for example, when a logical partition:slice +mapping changes. In this scenario, writes of new data will be routed to the new slice, +but reads of data will need to span multiple clusters. Now that queries need to work +across different slices, a mega-cluster query node will be needed. + +For each of the cases above, different types of ClickHouse cluster +configuration will be needed. + +For case 1, we simply define clusters as per usual in ``settings.CLUSTERS``. + +For cases 2 and 3: + +To add a sliced cluster with an associated (storage set key, slice) pair, add cluster definitions +to ``settings.SLICED_CLUSTERS`` in the desired environment's settings. Follow the same structure as +regular cluster definitions in ``settings.CLUSTERS``. In the ``storage_set_slices`` field, sliced storage +sets should be added in the form of ``(StorageSetKey, slice_id)`` where slice_id is in +the range ``[0,settings.SLICED_STORAGE_SETS[storage_set])`` for the relevant ``StorageSetKey``. + + +Preparing the storage for sharding +---------------------------------- +A storage that should be sharded requires setting the partition key column that will be used +to calculate the logical partition and ultimately the slice ID for how to query the destination +data. + +This is done with the `partition_key_column_name` property in the storage schema (we do not +support sharded storages for non-YAML based entities). You can see an example of how one +might shard by organization_id in generic_metrics_sets and generic_metrics_distributions +dataset YAML files. + +Adding sliced Kafka topics +--------------------------------- +In order to define a "sliced" Kafka topic, add ``(default logical topic name, slice id)`` to +``settings.SLICED_KAFKA_TOPIC_MAP``. 
This tuple should be mapped to a custom physical topic +name of the form ``logical_topic_name-slice_id``. Make sure to add the corresponding broker +configuration details to ``settings.SLICED_KAFKA_BROKER_CONFIG``. Here, use the same tuple +``(default logical topic name, slice id)`` as the key, and the broker config info as the value. + +Example configurations: + +``SLICED_KAFKA_TOPIC_MAP`` = {("snuba-generic-metrics", 1): "snuba-generic-metrics-1"} + +``SLICED_KAFKA_BROKER_CONFIG`` = {("snuba-generic-metrics", 1): BROKER_CONFIG} + +These types of topics can be "sliced": raw topics, replacements topics, commit log topics, +subscription scheduler topics. Note that the slicing boundary stops at this point and +the results topics for subscriptions cannot be sliced. + + +================================= +Working in a Sliced Environment +================================= + +Starting a sliced consumer +----------------------------- + +First, ensure that your slicing configuration is set up properly: ``SLICED_STORAGE_SETS``, +``SLICED_CLUSTERS``, ``SLICED_KAFKA_TOPIC_MAP``, and ``SLICED_KAFKA_BROKER_CONFIG``. +See above for details. + +Start up ``snuba consumer`` as per usual, with an extra flag ``--slice-id`` set equal +to the slice number you are reading from. + + +TODO: handling subscriptions, scheduler and executor, etc. +---------------------------------------------------------- diff --git a/_sources/clickhouse/death_queries.rst.txt b/_sources/clickhouse/death_queries.rst.txt new file mode 100644 index 0000000000..98824133f4 --- /dev/null +++ b/_sources/clickhouse/death_queries.rst.txt @@ -0,0 +1,16 @@ +Clickhouse Queries Of Death +=========================== + + +The following queries have been shown to segfault ClickHouse on 20.7 (which is the minimum Clickhouse version of Snuba). Do not run these queries in the tracing tool, unless you really want to take ClickHouse down. 
countif("DOOM")
+ +This works well when your dataset and query design gives you the ability to +filter for exact matches and a large number of rows will NOT be an exact match. +Often, however, a ClickHouse query filters for rows that contain a substring match or regular +expression match for a tag value of a given key. This makes bloom filter indexes +not usable for the query and, depending on the other selectivity attributes of your query, +can necessitate moving (or promoting) those relevant values for a given tag key to a new separate +column [#dupe]_. + +.. _selectivity: + +Selectivity in queries and indices +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Queries are much more efficient when they have the attribute of being low-selectivity on +the table indexes -- meaning the query conditions on indexed columns filter the dataset +to a very small proportion of the overall number of rows. High selectivity +can break the efficiency of the bloom-filter style index on dictionary columns +(see :ref:`bloom`). In cases of high-selectivity queries, there is a negative performance impact on both +bloom-filter indexed columns as well as promoted tag value columns (when searching for a ``key=value`` +pair exact match). The promoted column can make the penalty a bit less severe because +it does not load tag values from unrelated keys. Still, an effort should be made to avoid +low-selectivity queries. + +.. _bloom: + +Bloom filter indexing on dictionary-like columns +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To facilitate faster searching on dictionary columns, we tend to create bloom filter indices +on a hashes of both the unique ``key`` values of each row as well as hashes of all the ``key=value`` +pairs of each row. The `bloom filter `_ registers these +in a stochastic data structure designed to quickly determine which elements do NOT exist in a set. +So that it can model the entire unbounded keyspace in a fixed amount of memory, a bloom filter +is designed to have false positives. 
This means that there is actually a performance **penalty** +if the value is often present in the underlying set: First, the value must be tested +against the bloom filter (which will always return "maybe present"), and after +that operation occurs a full scan of the column must be performed. + +Due to their structure, bloom filters are only good for exact value searching. They +cannot be used for "is user-value a prefix of column?" or "does column match regex?" style queries. +Those styles of queries require a separate column to search. + +.. [#dupe] During migration from non-promoted to promoted, putting the data in both map and + top-level column may be necessary so that queries of old rows can still access the + attributes. After the table goes through a full TTL period and the API/storage definition + is changed to serve the values from the top-level field, message processors should be changed + to stop writing the data in duplicate places. + + +Aggregate Tables and Materialization +------------------------------------ + +A common use case for ClickHouse and Snuba is to ingest raw data and automatically +roll it up to aggregate values (keyed by a custom set of dimensions). This lets +a dataset owner simplify their write logic while getting the query benefits of +rows that are pre-aggregated. This is done with what we'll call a raw table +(the table the consumer writes to), an aggregate table (the table the API reads from) +and a materialized view (which describes how the data should be transformed from +raw to aggregate). + +`Sample usage of a materialized view/aggregate table from the official ClickHouse Documentation `_. +Note that contrary to this example, the aggregate table definition in Snuba is +always separate from the materialized view definition (which is just a ClickHouse SQL +transformation, similar to a PostgreSQL trigger). 
+ +In general, Snuba follows the naming conventions here: + +* (``widgets_raw_local``, ``widgets_raw_dist``) for the raw (local, distributed) tables +* ``widgets_aggregation_mv`` for the materialized view (this only exists on storage nodes) +* (``widgets_aggregated_local``, ``widgets_aggregated_dist``) for the roll-up/aggregated (local, distributed) tables + +Materialized views are immutable so it's normal to have multiple versions of +``widgets_aggregation_mv`` when behavior is updated, with suffixes like +``widgets_aggregation_mv_v1``, ``widgets_aggregation_mv_v2``, etc. Migration +between materialized view versions are described in the next section but in general +old materialized views should be discarded once they are no longer used. + +Schema migrations using materialization_version +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As we discussed at the end of the prior section, materialized view logic cannot +be updated in place. In order to continuously roll-up input data without data +loss or duplication in the aggregate table, we control logic changes with a +column on the raw table, ``materialization_version``, and making the materialized +view logic depend on specific values of that column. To update MV logic, you +create a new materialized view that looks for the last used value of +``materialization_version`` plus one and then, after that's been created in all +relevant environments, update the consumer to write the new materialization_version +to the raw column. + +Here is how this might look in practice: + +Context: + +1. There is a raw table ``click_events_raw_local``, that has a field named + ``click_duration``, of type Float64. A snuba consumer is setting this to 0 for + certain types of click events. +2. There is a materialized view ``click_events_aggregation_mv`` that is writing + a ``quantilesState()`` value for a ``click_duration`` column in ``click_events_aggregated_local`` + including those zero-values. 
This materialized view looks for the value of + ``materialization_version = 0`` in its WHERE condition. +3. The query users are being surprised by p90, p95, and p99 values that are taking into + account zero-duration click events which don't make sense for the use case. + +To resolve this confusion, we don't want to set quantilesState for ``click_duration`` if +the incoming ``click_duration`` is 0. + +Steps to resolve the issue: + +1. Create a new materialized view ``click_events_aggregation_mv_v1`` via the migration system. This new materialized + view will use the WHERE clause or some kind of filtering to avoid setting quantilesState(0) + in the write for the ``click_duration`` column. This new materialized will only operate on + inputs in ``click_events_raw_local`` where ``materialization_version = 1`` +2. Test that this fixes the issue in your local environment by changing your consumer to use + ``materialization_version = 1``. It can make sense to control this via the settings file in + (in ``snuba/settings/__init.py__``) +3. Run the migration in all relevant environments. +4. Change the materialization_version setting mentioned above in a specific environment, to + set ``materialization_version = 1`` on write. +5. Validate that the consumer is writing rows with the new materialization version, and that + it produces the expected roll-up results. +6. Write a migration to remove the now-unused materialized view (``click_events_aggregation_mv``). 
diff --git a/_sources/clickhouse/supported_versions.rst.txt b/_sources/clickhouse/supported_versions.rst.txt new file mode 100644 index 0000000000..29b55090ec --- /dev/null +++ b/_sources/clickhouse/supported_versions.rst.txt @@ -0,0 +1,15 @@ +============================= +ClickHouse supported versions +============================= +The following versions of Clickhouse have been tested and are known to work +with Snuba: + +- 20.3 +- 20.7 +- 21.8 + +Any version of Clikhouse used outside of this list could potentially work, +but is not guaranteed to work. Some functionality might be broken. Use a +different version at your own risk. There are plans to support more recent +versions of Clickhouse in the future. When Snuba has been validated to work +with the new versions of Clickhouse, this list will be updated. diff --git a/_sources/clickhouse/topology.rst.txt b/_sources/clickhouse/topology.rst.txt new file mode 100644 index 0000000000..1543b068c1 --- /dev/null +++ b/_sources/clickhouse/topology.rst.txt @@ -0,0 +1,45 @@ +================================== +ClickHouse Topology Best Practices +================================== + +Sentry has a few conventions for using ClickHouse that are not inherent to +the database but useful to consider if you intend to create new tables or +datasets. + +Storage nodes vs. Query nodes +----------------------------- + +We tend to deploy two different types of nodes in production: + +1. storage nodes -- these contain table data for a given shard, and replicate + data between other nodes of that shard. For most datasets, they + are not queried directly. +2. query nodes -- these contain references to tables on storage nodes via + distributed tables, and are intended to be queried directly by the API or + written to directly by consumers. They do not themselves store data but aggregate + or delegate to tables on the storage node. 
+ +This separation allows us to do maintenance on storage nodes in a way that is +invisible to the application (as the query nodes generally act as a proxy, and +can more generally be kept up indefinitely). + +Distributed Tables vs. Local Tables +----------------------------------- + +Astute snuba users might notice that migrations contain references to tables with names +suffixed with ``_local`` and table names suffixed with ``_dist``. This is used to +distinguish between distributed tables (generally using the ClickHouse table engine +`Distributed `_) +and local tables (generally using one of the +`MergeTree-derived `_ +table engines). Distributed tables exist to aggregate from the shards of local tables, and following +the sequence above, distributed tables tend to be created on query nodes and local tables +tend to be created on storage nodes. + +Tying it all together +--------------------- + +This diagram hopefully combines all the above concepts into an understandable +quick to consume format. + +.. image:: /_static/architecture/clickhouse_nodes.png diff --git a/_sources/configuration/dataset.md.txt b/_sources/configuration/dataset.md.txt new file mode 100644 index 0000000000..e96443d51d --- /dev/null +++ b/_sources/configuration/dataset.md.txt @@ -0,0 +1,8 @@ +# Dataset Schema + +## Properties + +- **version**: Version of schema. +- **kind**: Component kind. +- **name** *(string)*: Name of the dataset. +- **entities** *(array)*: Names of entities associated with this dataset. diff --git a/_sources/configuration/entity.md.txt b/_sources/configuration/entity.md.txt new file mode 100644 index 0000000000..c7db9b56e1 --- /dev/null +++ b/_sources/configuration/entity.md.txt @@ -0,0 +1,48 @@ +# Entity Schema + +## Properties + +- **version**: Version of schema. +- **kind**: Component kind. +- **schema** *(array)*: Objects (or nested objects) representing columns containg a name, type and args. +- **name** *(string)*: Name of the entity. 
+- **storages** *(array)*: An array of storages and their associated translation mappers. + - **storage** *(string)*: Name of a readable or writable storage class which provides an abstraction to read from a table or a view in ClickHouse. + - **is_writable** *(boolean)*: Marks the storage is a writable one. + - **translation_mappers** *(object)*: Represents the set of rules used to translates different expression types. + - **columns** *(array)* + - **mapper** *(string)*: Mapper class name. + - **args** *(object)*: Key/value mappings required to instantiate Mapper class. + - **functions** *(array)* + - **mapper** *(string)*: Mapper class name. + - **args** *(object)*: Key/value mappings required to instantiate Mapper class. + - **curried_functions** *(array)* + - **mapper** *(string)*: Mapper class name. + - **args** *(object)*: Key/value mappings required to instantiate Mapper class. + - **subscriptables** *(array)* + - **mapper** *(string)*: Mapper class name. + - **args** *(object)*: Key/value mappings required to instantiate Mapper class. +- **join_relationships** *(object)* + - **^.*$** *(object)*: The join relationship. The key for this relationship is how the relationship is specified in queries (MATCH x -[key]-> y). + - **rhs_entity** *(string)*: The entity key of the rhs entity to join with. + - **columns** *(array)*: A sequence of tuples of columns to join on, in the form (left, right). + - **join_type** *(string)*: The type of join that can be performed (either 'left' or 'inner'. + - **equivalences** *(array)*: Tracking columns in the two entities that are not part of the join key but are still equivalent. +- **storage_selector** *(object)*: + - **selector** *(string)*: QueryStorageSelector class name. + - **args** *(object)*: Key/value mappings required to instantiate QueryStorageSelector class. +- **query_processors** *(array)*: Represents a transformation applied to the ClickHouse query. 
+ - **processor** *(string)*: Name of LogicalQueryProcessor class config key. Responsible for the transformation applied to a query. + - **args** *(object)*: Key/value mappings required to instantiate QueryProcessor class. +- **validators** *(array)*: The validation logic used on the ClickHouse query. + - **validator** *(string)*: Validator class name. + - **args** *(object)*: Key/value mappings required to instantiate Validator class. +- **validate_data_model** *(['string', 'null'])*: The level at which mismatched functions and columns when querying the entity should be logged. +- **required_time_column** *(['string', 'null'])*: The name of the required time column specifed in schema. +- **partition_key_column_name** *(['string', 'null'])*: The column name, if this entity is partitioned, to select slice. +- **subscription_processors** *(array)* + - **processor** *(string)*: Entity Subscription Processor class name. + - **args** *(object)*: Key/value mappings required to instantiate Entity Subscription Processor class. +- **subscription_validators** *(array)* + - **validator** *(string)*: Entity Subscription Validator class name. + - **args** *(object)*: Key/value mappings required to instantiate Entity Subscription Validator class. diff --git a/_sources/configuration/entity_subscription.md.txt b/_sources/configuration/entity_subscription.md.txt new file mode 100644 index 0000000000..f0521a9bd6 --- /dev/null +++ b/_sources/configuration/entity_subscription.md.txt @@ -0,0 +1,9 @@ +# Entity Subscription Schema + +## Properties + +- **version**: Version of schema. +- **kind**: Component kind. +- **name** *(string)*: Name of the entity subscription. +- **max_allowed_aggregations** *(['integer', 'null'])*: Maximum number of allowed aggregations. +- **disallowed_aggregations** *(['array', 'null'])*: Name of aggregation clauses that are not allowed. 
diff --git a/_sources/configuration/intro.rst.txt b/_sources/configuration/intro.rst.txt new file mode 100644 index 0000000000..e9646551c2 --- /dev/null +++ b/_sources/configuration/intro.rst.txt @@ -0,0 +1,5 @@ +===================== +Dataset Configuration +===================== + +Snuba Datasets are defined through YAML configuration files. These are then loaded and validated by the Snuba application. diff --git a/_sources/configuration/migration_group.md.txt b/_sources/configuration/migration_group.md.txt new file mode 100644 index 0000000000..998bcd0c10 --- /dev/null +++ b/_sources/configuration/migration_group.md.txt @@ -0,0 +1,9 @@ +# Migration Group Schema + +## Properties + +- **version**: Version of schema. +- **kind**: Component kind. +- **name** *(string)*: Name of the migration group. +- **optional** *(boolean)*: Flag to determine if migration group is optional. +- **migrations** *(array)*: Names of migrations to be applied in group. diff --git a/_sources/configuration/overview.rst.txt b/_sources/configuration/overview.rst.txt new file mode 100644 index 0000000000..fbb608e856 --- /dev/null +++ b/_sources/configuration/overview.rst.txt @@ -0,0 +1,19 @@ +.. include:: intro.rst + +Schemas: +--------- + +.. toctree:: + :maxdepth: 1 + + dataset + + entity + + entity_subscription + + migration_group + + readable_storage + + writable_storage diff --git a/_sources/configuration/readable_storage.md.txt b/_sources/configuration/readable_storage.md.txt new file mode 100644 index 0000000000..0bbadfee74 --- /dev/null +++ b/_sources/configuration/readable_storage.md.txt @@ -0,0 +1,29 @@ +# Readable Storage Schema + +## Properties + +- **version**: Version of schema. +- **kind**: Component kind. +- **name** *(string)*: Name of the readable storage. +- **storage** *(object)*: + - **key** *(string)*: A unique key identifier for the storage. + - **set_key** *(string)*: A unique key identifier for a collection of storages located in the same cluster. 
+- **readiness_state** *(string)*: The readiness state defines the availability of the storage in various environments. Internally, this label is used to determine which environments this storage is released in. There for four different readiness states: limited, deprecrate, partial, and complete. Different environments support a set of these readiness_states . If this is a new storage, start with limited which only exposes the storage to CI and local development. Must be one of: ['limited', 'deprecate', 'partial', 'complete']. +- **schema** *(object)*: + - **columns** *(array)*: Objects (or nested objects) representing columns containg a name, type and args. + - **local_table_name** *(string)*: The local table name in a single-node ClickHouse. + - **dist_table_name** *(string)*: The distributed table name in distributed ClickHouse. + - **not_deleted_mandatory_condition** *(string)*: The name of the column flagging a deletion, eg deleted column in Errors. Defining this column here will ensure any query served by this storage explicitly filters out any 'deleted' rows. Should only be used for storages supporting deletion replacement. + - **partition_format** *(array)*: The format of the partitions in Clickhouse. Used in the cleanup job. +- **query_processors** *(array)* + - **processor** *(string)*: Name of ClickhouseQueryProcessor class config key. Responsible for the transformation applied to a query. + - **args** *(object)*: Key/value mappings required to instantiate QueryProcessor class. +- **query_splitters** *(array)* + - **splitter** *(string)*: Name of QuerySplitStrategy class config key. Responsible for splitting a query into two at runtime and combining the results. + - **args** *(object)*: Key/value mappings required to instantiate QuerySplitStrategy class. +- **mandatory_condition_checkers** *(array)* + - **condition** *(string)*: Name of ConditionChecker class config key. 
Responsible for running final checks on a query to ensure that transformations haven't impacted/removed conditions required for security reasons. + - **args** *(object)*: Key/value mappings required to instantiate ConditionChecker class. +- **allocation_policies** *(array)* + - **name** *(string)*: Name of the AllocationPolicy used for allocating read resources per query on this storage. + - **args** *(object)*: Key/value mappings required to instantiate AllocationPolicy class. diff --git a/_sources/configuration/writable_storage.md.txt b/_sources/configuration/writable_storage.md.txt new file mode 100644 index 0000000000..57ed0a8a86 --- /dev/null +++ b/_sources/configuration/writable_storage.md.txt @@ -0,0 +1,47 @@ +# Writable Storage Schema + +## Properties + +- **version**: Version of schema. +- **kind**: Component kind. +- **name** *(string)*: Name of the writable storage. +- **storage** *(object)*: + - **key** *(string)*: A unique key identifier for the storage. + - **set_key** *(string)*: A unique key identifier for a collection of storages located in the same cluster. +- **readiness_state** *(string)*: The readiness state defines the availability of the storage in various environments. Internally, this label is used to determine which environments this storage is released in. There for four different readiness states: limited, deprecrate, partial, and complete. Different environments support a set of these readiness_states . If this is a new storage, start with limited which only exposes the storage to CI and local development. Must be one of: ['limited', 'deprecate', 'partial', 'complete']. +- **schema** *(object)*: + - **columns** *(array)*: Objects (or nested objects) representing columns containg a name, type and args. + - **local_table_name** *(string)*: The local table name in a single-node ClickHouse. + - **dist_table_name** *(string)*: The distributed table name in distributed ClickHouse. 
+ - **not_deleted_mandatory_condition** *(string)*: The name of the column flagging a deletion, eg deleted column in Errors. Defining this column here will ensure any query served by this storage explicitly filters out any 'deleted' rows. Should only be used for storages supporting deletion replacement. + - **partition_format** *(array)*: The format of the partitions in Clickhouse. Used in the cleanup job. +- **stream_loader** *(object)*: The stream loader for a writing to ClickHouse. This provides what is needed to start a Kafka consumer and fill in the ClickHouse table. + - **processor** *(object)*: Name of Processor class config key and it's arguments. Responsible for converting an incoming message body from the event stream into a row or statement to be inserted or executed against clickhouse. + - **name** *(string)* + - **args** *(object)*: Key/value mappings required to instantiate the processor class. + - **default_topic** *(string)*: Name of the Kafka topic to consume from. + - **commit_log_topic** *(['string', 'null'])*: Name of the commit log Kafka topic. + - **subscription_scheduled_topic** *(['string', 'null'])*: Name of the subscription scheduled Kafka topic. + - **subscription_scheduler_mode** *(['string', 'null'])*: The subscription scheduler mode used (e.g. partition or global). This must be specified if subscriptions are supported for this storage. + - **subscription_result_topic** *(['string', 'null'])*: Name of the subscription result Kafka topic. + - **replacement_topic** *(['string', 'null'])*: Name of the replacements Kafka topic. + - **dlq_topic** *(['string', 'null'])*: Name of the DLQ Kafka topic. + - **pre_filter** *(object)*: Name of class which filter messages incoming from stream. + - **type** *(string)*: Name of StreamMessageFilter class config key. + - **args** *(object)*: Key/value mappings required to instantiate StreamMessageFilter class. 
+- **query_processors** *(array)* + - **processor** *(string)*: Name of ClickhouseQueryProcessor class config key. Responsible for the transformation applied to a query. + - **args** *(object)*: Key/value mappings required to instantiate QueryProcessor class. +- **query_splitters** *(array)* + - **splitter** *(string)*: Name of QuerySplitStrategy class config key. Responsible for splitting a query into two at runtime and combining the results. + - **args** *(object)*: Key/value mappings required to instantiate QuerySplitStrategy class. +- **mandatory_condition_checkers** *(array)* + - **condition** *(string)*: Name of ConditionChecker class config key. Responsible for running final checks on a query to ensure that transformations haven't impacted/removed conditions required for security reasons. + - **args** *(object)*: Key/value mappings required to instantiate ConditionChecker class. +- **allocation_policies** *(array)* + - **name** *(string)*: Name of the AllocationPolicy used for allocating read resources per query on this storage. + - **args** *(object)*: Key/value mappings required to instantiate AllocationPolicy class. +- **replacer_processor** *(object)*: + - **processor** *(string)*: Name of ReplacerProcessor class config key. Responsible for optimizing queries on a storage which can have replacements, eg deletions/updates. + - **args** *(object)*: Key/value mappings required to instantiate ReplacerProcessor class. +- **writer_options** *(object)*: Extra Clickhouse fields that are used for consumer writes. diff --git a/_sources/contributing/environment.rst.txt b/_sources/contributing/environment.rst.txt new file mode 100644 index 0000000000..b7ba6650b6 --- /dev/null +++ b/_sources/contributing/environment.rst.txt @@ -0,0 +1,84 @@ +============================= +Snuba development environment +============================= + +This section explains how to run snuba from source and set up a development +environment. 
+ +In order to set up Clickhouse, Redis, and Kafka, please refer to :doc:`/getstarted`. + +Prerequisites +------------- +`pyenv `_ must be installed on your system. +It is also assumed that you have completed the steps to set up the `sentry dev environment `_. + +If you are using Homebrew and a M1 Mac, ensure the development packages you've installed with Homebrew are available +by setting these environment variables:: + + export CPATH=/opt/homebrew/include + export LIBRARY_PATH=/opt/homebrew/lib + +Install / Run +------------- + +clone this repo into your workspace:: + + git@github.com:getsentry/snuba.git + +These commands set up the Python virtual environment:: + + cd snuba + make pyenv-setup + python -m venv .venv + source .venv/bin/activate + pip install --upgrade pip==22.2.2 + make develop + +These commands start the Snuba api, which is capable of processing queries:: + + snuba api + +This command instead will start the api and all the Snuba consumers to ingest +data from Kafka:: + + snuba devserver + +Running tests +------------- + +This command runs unit and integration tests:: + + make develop (if you have not run it already) + make test + +Running sentry tests against snuba +++++++++++++++++++++++++++++++++++ + +This section instead runs Sentry tests against a running Snuba installation + +Make sure there is no snuba container already running:: + + docker ps -a | grep snuba + +Start your local snuba api server:: + + git checkout your-snuba-branch + source .venv/bin/activate + snuba api + +and, in another terminal:: + + cd ../sentry + git checkout master + git pull + sentry devservices up --exclude=snuba + +This will get the most recent version of Sentry on master, and bring up all snuba's dependencies. + +You will want to run the following Sentry tests:: + + make test-acceptance + make test-snuba + make test-python + +These tests do not use Kafka due to performance reasons. 
The snuba test suite does test the kafka functionality diff --git a/_sources/getstarted.rst.txt b/_sources/getstarted.rst.txt new file mode 100644 index 0000000000..3dcfe8c0bd --- /dev/null +++ b/_sources/getstarted.rst.txt @@ -0,0 +1,56 @@ +========================== +Getting started with Snuba +========================== + +This is a guide to quickly start Snuba up in the context of a Sentry +development environment. + +Requirements +------------ + +Snuba assumes: + +1. A Clickhouse server endpoint at ``CLICKHOUSE_HOST`` (default ``127.0.0.1``). +2. A redis instance running at ``REDIS_HOST`` (default ``127.0.0.1``). On port + `6379` +3. A Kafka cluster running at ``127.0.0.1`` on port `9092`. + +A quick way to get these services running is to set up sentry, and add the following line +in ``~/.sentry/sentry.conf.py``:: + + SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream" + +And then use:: + + sentry devservices up --exclude=snuba + +Note that Snuba assumes that everything is running on UTC time. Otherwise +you may experience issues with timezone mismatches. + + +Sentry + Snuba +-------------- + +Add/change the following lines in ``~/.sentry/sentry.conf.py``:: + + SENTRY_SEARCH = 'sentry.search.snuba.EventsDatasetSnubaSearchBackend' + SENTRY_TSDB = 'sentry.tsdb.redissnuba.RedisSnubaTSDB' + SENTRY_EVENTSTREAM = 'sentry.eventstream.snuba.SnubaEventStream' + +Run:: + + sentry devservices up + +Access raw clickhouse client (similar to psql):: + + docker exec -it sentry_clickhouse clickhouse-client + +Data is written into the table `sentry_local`: `select count() from sentry_local;` + +Settings +-------- + +Settings are found in ``settings.py`` + +- ``CLUSTERS`` : Provides the list of clusters and the hostname, port, and storage sets that should run on each cluster. Local vs distributed is also set per cluster. +- ``REDIS_HOST`` : The host redis is running on. 
diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt new file mode 100644 index 0000000000..9b53a0513c --- /dev/null +++ b/_sources/index.rst.txt @@ -0,0 +1,22 @@ +.. include:: intro.rst + +Contents: +--------- + +.. toctree:: + :maxdepth: 1 + + getstarted + architecture/overview + architecture/datamodel + architecture/slicing + architecture/queryprocessing + configuration/overview + query/overview + language/snql + migrations/modes + contributing/environment + clickhouse/death_queries + clickhouse/topology + clickhouse/schema_design + clickhouse/supported_versions diff --git a/_sources/intro.rst.txt b/_sources/intro.rst.txt new file mode 100644 index 0000000000..80f331c6d3 --- /dev/null +++ b/_sources/intro.rst.txt @@ -0,0 +1,33 @@ +Snuba is a service that provides a rich data model on top of Clickhouse +together with a fast ingestion consumer and a query optimizer. + +Snuba was originally developed to replace a combination of Postgres and +Redis to search and provide aggregated data on Sentry errors. +Since then it has evolved into the current form where it supports most +time series related Sentry features over several data sets. + +Features: +--------- + +* Provides a database access layer to the Clickhouse distributed data store. +* Provides a graph logical data model the client can query through the SnQL + language which provides functionalities similar to those of SQL. +* Support multiple separate data sets in a single installation. +* Provides a rule based query optimizer. +* Provides a migration system to apply DDL changes to Clickhouse both in a + single node and distributed environment. +* Ingest data directly from Kafka +* Supports both point in time queries and streaming queries. + + +Some use cases in Sentry: +------------------------- + +* The ``events`` data set powers features like the Issue Page. Here the search + functionality is powered by Snuba as well as all the aggregations. 
+* The ``discover`` data set powers all the Performance Monitoring related + features. +* The ``sessions`` data set powers the Releases feature. Specifically this + data set ingests a much higher volume of data points and stores pre-aggregated + data to allow fast queries over higher volume of data. +* The ``outcomes`` data set powers the Stats page. diff --git a/_sources/language/snql.rst.txt b/_sources/language/snql.rst.txt new file mode 100644 index 0000000000..f4b6b102bc --- /dev/null +++ b/_sources/language/snql.rst.txt @@ -0,0 +1,195 @@ +======================= +The SnQL query language +======================= + +This document describes the Snuba Query Language (SnQL). For more details on +how to actually send a query to Snuba see :doc:`/query/overview`. + +This is the query structure.:: + + MATCH simple | join | subquery + SELECT [expressions] | [aggregations BY expressions] + ARRAY JOIN [column] + WHERE condition [[AND | OR] condition]* + HAVING condition [[AND | OR] condition]* + ORDER BY expression ASC|DESC [, expression ASC|DESC]* + LIMIT n BY [expressions] + LIMIT n + OFFSET n + GRANULARITY n + TOTALS boolean + + +These queries are sent as strings to the ``/:dataset/snql`` endpoint encoded in a +JSON body of the form below.:: + + { + "query": "", + "dataset": "", + "consistent": bool, + "turbo": bool, + "debug": bool, + } + +The dataset is implied through the url used for the query. All of the fields except +for ``query`` are optional in the JSON body. + +MATCH +===== + +Our data model is represented by a graph of entities. This clause identifies +the pattern of the subgraphs we are querying on. There are three types of +MATCH clause that are currently supported: + +**Simple:** + +``MATCH ( [SAMPLE n])`` + +This is equivalent to all of our current queries. This is querying data from +a single entity (Events, Transactions etc.) It is possible to add an optional +sample to the query by adding it with the entity. + +Example ``MATCH (events)``. 
+ +**Subquery:** + +``MATCH { }`` + +Inside the curly braces can be another SnQL query in its entirety. Anything +in the SELECT/BY clause of the subquery will be exposed in the outer query +using the aliases specified. + +Example:: + + MATCH { + MATCH (transactions) + SELECT avg(duration) AS avg_d BY transaction + } + SELECT max(avg_d) + +**Join:** + +``MATCH (: [SAMPLE n]) -[]-> (: [SAMPLE n])`` + +A join represents a multi node subgraph, that is a subgraph that includes +multiple relationships between different nodes. We only support 1..n, +n..1 and 1..1 directed relationships between nodes. + +With JOINs every entity must have an alias, which is a unique string. +Sampling can also be applied to any of the entities in the join. The +```` is a string that is specified in the Entity in Snuba, and +is a short hand for a set of join conditions. It's possible to have more +than one join clause, separated by commas. + +Example:: + + MATCH + (e: events) -[grouped]-> (g: groupedmessage), + (e: events) -[assigned]-> (a: groupassignee) + SELECT count() AS tot BY e.project_id, g.id + WHERE a.user_id = "somebody" + +The type of join (left/inner) and the join key are part of the data model +and not part of the query. They are hard coded in the entity code. +This is because not every entity can be safely joined with any other entity +in the distributed version of the underlying database. + +The tuples provided by the match clause to the where clause look exactly +like the ones produced by a conventional join clause.:: + + [ + {"e.project_id": 1, "g.id": 10} + {"e.project_id": 1, "g.id": 11} + {"e.project_id": 2, "g.id": 20} + ... + ] + + +SELECT .. BY +============ + +This clause specifies which results should be returned in the output. +If there is an aggregation, then everything in the ``BY`` clause is +treated as a grouping key.
+It is possible to have aggregations without a ``BY`` clause if we want +to aggregate across the entire result set, but, in such case, nothing +other than the aggregation can be in the ``SELECT``. +It's not valid to have +an empty ``SELECT`` clause, even if there is a ``BY`` clause. + +Expressions in the SELECT clause can be columns, arithmetic, functions +or any combination of the three. If the query is a join, then each column +must have a qualifying alias that matches one of the entity aliases in the +MATCH clause. + +WHERE +===== + +This is the filter of the query that happens **before** aggregations (like +the WHERE in SQL). + +Conditions are infix expressions of the form ``LHS OP RHS*``, where ``LHS`` +and ``RHS`` are literal values or expressions. ``OP`` refers to a specific +operator to compare the two values. These operators are one of +``=, !=, <, <=, >, >=, IN, NOT IN, LIKE, NOT LIKE, IS NULL, IS NOT NULL``. +Note that the ``RHS`` is optional when using an operator like ``IS NULL``. + +Conditions can be combined using the boolean keywords ``AND`` or ``OR``. +They can also be grouped using ``()``. + +Some conditions will be mandatory to provide a valid query depending on +the entity. For example the Transactions entity requires a project id +condition and a time range condition. + +HAVING +====== + +Works like the WHERE clause but it is applied after the aggregations declared +in the SELECT clause. So we can apply conditions on the result of an aggregation +function here. + +ORDER BY +======== + +Specify the expression(s) to order the result set on. + +LIMIT BY/LIMIT/OFFSET +===================== + +Pretty self explanatory, they take integers and set the corresponding +values in the Clickhouse query. If a query doesn't specify the limit or +offset, they will be defaulted to 1000 and 0 respectively. + +GRANULARITY +=========== + +An integer representing the granularity to group time based results. 
+ +Some of the entities in Snuba provide a magic column that you can use to group data by. The column gives a floored time value for each row so that rows in the same minute/hour/day/etc. can be grouped. + +The magic column for a given entity can be found by finding the TimeSeriesProcessor for the entity. Example, for errors, you can find the TimeSeriesProcessor defined `here `_. You can see that the magic column is `time` and it uses the `timestamp` column for grouping. + +Granularity determines the number of seconds in each of these time buckets. Eg, to count the number of events by hour, you would do the following + +Example:: + + MATCH(events) count(event_id) AS event_count + BY time + WHERE timestamp >= toDateTime('2022-01-15T00:00:00.000000') AND timestamp < toDateTime('2022-01-21T00:00:00.000000') + GRANULARITY 3600 + +TOTALS +====== + +If set to True, the response from Snuba will have a ``"totals"`` key that +contains the total values across all the selected rows. + +SAMPLE +====== + +If a sampling rate isn't provided by a node in the ``MATCH`` clause, then it +can be specified here. In this case, Snuba will assign the sample rate to +one of the nodes in the query. A sample can be either a float between 0 and +1, representing a percentage of rows to sample. + +Or it can be an integer greater than 1 which represents the number of rows to sample. diff --git a/_sources/migrations/modes.rst.txt b/_sources/migrations/modes.rst.txt new file mode 100644 index 0000000000..94b8b98f17 --- /dev/null +++ b/_sources/migrations/modes.rst.txt @@ -0,0 +1,57 @@ +====================== +Snuba Migration Modes +====================== + +This document outlines a way to try out distributed migrations. +Note that this is experimental, and should be used only for development +purposes at the moment. Distributed mode is not supported when testing yet. +Local mode for migrations is currently fully supported.
+ +If you are running ClickHouse via Sentry's devservices, the +main "switch" between the two modes for running data migrations (local and +distributed) lives in ``sentry/conf/server.py``. +The controlling environment variable is ``SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES``, +and its value must be set in order to use a specific mode. + +Once this boolean variable is set, one of two ClickHouse Docker volumes will be +used for data storage, depending on the mode (distributed or local). Whenever a user +wants to switch between the two modes, they must "turn off" the running ClickHouse +container, alter the environment variable mentioned above, and then "turn on" the +same container to be in the new mode. + +More information on migrations in general can be found `here `_. + +Enabling Local Mode +===================== + +In your local ``server.py``, set ``SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES`` +to False. This is the default setting, so configuration is already +set up for local mode migrations. Start up the corresponding ClickHouse +container (``sentry devservices up clickhouse``). + +Now, run migrations as expected (``snuba migrations migrate --force``). + + +Enabling Distributed Mode +============================ + +In your local ``server.py``, set ``SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES`` +to True. Start up the corresponding ClickHouse container (``sentry devservices up clickhouse``). +Make sure that the Zookeeper container is also running; without it, distributed migrations +will not work properly. + +Now, we take a look at the cluster configurations that can be used in Distributed tables. These are +set in `this config `_. +The current configuration in the file is a default, 1 shard with 1 replica model, and is best to use +for now, as it supports migrations for all storages. Moving forward, we look to adding support +for multi-sharded configurations, and ensuring storages are placed on the right clusters.
+More examples of and information on cluster configurations can be found in `this link `_. + +Finally, set up cluster connection details (for example, which storage is to be assigned +to be which cluster) in `this file `_. +This needs to be done only for distributed migrations, as the default cluster details will be used in local mode. +The default in this file works with the default cluster configurations mentioned above, so no changes +are immediately necessary. + +Now, run migrations with the ``SNUBA_SETTINGS`` environment variable pointing to distributed mode. +This can be done as follows: ``SNUBA_SETTINGS=distributed snuba migrations migrate --force``. diff --git a/_sources/query/overview.rst.txt b/_sources/query/overview.rst.txt new file mode 100644 index 0000000000..3bde4737b9 --- /dev/null +++ b/_sources/query/overview.rst.txt @@ -0,0 +1,313 @@ +============== +Querying Snuba +============== + +This guide drives you through the process of authoring and testing a +Snuba query. + +Exploring the Snuba data model +============================== + +In order to architect a Snuba query, the first step is being able to +know which Dataset you should query, which Entities you should select +and what the schema of each Entity is. + +For an introduction about Datasets and Entities, see the :doc:`/architecture/datamodel` +section. + +Datasets can be found `in this module `_. +Each Dataset is a class that references the Entities. + +The list of entities in the system can be found via the ``snuba entities`` +command:: + + snuba entities list + +would return something like:: + + Declared Entities: + discover + errors + events + groups + groupassignee + groupedmessage + ..... + +Once we have found the entity we are interested into, we need to understand +the schema and the relationship declared on that entity. 
+The same command describes an Entity:: + + snuba entities describe groupedmessage + +Would return:: + + Entity groupedmessage + Entity schema + -------------------------------- + offset UInt64 + record_deleted UInt8 + project_id UInt64 + id UInt64 + status Nullable(UInt8) + last_seen Nullable(DateTime) + first_seen Nullable(DateTime) + active_at Nullable(DateTime) + first_release_id Nullable(UInt64) + + Relationships + -------------------------------- + groups + -------------------------------- + Destination: events + Type: LEFT + Join keys + -------------------------------- + project_id = LEFT.project_id + id = LEFT.group_id + + +Which provides the list of columns with their type and the relationships to +other entities defined in the data model. + +Preparing a query for Snuba +=========================== + +Snuba query language is called SnQL. It is documented in the :doc:`/language/snql` +section. So this section does not go into details. + +There is a python sdk that can be used to build Snuba queries and it can +be used in any Python client including Sentry. `This `_ +is where the sdk project is documented. + +The query is represented as a ``Query`` object like:: + + query = Query( + dataset="discover", + match=Entity("events"), + select=[ + Column("title"), + Function("uniq", [Column("event_id")], "uniq_events"), + ], + groupby=[Column("title")], + where=[ + Condition(Column("timestamp"), Op.GT, datetime.datetime(2021, 1, 1)), + Condition(Column("project_id"), Op.IN, Function("tuple", [1, 2, 3])), + ], + limit=Limit(10), + offset=Offset(0), + granularity=Granularity(3600), + ) + +More details on how to build a query are in the sdk documentation. + +Once the query object is ready it can be sent to Snuba. + +Sending a query to Snuba with Sentry +==================================== + +The most common use case when querying Snuba is via Sentry. This section +explains how to build a query in the Sentry code base and send it to Snuba. 
+ +Sentry imports the Snuba sdk described above. This is the recommended way +to build Snuba queries. + +Once a ``Query`` object has been created the Snuba client api provided by +Sentry can and should be used to send the query to Snuba. + +The api is in `this module `_. +It takes care of caching, retries and allows bulk queries. + +The method returns a dictionary that contains the data in response and +additional metadata:: + + { + "data": [ + { + "title": "very bad", + "uniq_events": 2 + } + ], + "meta": [ + { + "name": "title", + "type": "String" + }, + { + "name": "uniq_events", + "type": "UInt64" + } + ], + "timing": { + ... details ... + } + } + +The ``data`` section is a list with one dictionary per row. The ``meta`` +section contains the list of the columns included in the response with +their data type as inferred by Clickhouse. + +More details about the structure of the timing section below. + +Sending a test query through the web UI +======================================= + +Snuba has a minimal web UI you can use to send queries. You can run Snuba +locally and the web UI will be accessible at ``http://127.0.0.1:1218/[DATASET NAME]/snql``. + +.. image:: /_static/query/snubaUI.png + +The SnQL query should be provided (sorry, on one line only) in the `query` +attribute and the structure of the response is the same discussed in the +section above. + +Sending a query via curl +======================== + +The web ui just sends the payload as a POST. So the same result can be +achieved with curl or any other HTTP client. + +Request and response formats +============================ + +The request format is the same visible in the screenshot: + +* ``query`` contains the SnQL query as a string +* ``dataset`` is the dataset name (if not already specified in the url) +* ``debug`` makes Snuba provide exhaustive statistics in the response + including the Clickhouse query.
+* ``consistent`` forces the Clickhouse query to be executed in single + threaded mode and, in case the Clickhouse table is + replicated, it will force Snuba to always hit the same + node. Which can guarantee sequential consistency as + that is the node where the consumer write by default. + This is achieved with the `load balancing `_ + Clickhouse property which is set as ``in_order``. +* ``turbo`` sets a sampling rate to the query defined in the ``TURBO_SAMPLE_RATE`` + Snuba setting. It also prevents Snuba to apply the ``FINAL`` + mode to the Clickhouse query in case it was needed to guarantee + correct results after replacements. + +Snuba can respond with 4 http codes. 200 is for a successful query, +if the query cannot be properly validated it will be a 400. A 500 generally +means a Clickhouse related issue (that go from timeout to connection issues) +though there are several invalid queries that Snuba is not able to identify +in advance still (we are removing them). +Snuba has an internal rate limiter so 429 is also a possible return code. + +The response format for a successful query is the same discussed above. 
+The complete version looks like this (in debug mode) :: + + { + "data": [], + "meta": [ + { + "name": "title", + "type": "String" + } + ], + "timing": { + "timestamp": 1621038379, + "duration_ms": 95, + "marks_ms": { + "cache_get": 1, + "cache_set": 4, + "execute": 39, + "get_configs": 0, + "prepare_query": 10, + "rate_limit": 4, + "validate_schema": 34 + } + }, + "stats": { + "clickhouse_table": "errors_local", + "final": false, + "referrer": "http://127.0.0.1:1218/events/snql", + "sample": null, + "project_rate": 0, + "project_concurrent": 1, + "global_rate": 0, + "global_concurrent": 1, + "consistent": false, + "result_rows": 0, + "result_cols": 1, + "query_id": "f09f3f9e1c632f395792c6a4bfe7c4fe" + }, + "sql": "SELECT (title AS _snuba_title) FROM errors_local PREWHERE equals((project_id AS _snuba_project_id), 1) WHERE equals(deleted, 0) AND greaterOrEquals((timestamp AS _snuba_timestamp), toDateTime('2021-05-01T00:00:00', 'Universal')) AND less(_snuba_timestamp, toDateTime('2021-05-11T00:00:00', 'Universal')) LIMIT 1000 OFFSET 0" + } + +The ``timing`` section contains the timestamp of the query and the duration. What +is interesting is that the duration is broken down into phases: ``marks_ms``. + +The ``sql`` element is the Clickhouse query. + +The ``stats`` dictionary contains the following keys + +* ``clickhouse_table`` is the table picked by snuba during query processing +* ``final`` tells if Snuba decided to send a FINAL query to Clickhouse which would force + Clickhouse to apply the relevant merges (for merge trees) right away. + `Details `_ +* ``sample`` is the sampling rate applied +* ``project_rate`` is the number of request per second Snuba received for the specific + project at the time of the query +* ``project_concurrent`` is the number of concurrent query involving the specific project + at the time of the query. 
+* ``global_rate`` same as for ``project_rate`` but not focused on one project +* ``global_concurrent`` same as for ``project_concurrent`` but not focused on one project +* ``query_id`` is a unique identifier for this query. + +A query validation issue would generally have this format:: + + { + "error": { + "type": "invalid_query", + "message": "Missing >= condition with a datetime literal on column timestamp for entity events. Example: timestamp >= toDateTime('2023-05-16 00:00')" + } + } + +A Clickhouse error would have a similar structure. The ``type`` field will say +``clickhouse``, the message will contain details around the exception. +Contrarily to the query validation errors, in case of Clickhouse errors, the +query is actually executed, so all the timing and stats details described for +successful query are present. + + +Creating a Subscription query +============================= + +Send the payload as a POST to ``127.0.0.1:1218/[DATASET NAME]/[ENTITY NAME]/subscriptions``. + +Request Format +=============== + +A subscription query would generally have this payload format:: + + { + "project_id": 42, + "time_window" : 150, + "resolution" : 60, + "query" : "MATCH (events) SELECT ...." + } + +project_id, resolution, time_window are all specified as separate fields +in the subscription payload by the user, alongside the query. This allows +us to pre-build one subscription query and vary these as separate parameters. + +'time_window' becomes part of the query condition (i.e. the WHERE), and the +subscription query will look at the past 'time_window' seconds (as specified +by the window) of events. For example, if 'time_window' = 60, the +subscription query will select rows whose timestamp column's values fall in +the range of [start - 60 seconds, start) where 'start' is defined as +the timestamp at which the subscription was created. As 'time_window' +increases, the larger the range of accepted values for the relevant +timestamp column.
+ +'project_id' becomes part of the query condition, and the query will filter +records by matching on the specified id. + +'resolution' is used to determine when the scheduler creates tasks so that +the executor can run subscription queries. The scheduler can either schedule +the subscription immediately, or can schedule subscriptions with +a jitter (see JitteredTaskBuilder defintion for more details). For scheduling, +a running timestamp is maintained and in the case of immediate scheduling, +a subscription task is scheduled every 'resolution' seconds. diff --git a/_static/_sphinx_javascript_frameworks_compat.js b/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 0000000000..8549469dc2 --- /dev/null +++ b/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,134 @@ +/* + * _sphinx_javascript_frameworks_compat.js + * ~~~~~~~~~~ + * + * Compatability shim for jQuery and underscores.js. + * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. 
+ */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var 
result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/_static/alabaster.css b/_static/alabaster.css new file mode 100644 index 0000000000..517d0b29cb --- /dev/null +++ b/_static/alabaster.css @@ -0,0 +1,703 @@ +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Georgia, serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + 
border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Georgia, serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: Georgia, serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +div.sphinxsidebar .badge { + border-bottom: none; +} + +div.sphinxsidebar .badge:hover { + border-bottom: none; +} + +/* To address an issue with donation coming after search */ +div.sphinxsidebar h3.donation { + margin-top: 10px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + 
font-family: Georgia, serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: Georgia, serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; 
+} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin-left: 0; + margin-right: 0; + margin-top: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + 
padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + +@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 
20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. + */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + + +/* relbar */ + +.related { + line-height: 30px; + width: 100%; + font-size: 0.9rem; +} + +.related.top { + border-bottom: 1px solid #EEE; + margin-bottom: 20px; +} + +.related.bottom { + border-top: 1px solid #EEE; +} + +.related ul { + padding: 0; + margin: 0; + list-style: none; +} + +.related li { + display: inline; +} + +nav#rellinks { + float: right; +} + +nav#rellinks li+li:before { + content: "|"; +} + +nav#breadcrumbs li+li:before { + content: "\00BB"; +} + +/* Hide certain items when printing */ +@media print { + div.related { + display: none; + } +} \ No newline at end of file diff --git a/_static/architecture/clickhouse_nodes.png 
b/_static/architecture/clickhouse_nodes.png new file mode 100644 index 0000000000..fd8d3c61e4 Binary files /dev/null and b/_static/architecture/clickhouse_nodes.png differ diff --git a/_static/architecture/compositeprocessing.png b/_static/architecture/compositeprocessing.png new file mode 100644 index 0000000000..7f79616c6f Binary files /dev/null and b/_static/architecture/compositeprocessing.png differ diff --git a/_static/architecture/datamodel.png b/_static/architecture/datamodel.png new file mode 100644 index 0000000000..56400cce2d Binary files /dev/null and b/_static/architecture/datamodel.png differ diff --git a/_static/architecture/deployment_legend.png b/_static/architecture/deployment_legend.png new file mode 100644 index 0000000000..0959c64345 Binary files /dev/null and b/_static/architecture/deployment_legend.png differ diff --git a/_static/architecture/errors_transactions_deployment.png b/_static/architecture/errors_transactions_deployment.png new file mode 100644 index 0000000000..8ba7f42b14 Binary files /dev/null and b/_static/architecture/errors_transactions_deployment.png differ diff --git a/_static/architecture/joins.png b/_static/architecture/joins.png new file mode 100644 index 0000000000..2b262515d9 Binary files /dev/null and b/_static/architecture/joins.png differ diff --git a/_static/architecture/multientity.png b/_static/architecture/multientity.png new file mode 100644 index 0000000000..9250392c66 Binary files /dev/null and b/_static/architecture/multientity.png differ diff --git a/_static/architecture/outcomes_deployment.png b/_static/architecture/outcomes_deployment.png new file mode 100644 index 0000000000..d40ebd0ffa Binary files /dev/null and b/_static/architecture/outcomes_deployment.png differ diff --git a/_static/architecture/overview.png b/_static/architecture/overview.png new file mode 100644 index 0000000000..cd66812674 Binary files /dev/null and b/_static/architecture/overview.png differ diff --git 
a/_static/architecture/queryprocessing.png b/_static/architecture/queryprocessing.png new file mode 100644 index 0000000000..a2b25e848d Binary files /dev/null and b/_static/architecture/queryprocessing.png differ diff --git a/_static/architecture/sessions_deployment.png b/_static/architecture/sessions_deployment.png new file mode 100644 index 0000000000..03423b46d9 Binary files /dev/null and b/_static/architecture/sessions_deployment.png differ diff --git a/_static/architecture/singleentity.png b/_static/architecture/singleentity.png new file mode 100644 index 0000000000..f1c47a5e46 Binary files /dev/null and b/_static/architecture/singleentity.png differ diff --git a/_static/architecture/snuba_deployment.png b/_static/architecture/snuba_deployment.png new file mode 100644 index 0000000000..0383838ac0 Binary files /dev/null and b/_static/architecture/snuba_deployment.png differ diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 0000000000..4e9a9f1fac --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,900 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li 
p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, 
+caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 
5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, 
table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > 
li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; 
+ cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: 
-1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/custom.css b/_static/custom.css new file mode 100644 index 0000000000..2a924f1d6a --- /dev/null +++ b/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 0000000000..c3db08d1c3 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,264 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.highlightSearchWords(); + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords: () => { + const highlight = + new URLSearchParams(window.location.search).get("highlight") || ""; + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + const url = new URL(window.location); + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + const blacklistedElements 
= new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", + ]); + document.addEventListener("keydown", (event) => { + if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements + if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + case "Escape": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.hideSearchWords(); + event.preventDefault(); + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 0000000000..6fd198138e --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '23.7.0.dev0', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git 
a/_static/file.png b/_static/file.png new file mode 100644 index 0000000000..a858a410e4 Binary files /dev/null and b/_static/file.png differ diff --git a/_static/jquery-3.6.0.js b/_static/jquery-3.6.0.js new file mode 100644 index 0000000000..fc6c299b73 --- /dev/null +++ b/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? 
function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. 
+ // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? 
this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new 
Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? 
-1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 
+ indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? 
+ + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? + String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? 
+ + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 
] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = 
arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // 
https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? 
[ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should 
be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue. + input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. 
+ el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains 
+ ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? 
+ + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent 
for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? 
"nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 
] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." 
+ // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but 
not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built 
with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[ i ]; + + // Abort if we hit a combinator + if ( Expr.relative[ ( type = token.type ) ] ) { + break; + } + if ( ( find = Expr.find[ type ] ) ) { + + // Search, expanding context for leading sibling combinators + if ( ( seed = find( + token.matches[ 0 ].replace( runescape, funescape ), + rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || + context + ) ) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert( function( el ) { + + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; +} ); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert( function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute( "href" ) === "#"; +} ) ) { + addHandle( "type|href|height|width", function( 
elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + } ); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert( function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +} ) ) { + addHandle( "value", function( elem, _name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + } ); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert( function( el ) { + return el.getAttribute( "disabled" ) == null; +} ) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + 
+} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? 
context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, _i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, _i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, _i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( elem.contentDocument != null && + + // Support: IE 11+ + // elements with no `data` attribute has an object + // `contentDocument` with a `null` prototype. 
+ getProto( elem.contentDocument ) ) { + + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. 
+ * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? + createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future 
add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? 
+ jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + 
resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } 
else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? [ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = 
undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? 
+ onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! 
+ return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function 
values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. 
A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( 
arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? + this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. 
+ if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). 
+ jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && 
isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); + +var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); + + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // Support: IE 
<=9 only + // IE <=9 replaces "; + support.option = !!div.lastChild; +} )(); + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. 
+ * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." 
).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( 
elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + 
var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... 
) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. 
+function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. 
+ return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? + src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + 
key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.get( src ); + events = pdataOld.events; + + if ( events ) { + dataPriv.remove( dest, "handle events" ); + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( 
elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // 
Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = 
function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + 
// Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. 
+ trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. 
+ ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? 
+ + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? "border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + 
extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. 
We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. 
+ scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? 
value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
+ if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? 
"" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return 
remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + 
+ prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || 
optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + 
jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || 
nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use 
proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + 
+jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. 
+ finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? 
+ "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? 
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? 
+ bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. + // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + 
jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). 
+ var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? + jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? 
i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function 
addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? 
target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = 
s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? 
e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? 
+ jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 
0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of 
requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion 
exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? 
html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack 
transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Snuba Data Model

+

This section explains how data is organized in Snuba and how user facing +data is mapped to the underlying database (Clickhouse in this case).

+

The Snuba data model is divided horizontally into a logical model and +a physical model. The logical data model is what is visible to the Snuba +clients through the Snuba query language. Elements in this model may or may +not map 1:1 to tables in the database. The physical model, instead, maps 1:1 +to database concepts (like tables and views).

+

The reasoning behind this division is that it allows Snuba to expose a +stable interface through the logical data model and perform complex mapping +internally to execute a query on different tables (part of the physical +model) to improve performance in a way that is transparent to the client.

+

The rest of this section outlines the concepts that compose the two models +and how they are connected to each other.

+

The main concepts, described below, are dataset, entity and storage.

+../_images/datamodel.png +
+

Datasets

+

A Dataset is a name space over Snuba data. It provides its own schema and +it is independent from other datasets both in terms of logical model and +physical model.

+

Examples of datasets are, discover, outcomes, sessions. There is no +relationship between them.

+

A Dataset can be seen as a container for the components that define its +abstract data model and its concrete data model that are described below.

+

In terms of query language, every Snuba query targets one and only one +Dataset, and the Dataset can provide extensions to the query language.

+
+
+

Entities and Entity Types

+

The fundamental block of the logical data model Snuba exposes to the client +is the Entity. In the logical model an entity represents an instance of an +abstract concept (like a transaction or an error). In practice an Entity +corresponds to a row in a table in the database. The Entity Type is the +class of the Entity (like Errors or Transactions).

+

The logical data model is composed of a set of Entity Types and their +relationships.

+

Each Entity Type has a schema which is defined by a list of fields with +their associated abstract data types. The schemas of all the Entity Types +of a Dataset (there can be several) compose the logical data model that is +visible to the Snuba client and against which Snuba Queries are validated. +No lower level concept is supposed to be exposed.

+

Entity Types are unequivocally contained in a Dataset. An Entity Type cannot +be present in multiple Datasets.

+
+

Relationships between Entity Types

+

Entity Types in a Dataset are logically related. There are two types of +relationships we support:

+
    +
  • Entity Set Relationship. This mimics foreign keys. This relationship is +meant to allow joins between Entity Types. It only supports one-to-one +and one-to-many relationships at this point in time.

  • +
  • Inheritance Relationship. This mimics nominal subtyping. A group of Entity +Types can share a parent Entity Type. Subtypes inherit the schema from the +parent type. Semantically the parent Entity Type must represent the union +of all the Entities whose type inherit from it. It also must be possible +to query the parent Entity Type. This cannot be just a logical relationship.

  • +
+
+
+

Entity Type and consistency

+

The Entity Type is the largest unit where Snuba can provide some strong +data consistency guarantees. Specifically it is possible to query an Entity +Type expecting Serializable Consistency (please don’t use that. Seriously, +if you think you need that, you probably don’t). This does not extend to +any query that spans multiple Entity Types where, at best, we will have +eventual consistency.

+

This also has an impact on Subscription queries. These can only work on one +Entity Type at a time since, otherwise, they would require consistency between +Entity Types, which we do not support.

+
+

Attention

+

To be precise the unit of consistency (depending on the Entity Type) +can be even smaller and depend on how the data ingestion topics +are partitioned (project_id for example), the Entity Type is the +maximum Snuba allows. More details are (ok, will be) provided in +the Ingestion section of this guide.

+
+
+
+
+

Storage

+

Storages represent and define the physical data model of a Dataset. Each Storage is materialized in a physical database concept like a table or a materialized view. As a consequence each Storage has a schema defined by fields with their types that reflects the physical schema of the DB table/view the Storage maps to, and it is able to provide all the details to generate DDL statements to build the tables on the database.

+

Storages are able to map the logical concepts in the logical model discussed +above to the physical concept of the database, thus each Storage needs to be +related with an Entity Type. Specifically:

+
    +
  • Each Entity Type must be backed by at least one Readable Storage (a Storage we can run queries on), but can be backed by multiple Storages (for example a pre-aggregate materialized view). Multiple Storages per Entity Type are meant to allow query optimizations.

  • +
  • Each Entity Type must be backed by one and only one Writable +Storage that is used to ingest data and fill in the database tables.

  • +
  • Each Storage is backing exclusively one Entity Type.

  • +
+
+
+

Examples

+

This section provides some examples of how the Snuba data model can represent +some real world models.

+

These case studies do not necessarily reflect the current Sentry production model, nor are they part of the same deployment. They have to be considered as examples taken in isolation.

+
+

Single Entity Dataset

+

This looks like the Outcomes dataset used by Sentry. This actually does not +reflect Outcomes as of April 2020. It is though the design Outcomes should +move towards.

+../_images/singleentity.png +

This Dataset has one Entity Type only which represent an individual Outcome +ingested by the Dataset. Querying raw Outcomes is painfully slow so we have +two Storages. One is the Raw storage that reflects the data we ingest and a +materialized view that computes hourly aggregations that are much more efficient +to query. The Query Planner would pick the storage depending if the query +can be executed on the aggregated data or not.

+
+
+

Multi Entity Type Dataset

+

The canonical example of this Dataset is the Discover dataset.

+../_images/multientity.png +

This has three Entity Types: Errors and Transactions, which both inherit from Events. These form the logical data model, thus querying the Events Entity Type gives the union of Transactions and Errors but it only allows common fields between the two to be present in the query.

+

The Errors Entity Type is backed by two Storages for performance reasons. One is the main Errors Storage that is used to ingest data, the other is a read only view that puts less load on Clickhouse when querying but that offers lower consistency guarantees. Transactions only have one storage and there is a Merge Table to serve Events (which is essentially a view over the union of the two tables).

+
+
+

Joining Entity types

+

This is a simple example of a dataset that includes multiple Entity Types +that can be joined together in a query.

+../_images/joins.png +

GroupedMessage and GroupAssignee can be part of a left join query with Errors. The rest is similar to what was discussed in the previous examples.

+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/architecture/overview.html b/architecture/overview.html new file mode 100644 index 0000000000..13987b6566 --- /dev/null +++ b/architecture/overview.html @@ -0,0 +1,244 @@ + + + + + + + + + Snuba Architecture Overview — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Snuba Architecture Overview

+

Snuba is a time series oriented data store backed by Clickhouse, which is a columnar storage distributed database well suited for the kind of queries Snuba serves.

+

Data is fully stored in Clickhouse tables and materialized views, +it is ingested through input streams (only Kafka topics today) +and can be queried either through point in time queries or through +streaming queries (subscriptions).

+../_images/overview.png +
+

Storage

+

Clickhouse was chosen as backing storage because it provides a good balance +of the real time performance Snuba needs, its distributed and replicated +nature, its flexibility in terms of storage engines and consistency guarantees.

+

Snuba data is stored in Clickhouse tables and Clickhouse materialized views. +Multiple Clickhouse storage engines +are used depending on the goal of the table.

+

Snuba data is organized in multiple Datasets which represent independent +partitions of the data model. More details in the Snuba Data Model +section.

+
+
+

Ingestion

+

Snuba does not provide an api endpoint to insert rows (except when running +in debug mode). Data is loaded from multiple input streams, processed by +a series of consumers and written to Clickhouse tables.

+

A consumer consumes one or multiple topics and writes on one or multiple +tables. No table is written onto by multiple consumers as of today. This +allows some consistency guarantees discussed below.

+

Data ingestion is most effective in batches (both for Kafka but especially +for Clickhouse). Our consumers support batching and guarantee that one batch +of events taken from Kafka is passed to Clickhouse at least once. By properly +selecting the Clickhouse table engine to deduplicate rows we can achieve +exactly once semantics if we accept eventual consistency.

+
+
+

Query

+

The simplest query system is point in time. Queries are expressed in the SnQL language (The SnQL query language) and are sent as HTTP POST calls. The query engine processes the query (process described in Snuba Query Processing) and transforms it into a ClickHouse query.

+

Streaming queries (done through the Subscription Engine) allow the client to receive query results in a push way. In this case an HTTP endpoint allows the client to register a streaming query. Then the Subscription Consumer consumes the topic that is used to fill the relevant Clickhouse table for updates, periodically runs the query through the Query Engine and produces the result on the subscriptions Kafka topic.

+
+
+

Data Consistency

+

Different consistency models coexist in Snuba to provide different guarantees.

+

By default Snuba is eventually consistent. When running a query, by default, +there is no guarantee of monotonic reads since Clickhouse is multi-leader +and a query can hit any replica and there is no guarantee the replicas will +be up to date. Also, by default, there is no guarantee Clickhouse will have +reached a consistent state on its own.

+

It is possible to achieve strong consistency on specific query by forcing +Clickhouse to reach consistency before the query is executed (FINAL keyword), +and by forcing queries to hit the specific replica the consumer writes onto. +This essentially uses Clickhouse as if it was a single leader system and it +allows Sequential consistency.

+
+
+
+

Snuba within a Sentry Deployment

+

This section explains the role Snuba plays within a Sentry deployment showing the main data flows. If you are deploying Snuba stand alone, this won’t be useful for you.

+

Legend:

+../_images/deployment_legend.png +

Deployments:

+

Errors and transaction:

+../_images/errors_transactions_deployment.png +

Sessions:

+../_images/sessions_deployment.png +

Outcomes:

+../_images/outcomes_deployment.png +
+

Errors and Transactions data flow

+

The main section at the top of the diagram illustrates the ingestion process +for the Events and Transactions Entities. These two entities serve +most issue/errors related features in Sentry and the whole Performance +product.

+

There is only one Kafka topic (events) shared between errors and transactions +that feeds this pipeline. This topic contains both error messages and transaction +messages.

+

The Errors consumer consumes the events topic and writes messages in the Clickhouse errors table. Upon commit it also produces a record on the snuba-commit-log topic.

+

Alerts on Errors are generated by the Errors Subscription Consumer. This is a synchronized consumer that consumes both the main events topic and the snuba-commit-log topic so it can proceed in lockstep with the main consumer.

+

The synchronized consumer then produces alerts by querying Clickhouse and produces +the result on the result topic.

+

An identical but independent pipeline exists for transactions.

+

The Errors pipeline has an additional step: writing to the replacements topic. +Errors mutations (merge/unmerge/reprocessing/etc.) are produced by Sentry on the +events topic. They are then forwarded to the replacements topic by the +Errors Consumer and executed by the Replacement Consumer.

+

The events topic must be partitioned semantically by Sentry project id to +allow in order processing of the events within a project. This, as of today, is a +requirement for alerts and replacements.

+
+
+

Sessions and Outcomes

+

Sessions and Outcomes work in a very similar and simpler way. Specifically Sessions power Release Health features, while Outcomes mainly provide data to the Sentry stats page.

+

Both pipelines have their own Kafka topic, Kafka consumer and they write on their +own table in Clickhouse.

+
+
+

Change Data Capture pipeline

+

This pipeline is still under construction. It consumes the cdc topic and fills +two independent tables in Clickhouse.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/architecture/queryprocessing.html b/architecture/queryprocessing.html new file mode 100644 index 0000000000..7ad823d859 --- /dev/null +++ b/architecture/queryprocessing.html @@ -0,0 +1,284 @@ + + + + + + + + + Snuba Query Processing — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Snuba Query Processing

+

Snuba has a query processing pipeline that starts with the parsing of the Snuba query language (legacy and SnQL) into an AST and ends with a SQL query being executed on Clickhouse. Between these two phases, several passes on the AST to apply query processing transformations are executed.

+

The processing pipeline has two main goals: optimize the query and prevent +queries that would be dangerous for our infrastructure.

+

As for the data model, the query processing pipeline is divided into a logical +section where the product related processing is performed and a physical +section which is focused on optimizing the query.

+

The logical section contains steps like the validation of the query to ensure it matches the data model or applying custom functions. The physical section includes steps like promoting tags and selecting a pre-aggregated view to serve the query.

+
+

Query Processing Phases

+

This section gives an introduction and some pointers to code and examples +for the phases discussed above.

+../_images/queryprocessing.png +
+

Legacy and SnQL Parsers

+

Snuba supports two languages, the legacy JSON based one and the new one named SnQL. With the exception of joins and composite queries, which are not supported by the legacy language, the query processing pipeline does not change whether one or the other language is used.

+

They both produce a logical query AST which is represented by +this data structure.

+

The legacy parser is here, +while the SnQL parser is in this module.

+
+
+

Query Validation

+

This phase ensures the query can be run (most of the time, we do not yet catch all possible invalid queries). The responsibility of this phase is to return an HTTP 400 in case of an invalid query with a proper useful message to the user.

+

This is divided in two sub-phases: general validation and entity specific +validation.

+

General validation is composed of a set of checks that are applied to each query right after the Query is produced by the parser. This happens in this function. This includes validations like preventing alias shadowing and function signature validation.

+

Each entity can provide some validation logic as well in the form of required +columns. This happens in this class. +This allows the query processing to reject queries that do not have a condition +on project_id or that do not have a time range.

+
+
+

Logical Query Processors

+

Query processors are stateless transformations that receive a Query object (with +its AST) and transform it in place. This +is the interface to implement for logical processors. In the logical phase each +entity provides the query processors to be applied in sequence. Common use +cases are custom functions like apdex, +or time bucketing like the time series processor.

+

Query processors are not supposed to depend on other processors to be executed +before or after and should be independent from each other.

+
+
+

Storage Selector

+

As explained in Snuba Data Model, each Entity can define multiple Storages. +Multiple storages represent multiple tables and materialized views can be defined +for performance reasons as some can respond to some queries faster.

+

At the end of the logical processing phase (which is entirely based on the entity) +the storage selector can inspect the query and pick the appropriate storage for +the query. Storage selectors are defined in the entity data model and implement +this interface. +An example is the Errors entity, which has two storages, one is for consistent +queries (they are routed to the same nodes where events are written) and the +other only includes replicas we do not write onto to serve most queries. This +reduces the load on the nodes we write onto.

+
+
+

Query Translator

+

Different storages have different schemas (these reflect the schema of a clickhouse table or view). All of them are generally different from the entity model, the most notable example being the subscriptable expression used for tags tags[abc] that does not exist in clickhouse, where accessing a tag looks like tags.values[indexOf(tags.key, 'abc')] .

+

After a storage has been selected, the query needs to be translated to the physical query. The translator is a rule based system; rules are defined by the entity (for each storage) and are applied in sequence.

+

Contrarily to query processors, translation rules do not have full context +on the query and can only translate an individual expression. This allows us +to compose translation rules easily and reuse them across entities.

+

These +are the translation rules for the transactions entity.

+
+
+

Physical Query Processors

+

Physical query processors work in a very similar way compared to the Logical query processors. Their interface is very similar and the semantics are the same. The difference is that they operate on the physical query and, as such, they are mainly designed for optimizations. For example this processor finds equality conditions on tags and replaces them with the equivalent condition on a tags hashmap (where we have a bloom filter index) making the filtering operation faster.

+
+
+

Query Splitter

+

Some queries can be executed in an optimized way by splitting them into multiple +individual Clickhouse queries and by assembling the results of each one of them.

+

Two examples are time splitting and column splitting. Both are in this file.

+

Time splitting splits a query (that does not contain aggregations and is properly sorted) into multiple ones over a variable time range that increases in size progressively and executes them in sequence, stopping as soon as we have enough results.

+

Column splitting splits filtering and column fetching. It executes the filtering +part of the query on a minimal number of columns so Clickhouse loads fewer columns, +then, through a second query, fetches the missing columns only for the rows +filtered by the first query.

+
+
+

Query Formatter

+

This component simply formats the query into the Clickhouse query string.

+
+
+
+

Composite Query Processing

+

The discussion above is valid only for simple queries; composite ones (joins and queries that include subqueries) follow a slightly different path.

+

The simple query pipeline discussed above would not work on join queries or +on queries that contain subqueries. In order to make that work, each step +would have to take into account joined queries and subqueries, which would +multiply the complexity of the process.

+

To solve the issue we transform each join query into a join of multiple +simple subqueries. Each subquery is a simple query that can be processed by +the pipeline described above. This is also the preferred way to run Clickhouse +joins as it allows us to apply filters before the join.

+../_images/compositeprocessing.png +

The Query Processing Pipeline for this type of queries is composed of a few +additional steps with respect to what was described above.

+
+

Subquery Generator

+

This component takes a simple SnQL join query and creates a subquery for each table in the join.

+
+
+

Expressions Push Down

+

The query generated at the previous step would be a valid join but incredibly +inefficient. This step is basically a join optimizer that pushes down into +subqueries all expressions that can be part of a subquery. This is a needed +step independently from the subquery processing as the Clickhouse join engine +does not do any expression push down and it would be up to Snuba to optimize +the query.

+
+
+

Simple Query Processing Pipeline

+

This is the same pipeline discussed above from the logical query validation +to the physical query processors.

+
+
+

Join Optimizations

+

At the end of the processing we can apply some optimizations to the overall +composite query like turning a join into a Semi Join.

+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/architecture/slicing.html b/architecture/slicing.html new file mode 100644 index 0000000000..ea8cb226bb --- /dev/null +++ b/architecture/slicing.html @@ -0,0 +1,213 @@ + + + + + + + + + Snuba Data Slicing (under development) — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Snuba Data Slicing (under development)

+

This feature is under active development and is subject to change

+

To support a higher volume of data, we are building out support for +datasets and storages that span multiple physical resources +(Kafka clusters, Redis instances, Postgres databases, ClickHouse clusters, +etc.) with the same schema. Across Sentry, data records will +have a logical partition assignment based on the data’s organization_id. In Snuba, +we maintain a mapping of logical partitions to physical slices in +settings.LOGICAL_PARTITION_MAPPING.

+

In a future revision, this settings.LOGICAL_PARTITION_MAPPING will be +used along with settings.SLICED_STORAGE_SETS to map queries and incoming +data from consumers to different ClickHouse clusters using a +(StorageSetKey, slice_id) pairing that exists in configuration.

+
+
+

Configuring a slice

+
+

Mapping logical partitions : physical slices

+

To add a slice to a storage set’s logical:physical mapping, or repartition, +increment the slice count in settings.SLICED_STORAGE_SETS for the relevant +storage set. Change the mapping of the relevant storage set’s +logical partitions in settings.LOGICAL_PARTITION_MAPPING. +Every logical partition must be assigned to a slice and the +valid values of slices are in the range of [0,settings.SLICED_STORAGE_SETS[storage_set]).

+
+
+

Defining ClickHouse clusters in a sliced environment

+

Given a storage set, there can be three different cases:

+
    +
  1. The storage set is not sliced

  2. +
  3. The storage set is sliced and no mega-cluster is needed

  4. +
  5. The storage set is sliced and a mega-cluster is needed

  6. +
+

A mega-cluster is needed when there may be partial data residing on different sliced +ClickHouse clusters. This could happen, for example, when a logical partition:slice +mapping changes. In this scenario, writes of new data will be routed to the new slice, +but reads of data will need to span multiple clusters. Now that queries need to work +across different slices, a mega-cluster query node will be needed.

+

For each of the cases above, different types of ClickHouse cluster +configuration will be needed.

+

For case 1, we simply define clusters as per usual in settings.CLUSTERS.

+

For cases 2 and 3:

+

To add a sliced cluster with an associated (storage set key, slice) pair, add cluster definitions +to settings.SLICED_CLUSTERS in the desired environment’s settings. Follow the same structure as +regular cluster definitions in settings.CLUSTERS. In the storage_set_slices field, sliced storage +sets should be added in the form of (StorageSetKey, slice_id) where slice_id is in +the range [0,settings.SLICED_STORAGE_SETS[storage_set]) for the relevant StorageSetKey.

+
+
+

Preparing the storage for sharding

+

A storage that should be sharded requires setting the partition key column that will be used +to calculate the logical partition and ultimately the slice ID for how to query the destination +data.

+

This is done with the partition_key_column_name property in the storage schema (we do not +support sharded storages for non-YAML based entities). You can see an example of how one +might shard by organization_id in generic_metrics_sets and generic_metrics_distributions +dataset YAML files.

+
+
+

Adding sliced Kafka topics

+

In order to define a “sliced” Kafka topic, add (default logical topic name, slice id) to +settings.SLICED_KAFKA_TOPIC_MAP. This tuple should be mapped to a custom physical topic +name of the form logical_topic_name-slice_id. Make sure to add the corresponding broker +configuration details to settings.SLICED_KAFKA_BROKER_CONFIG. Here, use the same tuple +(default logical topic name, slice id) as the key, and the broker config info as the value.

+

Example configurations:

+

SLICED_KAFKA_TOPIC_MAP = {(“snuba-generic-metrics”, 1): “snuba-generic-metrics-1”}

+

SLICED_KAFKA_BROKER_CONFIG = {(“snuba-generic-metrics”, 1): BROKER_CONFIG}

+

These types of topics can be “sliced”: raw topics, replacements topics, commit log topics, +subscription scheduler topics. Note that the slicing boundary stops at this point and +the results topics for subscriptions cannot be sliced.

+
+
+
+

Working in a Sliced Environment

+
+

Starting a sliced consumer

+

First, ensure that your slicing configuration is set up properly: SLICED_STORAGE_SETS, +SLICED_CLUSTERS, SLICED_KAFKA_TOPIC_MAP, and SLICED_KAFKA_BROKER_CONFIG. +See above for details.

+

Start up snuba consumer as per usual, with an extra flag --slice-id set equal +to the slice number you are reading from.

+
+
+

TODO: handling subscriptions, scheduler and executor, etc.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/clickhouse/death_queries.html b/clickhouse/death_queries.html new file mode 100644 index 0000000000..56dc96deb1 --- /dev/null +++ b/clickhouse/death_queries.html @@ -0,0 +1,139 @@ + + + + + + + + + Clickhouse Queries Of Death — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Clickhouse Queries Of Death

+

The following queries have been shown to segfault ClickHouse on 20.7 (which is the minimum Clickhouse version of Snuba). Do not run these queries in the tracing tool, unless you really want to take ClickHouse down.

+
+

countif(”DOOM”)

+

Query

+
SELECT countIf(environment='production')
+FROM ...
+PREWHERE environment = 'production'
+
+
+

A countif in the SELECT with that same condition in the PREWHERE will segfault ClickHouse. This will be fixed in 21.8 when the upgrade is complete

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/clickhouse/schema_design.html b/clickhouse/schema_design.html new file mode 100644 index 0000000000..07ddd57541 --- /dev/null +++ b/clickhouse/schema_design.html @@ -0,0 +1,276 @@ + + + + + + + + + ClickHouse Schema Design Best Practices — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

ClickHouse Schema Design Best Practices

+
+

Tip

+

This is a work-in-progress document for collecting ClickHouse schema and querying +best-practices based on experiences running ClickHouse at scale at Sentry. +It is subject to change and if something doesn’t seem right please +submit a PR to Snuba.

+
+ +
+

Columns based on dictionary data (tag promotion)

+

ClickHouse is a columnar datastore, and at run-time it loads columns on-demand +based on the columns referenced in the query (both the columns SELECT ed +and those part of the WHERE clause). The ability to store different columns independently +and not load them for every row for every query is part of the performance advantage that +ClickHouse provides over a traditional RDBMS (like PostgreSQL).

+

Commonly, a data schema contains a flexible key:value pair mapping +(canonically at Sentry: tags or contexts) and stores that +data in a Nested column that contains two arrays where the first array contains the keys +of the dictionary and the second array contains the values. To make queries faster, +a column like this can be indexed with bloom filters as described in Bloom filter indexing on dictionary-like columns. In general +we construct this index across datasets for tags but not for other columns.

+

This works well when your dataset and query design gives you the ability to +filter for exact matches and a large number of rows will NOT be an exact match. +Often, however, a ClickHouse query filters for rows that contain a substring match or regular +expression match for a tag value of a given key. This makes bloom filter indexes +not usable for the query and, depending on the other selectivity attributes of your query, +can necessitate moving (or promoting) those relevant values for a given tag key to a new separate +column [1].

+
+

Selectivity in queries and indices

+

Queries are much more efficient when they have the attribute of being low-selectivity on +the table indexes – meaning the query conditions on indexed columns filter the dataset +to a very small proportion of the overall number of rows. High selectivity +can break the efficiency of the bloom-filter style index on dictionary columns +(see Bloom filter indexing on dictionary-like columns). In cases of high-selectivity queries, there is a negative performance impact on both +bloom-filter indexed columns as well as promoted tag value columns (when searching for a key=value +pair exact match). The promoted column can make the penalty a bit less severe because +it does not load tag values from unrelated keys. Still, an effort should be made to avoid +low-selectivity queries.

+
+
+

Bloom filter indexing on dictionary-like columns

+

To facilitate faster searching on dictionary columns, we tend to create bloom filter indices on hashes of both the unique key values of each row as well as hashes of all the key=value pairs of each row. The bloom filter registers these in a stochastic data structure designed to quickly determine which elements do NOT exist in a set. So that it can model the entire unbounded keyspace in a fixed amount of memory, a bloom filter is designed to have false positives. This means that there is actually a performance penalty if the value is often present in the underlying set: First, the value must be tested against the bloom filter (which will always return “maybe present”), and after that operation occurs a full scan of the column must be performed.

+

Due to their structure, bloom filters are only good for exact value searching. They +cannot be used for “is user-value a prefix of column?” or “does column match regex?” style queries. +Those styles of queries require a separate column to search.

+ +
+
+
+

Aggregate Tables and Materialization

+

A common use case for ClickHouse and Snuba is to ingest raw data and automatically +roll it up to aggregate values (keyed by a custom set of dimensions). This lets +a dataset owner simplify their write logic while getting the query benefits of +rows that are pre-aggregated. This is done with what we’ll call a raw table +(the table the consumer writes to), an aggregate table (the table the API reads from) +and a materialized view (which describes how the data should be transformed from +raw to aggregate).

+

Sample usage of a materialized view/aggregate table from the official ClickHouse Documentation. +Note that contrary to this example, the aggregate table definition in Snuba is +always separate from the materialized view definition (which is just a ClickHouse SQL +transformation, similar to a PostgreSQL trigger).

+

In general, Snuba follows the naming conventions here:

+
    +
  • (widgets_raw_local, widgets_raw_dist) for the raw (local, distributed) tables

  • +
  • widgets_aggregation_mv for the materialized view (this only exists on storage nodes)

  • +
  • (widgets_aggregated_local, widgets_aggregated_dist) for the roll-up/aggregated (local, distributed) tables

  • +
+

Materialized views are immutable so it’s normal to have multiple versions of +widgets_aggregation_mv when behavior is updated, with suffixes like +widgets_aggregation_mv_v1, widgets_aggregation_mv_v2, etc. Migration +between materialized view versions are described in the next section but in general +old materialized views should be discarded once they are no longer used.

+
+

Schema migrations using materialization_version

+

As we discussed at the end of the prior section, materialized view logic cannot +be updated in place. In order to continuously roll-up input data without data +loss or duplication in the aggregate table, we control logic changes with a +column on the raw table, materialization_version, and making the materialized +view logic depend on specific values of that column. To update MV logic, you +create a new materialized view that looks for the last used value of +materialization_version plus one and then, after that’s been created in all +relevant environments, update the consumer to write the new materialization_version +to the raw column.

+

Here is how this might look in practice:

+

Context:

+
    +
  1. There is a raw table click_events_raw_local, that has a field named +click_duration, of type Float64. A snuba consumer is setting this to 0 for +certain types of click events.

  2. +
  3. There is a materialized view click_events_aggregation_mv that is writing +a quantilesState() value for a click_duration column in click_events_aggregated_local +including those zero-values. This materialized view looks for the value of +materialization_version = 0 in its WHERE condition.

  4. +
  5. The query users are being surprised by p90, p95, and p99 values that are taking into +account zero-duration click events which don’t make sense for the use case.

  6. +
+

To resolve this confusion, we don’t want to set quantilesState for click_duration if +the incoming click_duration is 0.

+

Steps to resolve the issue:

+
    +
  1. Create a new materialized view click_events_aggregation_mv_v1 via the migration system. This new materialized +view will use the WHERE clause or some kind of filtering to avoid setting quantilesState(0) +in the write for the click_duration column. This new materialized will only operate on +inputs in click_events_raw_local where materialization_version = 1

  2. +
  3. Test that this fixes the issue in your local environment by changing your consumer to use +materialization_version = 1. It can make sense to control this via the settings file in +(in snuba/settings/__init.py__)

  4. +
  5. Run the migration in all relevant environments.

  6. +
  7. Change the materialization_version setting mentioned above in a specific environment, to +set materialization_version = 1 on write.

  8. +
  9. Validate that the consumer is writing rows with the new materialization version, and that +it produces the expected roll-up results.

  10. +
  11. Write a migration to remove the now-unused materialized view (click_events_aggregation_mv).

  12. +
+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/clickhouse/supported_versions.html b/clickhouse/supported_versions.html new file mode 100644 index 0000000000..2410008076 --- /dev/null +++ b/clickhouse/supported_versions.html @@ -0,0 +1,138 @@ + + + + + + + + + ClickHouse supported versions — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

ClickHouse supported versions

+

The following versions of Clickhouse have been tested and are known to work +with Snuba:

+
    +
  • 20.3

  • +
  • 20.7

  • +
  • 21.8

  • +
+

Any version of Clickhouse used outside of this list could potentially work, +but is not guaranteed to work. Some functionality might be broken. Use a +different version at your own risk. There are plans to support more recent +versions of Clickhouse in the future. When Snuba has been validated to work +with the new versions of Clickhouse, this list will be updated.

+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/clickhouse/topology.html b/clickhouse/topology.html new file mode 100644 index 0000000000..67a319dd9e --- /dev/null +++ b/clickhouse/topology.html @@ -0,0 +1,165 @@ + + + + + + + + + ClickHouse Topology Best Practices — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

ClickHouse Topology Best Practices

+

Sentry has a few conventions for using ClickHouse that are not inherent to +the database but useful to consider if you intend to create new tables or +datasets.

+
+

Storage nodes vs. Query nodes

+

We tend to deploy two different types of nodes in production:

+
    +
  1. storage nodes – these contain table data for a given shard, and replicate +data between other nodes of that shard. For most datasets, they +are not queried directly.

  2. +
  3. query nodes – these contain references to tables on storage nodes via +distributed tables, and are intended to be queried directly by the API or +written to directly by consumers. They do not themselves store data but aggregate +or delegate to tables on the storage node.

  4. +
+

This separation allows us to do maintenance on storage nodes in a way that is +invisible to the application (as the query nodes generally act as a proxy, and +can more generally be kept up indefinitely).

+
+
+

Distributed Tables vs. Local Tables

+

Astute snuba users might notice that migrations contain references to tables with names +suffixed with _local and table names suffixed with _dist. This is used to +distinguish between distributed tables (generally using the ClickHouse table engine +Distributed) +and local tables (generally using one of the +MergeTree-derived +table engines). Distributed tables exist to aggregate from the shards of local tables, and following +the sequence above, distributed tables tend to be created on query nodes and local tables +tend to be created on storage nodes.

+
+
+

Tying it all together

+

This diagram hopefully combines all the above concepts into an understandable, +quick to consume format.

+../_images/clickhouse_nodes.png +
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/dataset.html b/configuration/dataset.html new file mode 100644 index 0000000000..44d1d8834c --- /dev/null +++ b/configuration/dataset.html @@ -0,0 +1,139 @@ + + + + + + + + + Dataset Schema — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Dataset Schema

+
+

Properties

+
    +
  • version: Version of schema.

  • +
  • kind: Component kind.

  • +
  • name (string): Name of the dataset.

  • +
  • entities (array): Names of entities associated with this dataset.

  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/entity.html b/configuration/entity.html new file mode 100644 index 0000000000..b621553bbe --- /dev/null +++ b/configuration/entity.html @@ -0,0 +1,218 @@ + + + + + + + + + Entity Schema — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Entity Schema

+
+

Properties

+
    +
  • version: Version of schema.

  • +
  • kind: Component kind.

  • +
  • schema (array): Objects (or nested objects) representing columns containing a name, type and args.

  • +
  • name (string): Name of the entity.

  • +
  • storages (array): An array of storages and their associated translation mappers.

    +
      +
    • storage (string): Name of a readable or writable storage class which provides an abstraction to read from a table or a view in ClickHouse.

    • +
    • is_writable (boolean): Marks the storage as a writable one.

    • +
    • translation_mappers (object): Represents the set of rules used to translate different expression types.

      +
        +
      • columns (array)

        +
          +
        • mapper (string): Mapper class name.

        • +
        • args (object): Key/value mappings required to instantiate Mapper class.

        • +
        +
      • +
      • functions (array)

        +
          +
        • mapper (string): Mapper class name.

        • +
        • args (object): Key/value mappings required to instantiate Mapper class.

        • +
        +
      • +
      • curried_functions (array)

        +
          +
        • mapper (string): Mapper class name.

        • +
        • args (object): Key/value mappings required to instantiate Mapper class.

        • +
        +
      • +
      • subscriptables (array)

        +
          +
        • mapper (string): Mapper class name.

        • +
        • args (object): Key/value mappings required to instantiate Mapper class.

        • +
        +
      • +
      +
    • +
    +
  • +
  • join_relationships (object)

    +
      +
    • ^.*$ (object): The join relationship. The key for this relationship is how the relationship is specified in queries (MATCH x -[key]-> y).

      +
        +
      • rhs_entity (string): The entity key of the rhs entity to join with.

      • +
      • columns (array): A sequence of tuples of columns to join on, in the form (left, right).

      • +
      • join_type (string): The type of join that can be performed (either ‘left’ or ‘inner’).

      • +
      • equivalences (array): Tracking columns in the two entities that are not part of the join key but are still equivalent.

      • +
      +
    • +
    +
  • +
  • storage_selector (object):

    +
      +
    • selector (string): QueryStorageSelector class name.

    • +
    • args (object): Key/value mappings required to instantiate QueryStorageSelector class.

    • +
    +
  • +
  • query_processors (array): Represents a transformation applied to the ClickHouse query.

    +
      +
    • processor (string): Name of LogicalQueryProcessor class config key. Responsible for the transformation applied to a query.

    • +
    • args (object): Key/value mappings required to instantiate QueryProcessor class.

    • +
    +
  • +
  • validators (array): The validation logic used on the ClickHouse query.

    +
      +
    • validator (string): Validator class name.

    • +
    • args (object): Key/value mappings required to instantiate Validator class.

    • +
    +
  • +
  • validate_data_model ([‘string’, ‘null’]): The level at which mismatched functions and columns when querying the entity should be logged.

  • +
  • required_time_column ([‘string’, ‘null’]): The name of the required time column specified in schema.

  • +
  • partition_key_column_name ([‘string’, ‘null’]): The column name, if this entity is partitioned, to select slice.

  • +
  • subscription_processors (array)

    +
      +
    • processor (string): Entity Subscription Processor class name.

    • +
    • args (object): Key/value mappings required to instantiate Entity Subscription Processor class.

    • +
    +
  • +
  • subscription_validators (array)

    +
      +
    • validator (string): Entity Subscription Validator class name.

    • +
    • args (object): Key/value mappings required to instantiate Entity Subscription Validator class.

    • +
    +
  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/entity_subscription.html b/configuration/entity_subscription.html new file mode 100644 index 0000000000..52909be272 --- /dev/null +++ b/configuration/entity_subscription.html @@ -0,0 +1,140 @@ + + + + + + + + + Entity Subscription Schema — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Entity Subscription Schema

+
+

Properties

+
    +
  • version: Version of schema.

  • +
  • kind: Component kind.

  • +
  • name (string): Name of the entity subscription.

  • +
  • max_allowed_aggregations ([‘integer’, ‘null’]): Maximum number of allowed aggregations.

  • +
  • disallowed_aggregations ([‘array’, ‘null’]): Name of aggregation clauses that are not allowed.

  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/intro.html b/configuration/intro.html new file mode 100644 index 0000000000..3596a9020b --- /dev/null +++ b/configuration/intro.html @@ -0,0 +1,125 @@ + + + + + + + + + Dataset Configuration — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Dataset Configuration

+

Snuba Datasets are defined through YAML configuration files. These are then loaded and validated by the Snuba application.

+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/migration_group.html b/configuration/migration_group.html new file mode 100644 index 0000000000..5668cfabb2 --- /dev/null +++ b/configuration/migration_group.html @@ -0,0 +1,140 @@ + + + + + + + + + Migration Group Schema — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Migration Group Schema

+
+

Properties

+
    +
  • version: Version of schema.

  • +
  • kind: Component kind.

  • +
  • name (string): Name of the migration group.

  • +
  • optional (boolean): Flag to determine if migration group is optional.

  • +
  • migrations (array): Names of migrations to be applied in group.

  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/overview.html b/configuration/overview.html new file mode 100644 index 0000000000..31a738e14a --- /dev/null +++ b/configuration/overview.html @@ -0,0 +1,142 @@ + + + + + + + + + Dataset Configuration — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Dataset Configuration

+

Snuba Datasets are defined through YAML configuration files. These are then loaded and validated by the Snuba application.

+
+

Schemas:

+ +
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/readable_storage.html b/configuration/readable_storage.html new file mode 100644 index 0000000000..b5968ce3b3 --- /dev/null +++ b/configuration/readable_storage.html @@ -0,0 +1,178 @@ + + + + + + + + + Readable Storage Schema — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Readable Storage Schema

+
+

Properties

+
    +
  • version: Version of schema.

  • +
  • kind: Component kind.

  • +
  • name (string): Name of the readable storage.

  • +
  • storage (object):

    +
      +
    • key (string): A unique key identifier for the storage.

    • +
    • set_key (string): A unique key identifier for a collection of storages located in the same cluster.

    • +
    +
  • +
  • readiness_state (string): The readiness state defines the availability of the storage in various environments. Internally, this label is used to determine which environments this storage is released in. There are four different readiness states: limited, deprecate, partial, and complete. Different environments support a set of these readiness_states. If this is a new storage, start with limited which only exposes the storage to CI and local development. Must be one of: [‘limited’, ‘deprecate’, ‘partial’, ‘complete’].

  • +
  • schema (object):

    +
      +
    • columns (array): Objects (or nested objects) representing columns containing a name, type and args.

    • +
    • local_table_name (string): The local table name in a single-node ClickHouse.

    • +
    • dist_table_name (string): The distributed table name in distributed ClickHouse.

    • +
    • not_deleted_mandatory_condition (string): The name of the column flagging a deletion, eg deleted column in Errors. Defining this column here will ensure any query served by this storage explicitly filters out any ‘deleted’ rows. Should only be used for storages supporting deletion replacement.

    • +
    • partition_format (array): The format of the partitions in Clickhouse. Used in the cleanup job.

    • +
    +
  • +
  • query_processors (array)

    +
      +
    • processor (string): Name of ClickhouseQueryProcessor class config key. Responsible for the transformation applied to a query.

    • +
    • args (object): Key/value mappings required to instantiate QueryProcessor class.

    • +
    +
  • +
  • query_splitters (array)

    +
      +
    • splitter (string): Name of QuerySplitStrategy class config key. Responsible for splitting a query into two at runtime and combining the results.

    • +
    • args (object): Key/value mappings required to instantiate QuerySplitStrategy class.

    • +
    +
  • +
  • mandatory_condition_checkers (array)

    +
      +
    • condition (string): Name of ConditionChecker class config key. Responsible for running final checks on a query to ensure that transformations haven’t impacted/removed conditions required for security reasons.

    • +
    • args (object): Key/value mappings required to instantiate ConditionChecker class.

    • +
    +
  • +
  • allocation_policies (array)

    +
      +
    • name (string): Name of the AllocationPolicy used for allocating read resources per query on this storage.

    • +
    • args (object): Key/value mappings required to instantiate AllocationPolicy class.

    • +
    +
  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/configuration/writable_storage.html b/configuration/writable_storage.html new file mode 100644 index 0000000000..d1df1b983d --- /dev/null +++ b/configuration/writable_storage.html @@ -0,0 +1,208 @@ + + + + + + + + + Writable Storage Schema — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Writable Storage Schema

+
+

Properties

+
    +
  • version: Version of schema.

  • +
  • kind: Component kind.

  • +
  • name (string): Name of the writable storage.

  • +
  • storage (object):

    +
      +
    • key (string): A unique key identifier for the storage.

    • +
    • set_key (string): A unique key identifier for a collection of storages located in the same cluster.

    • +
    +
  • +
  • readiness_state (string): The readiness state defines the availability of the storage in various environments. Internally, this label is used to determine which environments this storage is released in. There are four different readiness states: limited, deprecate, partial, and complete. Different environments support a set of these readiness_states. If this is a new storage, start with limited which only exposes the storage to CI and local development. Must be one of: [‘limited’, ‘deprecate’, ‘partial’, ‘complete’].

  • +
  • schema (object):

    +
      +
    • columns (array): Objects (or nested objects) representing columns containing a name, type and args.

    • +
    • local_table_name (string): The local table name in a single-node ClickHouse.

    • +
    • dist_table_name (string): The distributed table name in distributed ClickHouse.

    • +
    • not_deleted_mandatory_condition (string): The name of the column flagging a deletion, eg deleted column in Errors. Defining this column here will ensure any query served by this storage explicitly filters out any ‘deleted’ rows. Should only be used for storages supporting deletion replacement.

    • +
    • partition_format (array): The format of the partitions in Clickhouse. Used in the cleanup job.

    • +
    +
  • +
  • stream_loader (object): The stream loader for a writing to ClickHouse. This provides what is needed to start a Kafka consumer and fill in the ClickHouse table.

    +
      +
    • processor (object): Name of Processor class config key and its arguments. Responsible for converting an incoming message body from the event stream into a row or statement to be inserted or executed against clickhouse.

      +
        +
      • name (string)

      • +
      • args (object): Key/value mappings required to instantiate the processor class.

      • +
      +
    • +
    • default_topic (string): Name of the Kafka topic to consume from.

    • +
    • commit_log_topic ([‘string’, ‘null’]): Name of the commit log Kafka topic.

    • +
    • subscription_scheduled_topic ([‘string’, ‘null’]): Name of the subscription scheduled Kafka topic.

    • +
    • subscription_scheduler_mode ([‘string’, ‘null’]): The subscription scheduler mode used (e.g. partition or global). This must be specified if subscriptions are supported for this storage.

    • +
    • subscription_result_topic ([‘string’, ‘null’]): Name of the subscription result Kafka topic.

    • +
    • replacement_topic ([‘string’, ‘null’]): Name of the replacements Kafka topic.

    • +
    • dlq_topic ([‘string’, ‘null’]): Name of the DLQ Kafka topic.

    • +
    • pre_filter (object): Name of class which filters messages incoming from the stream.

      +
        +
      • type (string): Name of StreamMessageFilter class config key.

      • +
      • args (object): Key/value mappings required to instantiate StreamMessageFilter class.

      • +
      +
    • +
    +
  • +
  • query_processors (array)

    +
      +
    • processor (string): Name of ClickhouseQueryProcessor class config key. Responsible for the transformation applied to a query.

    • +
    • args (object): Key/value mappings required to instantiate QueryProcessor class.

    • +
    +
  • +
  • query_splitters (array)

    +
      +
    • splitter (string): Name of QuerySplitStrategy class config key. Responsible for splitting a query into two at runtime and combining the results.

    • +
    • args (object): Key/value mappings required to instantiate QuerySplitStrategy class.

    • +
    +
  • +
  • mandatory_condition_checkers (array)

    +
      +
    • condition (string): Name of ConditionChecker class config key. Responsible for running final checks on a query to ensure that transformations haven’t impacted/removed conditions required for security reasons.

    • +
    • args (object): Key/value mappings required to instantiate ConditionChecker class.

    • +
    +
  • +
  • allocation_policies (array)

    +
      +
    • name (string): Name of the AllocationPolicy used for allocating read resources per query on this storage.

    • +
    • args (object): Key/value mappings required to instantiate AllocationPolicy class.

    • +
    +
  • +
  • replacer_processor (object):

    +
      +
    • processor (string): Name of ReplacerProcessor class config key. Responsible for optimizing queries on a storage which can have replacements, eg deletions/updates.

    • +
    • args (object): Key/value mappings required to instantiate ReplacerProcessor class.

    • +
    +
  • +
  • writer_options (object): Extra Clickhouse fields that are used for consumer writes.

  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/contributing/environment.html b/contributing/environment.html new file mode 100644 index 0000000000..8471e55bad --- /dev/null +++ b/contributing/environment.html @@ -0,0 +1,204 @@ + + + + + + + + + Snuba development environment — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Snuba development environment

+

This section explains how to run snuba from source and set up a development +environment.

+

In order to set up Clickhouse, Redis, and Kafka, please refer to Getting started with Snuba.

+
+

Prerequisites

+

pyenv must be installed on your system. +It is also assumed that you have completed the steps to set up the sentry dev environment.

+

If you are using Homebrew and a M1 Mac, ensure the development packages you’ve installed with Homebrew are available +by setting these environment variables:

+
export CPATH=/opt/homebrew/include
+export LIBRARY_PATH=/opt/homebrew/lib
+
+
+
+
+

Install / Run

+

clone this repo into your workspace:

+
git@github.com:getsentry/snuba.git
+
+
+

These commands set up the Python virtual environment:

+
cd snuba
+make pyenv-setup
+python -m venv .venv
+source .venv/bin/activate
+pip install --upgrade pip==22.2.2
+make develop
+
+
+

These commands start the Snuba api, which is capable of processing queries:

+
snuba api
+
+
+

This command instead will start the api and all the Snuba consumers to ingest +data from Kafka:

+
snuba devserver
+
+
+
+
+

Running tests

+

This command runs unit and integration tests:

+
make develop (if you have not run it already)
+make test
+
+
+
+

Running sentry tests against snuba

+

This section instead runs Sentry tests against a running Snuba installation

+

Make sure there is no snuba container already running:

+
docker ps -a | grep snuba
+
+
+

Start your local snuba api server:

+
git checkout your-snuba-branch
+source .venv/bin/activate
+snuba api
+
+
+

and, in another terminal:

+
cd ../sentry
+git checkout master
+git pull
+sentry devservices up --exclude=snuba
+
+
+

This will get the most recent version of Sentry on master, and bring up all snuba’s dependencies.

+

You will want to run the following Sentry tests:

+
make test-acceptance
+make test-snuba
+make test-python
+
+
+

These tests do not use Kafka due to performance reasons. The snuba test suite does test the kafka functionality

+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/genindex.html b/genindex.html new file mode 100644 index 0000000000..51be52919c --- /dev/null +++ b/genindex.html @@ -0,0 +1,123 @@ + + + + + + + + Index — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/getstarted.html b/getstarted.html new file mode 100644 index 0000000000..3212d7531c --- /dev/null +++ b/getstarted.html @@ -0,0 +1,177 @@ + + + + + + + + + Getting started with Snuba — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Getting started with Snuba

+

This is a guide to quickly start Snuba up in the context of a Sentry +development environment.

+
+

Requirements

+

Snuba assumes:

+
    +
  1. A Clickhouse server endpoint at CLICKHOUSE_HOST (default 127.0.0.1).

  2. +
  3. A redis instance running at REDIS_HOST (default 127.0.0.1). On port +6379

  4. +
  5. A Kafka cluster running at 127.0.0.1 on port 9092.

  6. +
+

A quick way to get these services running is to set up sentry, and add the following line +in ~/.sentry/sentry.conf.py:

+
SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
+
+
+

And then use:

+
sentry devservices up --exclude=snuba
+
+
+

Note that Snuba assumes that everything is running on UTC time. Otherwise +you may experience issues with timezone mismatches.

+
+
+

Sentry + Snuba

+

Add/change the following lines in ~/.sentry/sentry.conf.py:

+
SENTRY_SEARCH = 'sentry.search.snuba.EventsDatasetSnubaSearchBackend'
+SENTRY_TSDB = 'sentry.tsdb.redissnuba.RedisSnubaTSDB'
+SENTRY_EVENTSTREAM = 'sentry.eventstream.snuba.SnubaEventStream'
+
+
+

Run:

+
sentry devservices up
+
+
+

Access raw clickhouse client (similar to psql):

+
docker exec -it sentry_clickhouse clickhouse-client
+
+
+

Data is written into the table sentry_local: select count() from sentry_local;

+
+
+

Settings

+

Settings are found in settings.py

+
    +
  • CLUSTERS : Provides the list of clusters and the hostname, port, and storage sets that should run on each cluster. Local vs distributed is also set per cluster.

  • +
  • REDIS_HOST : The host redis is running on.

  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 0000000000..a5f69728d6 --- /dev/null +++ b/index.html @@ -0,0 +1,180 @@ + + + + + + + + + Features: — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Snuba is a service that provides a rich data model on top of Clickhouse +together with a fast ingestion consumer and a query optimizer.

+

Snuba was originally developed to replace a combination of Postgres and +Redis to search and provide aggregated data on Sentry errors. +Since then it has evolved into the current form where it supports most +time series related Sentry features over several data sets.

+
+

Features:

+
    +
  • Provides a database access layer to the Clickhouse distributed data store.

  • +
  • Provides a graph logical data model the client can query through the SnQL +language which provides functionalities similar to those of SQL.

  • +
  • Support multiple separate data sets in a single installation.

  • +
  • Provides a rule based query optimizer.

  • +
  • Provides a migration system to apply DDL changes to Clickhouse both in a +single node and distributed environment.

  • +
  • Ingest data directly from Kafka

  • +
  • Supports both point in time queries and streaming queries.

  • +
+
+
+

Some use cases in Sentry:

+
    +
  • The events data set powers features like the Issue Page. Here the search +functionality is powered by Snuba as well as all the aggregations.

  • +
  • The discover data set powers all the Performance Monitoring related +features.

  • +
  • The sessions data set powers the Releases feature. Specifically this +data set ingests a much higher volume of data points and stores pre-aggregated +data to allow fast queries over higher volume of data.

  • +
  • The outcomes data set powers the Stats page.

  • +
+
+
+

Contents:

+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intro.html b/intro.html new file mode 100644 index 0000000000..fa275cd469 --- /dev/null +++ b/intro.html @@ -0,0 +1,154 @@ + + + + + + + + + Features: — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Snuba is a service that provides a rich data model on top of Clickhouse +together with a fast ingestion consumer and a query optimizer.

+

Snuba was originally developed to replace a combination of Postgres and +Redis to search and provide aggregated data on Sentry errors. +Since then it has evolved into the current form where it supports most +time series related Sentry features over several data sets.

+
+

Features:

+
    +
  • Provides a database access layer to the Clickhouse distributed data store.

  • +
  • Provides a graph logical data model the client can query through the SnQL +language which provides functionalities similar to those of SQL.

  • +
  • Support multiple separate data sets in a single installation.

  • +
  • Provides a rule based query optimizer.

  • +
  • Provides a migration system to apply DDL changes to Clickhouse both in a +single node and distributed environment.

  • +
  • Ingest data directly from Kafka

  • +
  • Supports both point in time queries and streaming queries.

  • +
+
+
+

Some use cases in Sentry:

+
    +
  • The events data set powers features like the Issue Page. Here the search +functionality is powered by Snuba as well as all the aggregations.

  • +
  • The discover data set powers all the Performance Monitoring related +features.

  • +
  • The sessions data set powers the Releases feature. Specifically this +data set ingests a much higher volume of data points and stores pre-aggregated +data to allow fast queries over higher volume of data.

  • +
  • The outcomes data set powers the Stats page.

  • +
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/language/snql.html b/language/snql.html new file mode 100644 index 0000000000..deee921bb0 --- /dev/null +++ b/language/snql.html @@ -0,0 +1,287 @@ + + + + + + + + + The SnQL query language — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The SnQL query language

+

This document describes the Snuba Query Language (SnQL). For more details on +how to actually send a query to Snuba see Querying Snuba.

+

This is the query structure.:

+
MATCH simple | join | subquery
+SELECT [expressions] | [aggregations BY expressions]
+ARRAY JOIN [column]
+WHERE condition [[AND | OR] condition]*
+HAVING condition [[AND | OR] condition]*
+ORDER BY expression ASC|DESC [, expression ASC|DESC]*
+LIMIT n BY [expressions]
+LIMIT n
+OFFSET n
+GRANULARITY n
+TOTALS boolean
+
+
+

These queries are sent as strings to the /:dataset/snql endpoint encoded in a +JSON body of the form below.:

+
{
+    "query": "<query>",
+    "dataset": "<dataset>",
+    "consistent": bool,
+    "turbo": bool,
+    "debug": bool,
+}
+
+
+

The dataset is implied through the url used for the query. All of the fields except +for query are optional in the JSON body.

+
+

MATCH

+

Our data model is represented by a graph of entities. This clause identifies +the pattern of the subgraphs we are querying on. There are three types of +MATCH clause that are currently supported:

+

Simple:

+

MATCH (<entity> [SAMPLE n])

+

This is equivalent to all of our current queries. This is querying data from +a single entity (Events, Transactions etc.) It is possible to add an optional +sample to the query by adding it with the entity.

+

Example MATCH (events).

+

Subquery:

+

MATCH { <query> }

+

Inside the curly braces can be another SnQL query in its entirety. Anything +in the SELECT/BY clause of the subquery will be exposed in the outer query +using the aliases specified.

+

Example:

+
MATCH {
+    MATCH (transactions)
+    SELECT avg(duration) AS avg_d BY transaction
+}
+SELECT max(avg_d)
+
+
+

Join:

+

MATCH (<alias>: <entity> [SAMPLE n]) -[<join>]-> (<alias>: <entity> [SAMPLE n])

+

A join represents a multi-node subgraph, that is, a subgraph that includes +multiple relationships between different nodes. We only support 1..n, +n..1 and 1..1 directed relationships between nodes.

+

With JOINs every entity must have an alias, which is a unique string. +Sampling can also be applied to any of the entities in the join. The +<join> is a string that is specified in the Entity in Snuba, and +is a short hand for a set of join conditions. It’s possible to have more +than one join clause, separated by commas.

+

Example:

+
MATCH
+    (e: events) -[grouped]-> (g: groupedmessage),
+    (e: events) -[assigned]-> (a: groupassignee)
+SELECT count() AS tot BY e.project_id, g.id
+WHERE a.user_id = "somebody"
+
+
+

The type of join (left/inner) and the join key are part of the data model +and not part of the query. They are hard coded in the entity code. +This is because not every entity can be safely joined with any other entity +in the distributed version of the underlying database.

+

The tuples provided by the match clause to the where clause look exactly +like the ones produced by conventional join clause.:

+
[
+    {"e.project_id": 1,  "g.id": 10}
+    {"e.project_id": 1,  "g.id": 11}
+    {"e.project_id": 2,  "g.id": 20}
+    ...
+]
+
+
+
+
+

SELECT .. BY

+

This clause specifies which results should be returned in the output. +If there is an aggregation, then everything in the BY clause is +treated as a grouping key. +It is possible to have aggregations without a BY clause if we want +to aggregate across the entire result set, but, in such case, nothing +other than the aggregation can be in the SELECT. +It’s not valid to have +an empty SELECT clause, even if there is a BY clause.

+

Expressions in the SELECT clause can be columns, arithmetic, functions +or any combination of the three. If the query is a join, then each column +must have a qualifying alias that matches one of the entity aliases in the +MATCH clause.

+
+
+

WHERE

+

This is the filter of the query that happens before aggregations (like +the WHERE in SQL).

+

Conditions are infix expressions of the form LHS OP RHS*, where LHS +and RHS are literal values or expressions. OP refers to a specific +operator to compare the two values. These operators are one of +=, !=, <, <=, >, >=, IN, NOT IN, LIKE, NOT LIKE, IS NULL, IS NOT NULL. +Note that the RHS is optional when using an operator like IS NULL.

+

Conditions can be combined using the boolean keywords AND or OR. +They can also be grouped using ().

+

Some conditions will be mandatory to provide a valid query depending on +the entity. For example the Transactions entity requires a project id +condition and a time range condition.

+
+
+

HAVING

+

Works like the WHERE clause but it is applied after the aggregations declared +in the SELECT clause. So we can apply conditions on the result of an aggregation +function here.

+
+
+

ORDER BY

+

Specify the expression(s) to order the result set on.

+
+
+

LIMIT BY/LIMIT/OFFSET

+

Pretty self explanatory, they take integers and set the corresponding +values in the Clickhouse query. If a query doesn’t specify the limit or +offset, they will be defaulted to 1000 and 0 respectively.

+
+
+

GRANULARITY

+

An integer representing the granularity to group time based results.

+

Some of the entities in Snuba provide a magic column that you can use to group data by. The column gives a floored time value for each row so that rows in the same minute/hour/day/etc. can be grouped.

+

The magic column for a given entity can be found by finding the TimeSeriesProcessor for the entity. Example, for errors, you can find the TimeSeriesProcessor defined here. You can see that the magic column is time and it uses the timestamp column for grouping.

+

Granularity determines the number of seconds in each of these time buckets. Eg, to count the number of events by hour, you would do

+

Example:

+
MATCH(events) count(event_id) AS event_count
+BY time
+WHERE timestamp >= toDateTime('2022-01-15T00:00:00.000000') AND timestamp < toDateTime('2022-01-21T00:00:00.000000')
+GRANULARITY 3600
+
+
+
+
+

TOTALS

+

If set to True, the response from Snuba will have a "totals" key that +contains the total values across all the selected rows.

+
+
+

SAMPLE

+

If a sampling rate isn’t provided by a node in the MATCH clause, then it can be specified here. In this case, Snuba will assign the sample rate to one of the nodes in the query. A sample can be either a float between 0 and 1, representing a percentage of rows to sample.

+

Or it can be an integer greater than 1 which represents the number of rows to sample.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/migrations/modes.html b/migrations/modes.html new file mode 100644 index 0000000000..da4a86f3e1 --- /dev/null +++ b/migrations/modes.html @@ -0,0 +1,171 @@ + + + + + + + + + Snuba Migration Modes — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Snuba Migration Modes

+

This document outlines a way to try out distributed migrations. +Note that this is experimental, and should be used only for development +purposes at the moment. Distributed mode is not supported when testing yet. +Local mode for migrations is currently fully supported.

+

If you are running ClickHouse via Sentry’s devservices, the main “switch” between the two modes for running data migrations (local and distributed) lives in sentry/conf/server.py. The controlling environment variable is SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES, and its value must be set in order to use a specific mode.

+

Once this boolean variable is set, one of two ClickHouse Docker volumes will be +used for data storage, depending on the mode (distributed or local). Whenever a user +wants to switch between the two modes, they must “turn off” the running ClickHouse +container, alter the environment variable mentioned above, and then “turn on” the +same container to be in the new mode.

+

More information on migrations in general can be found here.

+
+

Enabling Local Mode

+

In your local server.py, set SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES +to False. This is the default setting, so configuration is already +set up for local mode migrations. Start up the corresponding ClickHouse +container (sentry devservices up clickhouse).

+

Now, run migrations as expected (snuba migrations migrate --force).

+
+
+

Enabling Distributed Mode

+

In your local server.py, set SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES +to True. Start up the corresponding ClickHouse container (sentry devservices up clickhouse). +Make sure that the Zookeeper container is also running; without it, distributed migrations +will not work properly.

+

Now, we take a look at the cluster configurations that can be used in Distributed tables. These are +set in this config. +The current configuration in the file is a default, 1 shard with 1 replica model, and is best to use +for now, as it supports migrations for all storages. Moving forward, we look to adding support +for multi-sharded configurations, and ensuring storages are placed on the right clusters. +More examples of and information on cluster configurations can be found in this link.

+

Finally, set up cluster connection details (for example, which storage is to be assigned +to be which cluster) in this file. +This needs to be done only for distributed migrations, as the default cluster details will be used in local mode. +The default in this file works with the default cluster configurations mentioned above, so no changes +are immediately necessary.

+

Now, run migrations with the SNUBA_SETTINGS environment variable pointing to distributed mode. +This can be done as follows: SNUBA_SETTINGS=distributed snuba migrations migrate --force.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv new file mode 100644 index 0000000000..93828afad3 Binary files /dev/null and b/objects.inv differ diff --git a/query/overview.html b/query/overview.html new file mode 100644 index 0000000000..1590204c9d --- /dev/null +++ b/query/overview.html @@ -0,0 +1,426 @@ + + + + + + + + + Querying Snuba — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Querying Snuba

+

This guide drives you through the process of authoring and testing a +Snuba query.

+
+

Exploring the Snuba data model

+

In order to architect a Snuba query, the first step is being able to +know which Dataset you should query, which Entities you should select +and what the schema of each Entity is.

+

For an introduction about Datasets and Entities, see the Snuba Data Model +section.

+

Datasets can be found in this module. +Each Dataset is a class that references the Entities.

+

The list of entities in the system can be found via the snuba entities +command:

+
snuba entities list
+
+
+

would return something like:

+
Declared Entities:
+discover
+errors
+events
+groups
+groupassignee
+groupedmessage
+.....
+
+
+

Once we have found the entity we are interested in, we need to understand the schema and the relationships declared on that entity. The same command describes an Entity:

+
snuba entities describe groupedmessage
+
+
+

Would return:

+
Entity groupedmessage
+    Entity schema
+    --------------------------------
+    offset UInt64
+    record_deleted UInt8
+    project_id UInt64
+    id UInt64
+    status Nullable(UInt8)
+    last_seen Nullable(DateTime)
+    first_seen Nullable(DateTime)
+    active_at Nullable(DateTime)
+    first_release_id Nullable(UInt64)
+
+    Relationships
+    --------------------------------
+        groups
+        --------------------------------
+        Destination: events
+        Type: LEFT
+            Join keys
+            --------------------------------
+            project_id = LEFT.project_id
+            id = LEFT.group_id
+
+
+

Which provides the list of columns with their type and the relationships to +other entities defined in the data model.

+
+
+

Preparing a query for Snuba

+

Snuba query language is called SnQL. It is documented in the The SnQL query language +section. So this section does not go into details.

+

There is a python sdk that can be used to build Snuba queries and it can +be used in any Python client including Sentry. This +is where the sdk project is documented.

+

The query is represented as a Query object like:

+
query = Query(
+    dataset="discover",
+    match=Entity("events"),
+    select=[
+        Column("title"),
+        Function("uniq", [Column("event_id")], "uniq_events"),
+    ],
+    groupby=[Column("title")],
+    where=[
+        Condition(Column("timestamp"), Op.GT, datetime.datetime(2021, 1, 1)),
+        Condition(Column("project_id"), Op.IN, Function("tuple", [1, 2, 3])),
+    ],
+    limit=Limit(10),
+    offset=Offset(0),
+    granularity=Granularity(3600),
+)
+
+
+

More details on how to build a query are in the sdk documentation.

+

Once the query object is ready it can be sent to Snuba.

+
+
+

Sending a query to Snuba with Sentry

+

The most common use case when querying Snuba is via Sentry. This section +explains how to build a query in the Sentry code base and send it to Snuba.

+

Sentry imports the Snuba sdk described above. This is the recommended way +to build Snuba queries.

+

Once a Query object has been created the Snuba client api provided by +Sentry can and should be used to send the query to Snuba.

+

The api is in this module. +It takes care of caching, retries and allows bulk queries.

+

The method returns a dictionary that contains the data in response and +additional metadata:

+
{
+    "data": [
+        {
+            "title": "very bad",
+            "uniq_events": 2
+        }
+    ],
+    "meta": [
+        {
+            "name": "title",
+            "type": "String"
+        },
+        {
+            "name": "uniq_events",
+            "type": "UInt64"
+        }
+    ],
+    "timing": {
+        ... details ...
+    }
+}
+
+
+

The data section is a list with one dictionary per row. The meta +section contains the list of the columns included in the response with +their data type as inferred by Clickhouse.

+

More details about the structure of the timing section below.

+
+
+

Sending a test query through the web UI

+

Snuba has a minimal web UI you can use to send queries. You can run Snuba +locally and the web UI will be accessible at http://127.0.0.1:1218/[DATASET NAME]/snql.

+../_images/snubaUI.png +

The SnQL query should be provided (sorry, on one line only) in the query +attribute and the structure of the response is the same discussed in the +section above.

+
+
+

Sending a query via curl

+

The web ui just sends the payload as a POST. So the same result can be +achieved with curl or any other HTTP client.

+
+
+

Request and response formats

+

The request format is the same visible in the screenshot:

+
    +
  • query contains the SnQL query as a string

  • +
  • dataset is the dataset name (if not already specified in the url)

  • +
  • +
    debug makes Snuba provide exhaustive statistics in the response

    including the Clickhouse query.

    +
    +
    +
  • +
  • +
    consistent forces the Clickhouse query to be executed in single

    threaded mode and, in case the Clickhouse table is replicated, it will force Snuba to always hit the same node. Which can guarantee sequential consistency as that is the node where the consumer writes by default. This is achieved with the load balancing Clickhouse property which is set as in_order.

    +
    +
    +
  • +
  • +
    turbo sets a sampling rate to the query defined in the TURBO_SAMPLE_RATE

    Snuba setting. It also prevents Snuba to apply the FINAL +mode to the Clickhouse query in case it was needed to guarantee +correct results after replacements.

    +
    +
    +
  • +
+

Snuba can respond with 4 http codes. 200 is for a successful query; if the query cannot be properly validated it will be a 400. A 500 generally means a Clickhouse related issue (ranging from timeouts to connection issues), though there are still several invalid queries that Snuba is not able to identify in advance (we are removing them). Snuba has an internal rate limiter so 429 is also a possible return code.

+

The response format for a successful query is the same discussed above. +The complete version looks like this (in debug mode)

+
{
+    "data": [],
+    "meta": [
+        {
+            "name": "title",
+            "type": "String"
+        }
+    ],
+    "timing": {
+        "timestamp": 1621038379,
+        "duration_ms": 95,
+        "marks_ms": {
+            "cache_get": 1,
+            "cache_set": 4,
+            "execute": 39,
+            "get_configs": 0,
+            "prepare_query": 10,
+            "rate_limit": 4,
+            "validate_schema": 34
+        }
+    },
+    "stats": {
+        "clickhouse_table": "errors_local",
+        "final": false,
+        "referrer": "http://127.0.0.1:1218/events/snql",
+        "sample": null,
+        "project_rate": 0,
+        "project_concurrent": 1,
+        "global_rate": 0,
+        "global_concurrent": 1,
+        "consistent": false,
+        "result_rows": 0,
+        "result_cols": 1,
+        "query_id": "f09f3f9e1c632f395792c6a4bfe7c4fe"
+    },
+    "sql": "SELECT (title AS _snuba_title) FROM errors_local PREWHERE equals((project_id AS _snuba_project_id), 1) WHERE equals(deleted, 0) AND greaterOrEquals((timestamp AS _snuba_timestamp), toDateTime('2021-05-01T00:00:00', 'Universal')) AND less(_snuba_timestamp, toDateTime('2021-05-11T00:00:00', 'Universal')) LIMIT 1000 OFFSET 0"
+}
+
+
+

The timing section contains the timestamp of the query and the duration. What +is interesting is that the duration is broken down into phases: marks_ms.

+

The sql element is the Clickhouse query.

+

The stats dictionary contains the following keys

+
    +
  • clickhouse_table is the table picked by snuba during query processing

  • +
  • +
    final tells if Snuba decided to send a FINAL query to Clickhouse which would force

    Clickhouse to apply the relevant merges (for merge trees) right away. +Details

    +
    +
    +
  • +
  • sample is the sampling rate applied

  • +
  • +
    project_rate is the number of request per second Snuba received for the specific

    project at the time of the query

    +
    +
    +
  • +
  • +
    project_concurrent is the number of concurrent query involving the specific project

    at the time of the query.

    +
    +
    +
  • +
  • global_rate same as for project_rate but not focused on one project

  • +
  • global_concurrent same as for project_concurrent but not focused on one project

  • +
  • query_id is a unique identifier for this query.

  • +
+

A query validation issue would generally have this format:

+
{
+    "error": {
+        "type": "invalid_query",
+        "message": "Missing >= condition with a datetime literal on column timestamp for entity events. Example: timestamp >= toDateTime('2023-05-16 00:00')"
+    }
+}
+
+
+

A Clickhouse error would have a similar structure. The type field will say clickhouse, the message will contain details around the exception. Contrary to the query validation errors, in case of Clickhouse errors, the query is actually executed, so all the timing and stats details described for a successful query are present.

+
+
+

Creating a Subscription query

+

Send the payload as a POST to 127.0.0.1:1218/[DATASET NAME]/[ENTITY NAME]/subscriptions.

+
+
+

Request Format

+

A subscription query would generally have this payload format:

+
{
+    "project_id": 42,
+    "time_window" : 150,
+    "resolution" : 60,
+    "query" : "MATCH (events) SELECT ...."
+}
+
+
+

project_id, resolution, time_window are all specified as separate fields +in the subscription payload by the user, alongside the query. This allows +us to pre-build one subscription query and vary these as separate parameters.

+

‘time_window’ becomes part of the query condition (i.e the WHERE), and the +subscription query will look at the past ‘time_window’ seconds (as specified +by the window) of events. For example, if ‘time_window’ = 60, the +subscription query will select rows whose timestamp column’s values fall in +the range of [start - 60 seconds, start) where ‘start’ is defined as +the timestamp at which the subscription was created. As ‘time_window’ +increases, the larger the range of accepted values for the relevant +timestamp column.

+

‘project_id’ becomes part of the query condition, and the query will filter +records by matching on the specified id.

+

‘resolution’ is used to determine when the scheduler creates tasks so that the executor can run subscription queries. The scheduler can either schedule the subscription immediately, or can schedule subscriptions with a jitter (see JitteredTaskBuilder definition for more details). For scheduling, a running timestamp is maintained and in the case of immediate scheduling, a subscription task is scheduled every ‘resolution’ seconds.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/search.html b/search.html new file mode 100644 index 0000000000..ea7e4fed7c --- /dev/null +++ b/search.html @@ -0,0 +1,142 @@ + + + + + + + + Search — Snuba 23.7.0.dev0 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Search

+ + + + +

+ Searching for multiple words only shows matches that contain + all words. +

+ + +
+ + + +
+ + + +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/searchindex.js b/searchindex.js new file mode 100644 index 0000000000..6ec09b921c --- /dev/null +++ b/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["architecture/datamodel", "architecture/overview", "architecture/queryprocessing", "architecture/slicing", "clickhouse/death_queries", "clickhouse/schema_design", "clickhouse/supported_versions", "clickhouse/topology", "configuration/dataset", "configuration/entity", "configuration/entity_subscription", "configuration/intro", "configuration/migration_group", "configuration/overview", "configuration/readable_storage", "configuration/writable_storage", "contributing/environment", "getstarted", "index", "intro", "language/snql", "migrations/modes", "query/overview"], "filenames": ["architecture/datamodel.rst", "architecture/overview.rst", "architecture/queryprocessing.rst", "architecture/slicing.rst", "clickhouse/death_queries.rst", "clickhouse/schema_design.rst", "clickhouse/supported_versions.rst", "clickhouse/topology.rst", "configuration/dataset.md", "configuration/entity.md", "configuration/entity_subscription.md", "configuration/intro.rst", "configuration/migration_group.md", "configuration/overview.rst", "configuration/readable_storage.md", "configuration/writable_storage.md", "contributing/environment.rst", "getstarted.rst", "index.rst", "intro.rst", "language/snql.rst", "migrations/modes.rst", "query/overview.rst"], "titles": ["Snuba Data Model", "Snuba Architecture Overview", "Snuba Query Processing", "Snuba Data Slicing (under development)", "Clickhouse Queries Of Death", "ClickHouse Schema Design Best Practices", "ClickHouse supported versions", "ClickHouse Topology Best Practices", "Dataset Schema", "Entity Schema", "Entity Subscription Schema", "Dataset Configuration", "Migration Group Schema", "Dataset Configuration", "Readable Storage Schema", "Writable Storage Schema", "Snuba development environment", "Getting started with Snuba", 
"Features:", "Features:", "The SnQL query language", "Snuba Migration Modes", "Querying Snuba"], "terms": {"thi": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 17, 18, 19, 20, 21, 22], "section": [0, 1, 2, 5, 16, 22], "explain": [0, 1, 2, 16, 22], "how": [0, 3, 5, 9, 16, 20, 22], "i": [0, 1, 2, 3, 4, 5, 6, 7, 9, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22], "organ": [0, 1], "user": [0, 2, 5, 7, 21, 22], "face": 0, "map": [0, 5, 9, 14, 15], "underli": [0, 5, 20], "databas": [0, 1, 3, 7, 18, 19, 20], "clickhous": [0, 1, 2, 9, 14, 15, 16, 17, 18, 19, 20, 21, 22], "case": [0, 1, 2, 3, 5, 20, 22], "The": [0, 1, 2, 3, 4, 5, 6, 9, 14, 15, 16, 17, 18, 19, 21, 22], "divid": [0, 2], "horizont": 0, "logic": [0, 5, 9, 18, 19], "physic": 0, "what": [0, 2, 5, 15, 22], "visibl": [0, 22], "client": [0, 1, 17, 18, 19, 22], "through": [0, 1, 2, 5, 11, 13, 18, 19, 20], "queri": [0, 3, 9, 14, 15, 16, 18, 19], "languag": [0, 1, 2, 18, 19, 22], "element": [0, 5, 22], "mai": [0, 3, 5, 17], "1": [0, 3, 5, 17, 20, 21, 22], "tabl": [0, 1, 2, 9, 14, 15, 17, 21, 22], "instead": [0, 16], "concept": [0, 7], "like": [0, 2, 18, 19, 20, 22], "view": [0, 1, 2, 5, 9], "reason": [0, 2, 14, 15, 16], "behind": 0, "divis": 0, "allow": [0, 1, 2, 7, 10, 18, 19, 22], "expos": [0, 14, 15, 20], "stabl": 0, "interfac": [0, 2], "perform": [0, 1, 2, 5, 9, 16, 18, 19], "complex": [0, 2], "intern": [0, 14, 15, 22], "execut": [0, 1, 2, 15, 22], "differ": [0, 1, 2, 3, 5, 6, 7, 9, 14, 15, 20], "part": [0, 2, 5, 9, 20, 22], "improv": 0, "wai": [0, 1, 2, 7, 17, 21, 22], "transpar": 0, "rest": 0, "outlin": [0, 21], "compos": [0, 2], "two": [0, 1, 2, 5, 7, 9, 14, 15, 20, 21], "thei": [0, 1, 2, 5, 7, 20, 21], "ar": [0, 1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 15, 16, 17, 20, 21, 22], "connect": [0, 21, 22], "each": [0, 2, 3, 5, 17, 20, 22], "other": [0, 2, 5, 7, 20, 22], "main": [0, 1, 2, 21], "describ": [0, 1, 2, 5, 20, 22], "below": [0, 1, 20, 22], "A": [0, 1, 3, 4, 5, 9, 14, 15, 17, 20, 22], "name": [0, 2, 3, 5, 7, 8, 9, 10, 12, 14, 
15, 22], "space": 0, "over": [0, 2, 5, 18, 19], "It": [0, 1, 2, 5, 16, 20, 22], "provid": [0, 1, 2, 5, 9, 15, 17, 18, 19, 20, 22], "its": [0, 1, 2, 5, 20, 21], "own": [0, 1, 6], "schema": [0, 2, 3, 18, 22], "independ": [0, 1, 2, 5], "from": [0, 1, 2, 3, 4, 5, 7, 9, 15, 16, 17, 18, 19, 20, 22], "both": [0, 1, 2, 5, 18, 19], "term": [0, 1], "discov": [0, 18, 19, 22], "outcom": [0, 18, 19], "session": [0, 18, 19], "There": [0, 1, 5, 6, 14, 15, 20, 22], "them": [0, 2, 5, 22], "can": [0, 1, 2, 3, 5, 7, 9, 15, 18, 19, 20, 21, 22], "seen": 0, "contain": [0, 1, 2, 5, 7, 16, 20, 21, 22], "compon": [0, 2, 8, 9, 10, 12, 14, 15], "defin": [0, 2, 11, 13, 14, 15, 20, 22], "abstract": [0, 9], "concret": 0, "In": [0, 1, 2, 3, 5, 16, 20, 21, 22], "everi": [0, 3, 5, 20, 22], "target": 0, "one": [0, 1, 2, 3, 5, 7, 9, 14, 15, 20, 21, 22], "onli": [0, 1, 2, 5, 14, 15, 20, 21, 22], "extens": 0, "fundament": 0, "block": 0, "an": [0, 1, 2, 3, 5, 7, 9, 15, 20, 22], "repres": [0, 1, 2, 9, 14, 15, 20, 22], "instanc": [0, 3, 17], "transact": [0, 2, 20], "error": [0, 2, 14, 15, 18, 19, 20, 22], "practic": [0, 18], "correspond": [0, 3, 20, 21], "row": [0, 1, 2, 5, 14, 15, 20, 22], "class": [0, 2, 9, 14, 15, 22], "": [0, 3, 5, 15, 16, 20, 21, 22], "set": [0, 2, 3, 5, 9, 14, 15, 16, 18, 19, 20, 21, 22], "ha": [0, 1, 2, 5, 6, 7, 18, 19, 22], "which": [0, 1, 2, 4, 5, 9, 14, 15, 16, 18, 19, 20, 21, 22], "list": [0, 6, 17, 22], "field": [0, 3, 5, 15, 20, 22], "associ": [0, 3, 8, 9], "all": [0, 2, 5, 16, 18, 19, 20, 21, 22], "sever": [0, 2, 5, 18, 19, 22], "against": [0, 5, 15], "valid": [0, 3, 5, 6, 9, 11, 13, 20, 22], "No": [0, 1], "lower": 0, "level": [0, 5, 9], "suppos": [0, 2], "unequivoc": 0, "cannot": [0, 3, 5, 22], "present": [0, 5, 22], "multipl": [0, 1, 2, 3, 5, 18, 19, 20], "relat": [0, 1, 2, 18, 19, 22], "we": [0, 1, 2, 3, 5, 7, 20, 21, 22], "support": [0, 1, 2, 3, 14, 15, 18, 19, 20, 21], "mimic": 0, "foreign": 0, "kei": [0, 2, 3, 5, 9, 14, 15, 20, 22], "meant": 0, "mani": 0, "point": [0, 
1, 3, 18, 19, 21], "time": [0, 1, 2, 5, 9, 17, 18, 19, 20, 22], "inherit": 0, "nomin": 0, "subtyp": 0, "group": [0, 13, 20, 22], "share": [0, 1], "parent": 0, "semant": [0, 1, 2], "must": [0, 1, 3, 5, 14, 15, 16, 20, 21], "union": 0, "whose": [0, 22], "also": [0, 1, 2, 16, 17, 20, 21, 22], "possibl": [0, 1, 2, 20, 22], "just": [0, 5, 22], "largest": 0, "unit": [0, 16], "where": [0, 2, 3, 5, 18, 19, 22], "some": [0, 1, 2, 5, 6, 20], "strong": [0, 1], "guarante": [0, 1, 6, 22], "specif": [0, 1, 2, 5, 9, 18, 19, 20, 21, 22], "expect": [0, 5, 21], "serializ": 0, "pleas": [0, 5, 16], "don": [0, 5], "t": [0, 1, 5, 14, 15, 20], "us": [0, 1, 2, 3, 6, 7, 9, 14, 15, 16, 17, 20, 21, 22], "serious": 0, "you": [0, 1, 3, 4, 5, 7, 16, 17, 20, 21, 22], "think": 0, "need": [0, 1, 2, 3, 15, 21, 22], "probabl": 0, "doe": [0, 1, 2, 5, 16, 22], "extend": 0, "ani": [0, 1, 2, 6, 14, 15, 20, 22], "span": [0, 3], "best": [0, 18, 21], "have": [0, 1, 2, 3, 4, 5, 6, 15, 16, 22], "eventu": [0, 1], "impact": [0, 5, 14, 15], "subscript": [0, 1, 2, 9, 13, 15], "These": [0, 1, 2, 3, 11, 13, 16, 20, 21], "work": [0, 1, 2, 5, 6, 18, 20, 21], "sinc": [0, 1, 18, 19], "otherwis": [0, 17], "would": [0, 2, 20, 22], "requir": [0, 1, 2, 3, 5, 9, 14, 15, 20], "do": [0, 2, 3, 4, 5, 7, 16, 20], "To": [0, 2, 3, 5], "precis": 0, "depend": [0, 1, 2, 5, 16, 20, 21], "even": [0, 20], "smaller": 0, "ingest": [0, 5, 16, 18, 19], "topic": [0, 1, 15], "partit": [0, 1, 9, 14, 15], "project_id": [0, 2, 20, 22], "maximum": [0, 10], "more": [0, 1, 5, 6, 7, 20, 21, 22], "detail": [0, 1, 3, 20, 21, 22], "ok": 0, "guid": [0, 17, 22], "materi": [0, 1, 2], "As": [0, 2, 5, 22], "consequ": 0, "reflect": [0, 2], "db": 0, "abl": [0, 22], "gener": [0, 1, 3, 5, 7, 21, 22], "ddl": [0, 18, 19], "statement": [0, 15], "build": [0, 3, 22], "discuss": [0, 1, 2, 5, 22], "abov": [0, 2, 3, 5, 7, 21, 22], "thu": 0, "back": [0, 1], "least": [0, 1], "readabl": [0, 9, 13], "run": [0, 1, 2, 4, 5, 14, 15, 17, 21, 22], "pre": [0, 2, 5, 18, 19, 22], 
"aggreg": [0, 2, 7, 10, 18, 19, 20], "per": [0, 3, 14, 15, 17, 22], "optim": [0, 15, 18, 19], "writabl": [0, 9, 13], "fill": [0, 1, 15], "exclus": 0, "real": [0, 1], "world": 0, "studi": 0, "necessarili": 0, "current": [0, 18, 19, 20, 21], "sentri": [0, 3, 5, 7, 21], "product": [0, 1, 2, 4, 7], "nor": 0, "same": [0, 2, 3, 4, 14, 15, 20, 21, 22], "deploy": [0, 18], "consid": [0, 7], "taken": [0, 1], "isol": 0, "look": [0, 2, 5, 20, 21, 22], "actual": [0, 5, 20, 22], "april": 0, "2020": 0, "though": [0, 22], "design": [0, 2, 18], "should": [0, 2, 3, 5, 9, 14, 15, 17, 20, 21, 22], "move": [0, 5, 21], "toward": 0, "individu": [0, 2], "raw": [0, 3, 5, 17], "painfulli": 0, "slow": 0, "so": [0, 1, 2, 5, 20, 21, 22], "One": 0, "comput": 0, "hourli": 0, "much": [0, 5, 18, 19], "effici": [0, 5], "planner": 0, "pick": [0, 2, 22], "canon": [0, 5], "three": [0, 3, 20], "event": [0, 1, 2, 5, 15, 18, 19, 20, 22], "form": [0, 2, 3, 9, 18, 19, 20], "give": [0, 2, 5, 20], "common": [0, 2, 5, 22], "read": [0, 1, 3, 5, 9, 14, 15], "put": [0, 5], "less": [0, 5, 22], "load": [0, 1, 2, 5, 11, 13, 22], "clickhosu": 0, "when": [0, 1, 3, 4, 5, 6, 9, 20, 21, 22], "offer": 0, "merg": [0, 1, 22], "serv": [0, 1, 2, 5, 14, 15], "essenti": [0, 1], "simpl": [0, 20], "includ": [0, 2, 5, 16, 20, 22], "togeth": [0, 18, 19], "groupedmessag": [0, 20, 22], "groupassinge": 0, "left": [0, 9, 20, 22], "similar": [0, 1, 2, 5, 17, 18, 19, 22], "wa": [0, 1, 2, 18, 19, 22], "previou": [0, 2], "seri": [1, 2, 18, 19], "orient": 1, "store": [1, 5, 7, 18, 19], "columnari": 1, "distribut": [1, 5, 14, 15, 17, 18, 19, 20], "well": [1, 2, 5, 18, 19], "suit": [1, 16], "kind": [1, 5, 8, 9, 10, 12, 14, 15], "fulli": [1, 21], "input": [1, 5], "stream": [1, 15, 18, 19], "kafka": [1, 15, 16, 17, 18, 19], "todai": 1, "either": [1, 9, 20, 22], "chosen": 1, "becaus": [1, 5, 20], "good": [1, 5], "balanc": [1, 22], "replic": [1, 7, 22], "natur": 1, "flexibl": [1, 5], "engin": [1, 2, 7], "goal": [1, 2], "dataset": [1, 3, 5, 7, 
18, 20, 22], "model": [1, 2, 5, 18, 19, 20, 21], "api": [1, 5, 7, 16, 22], "endpoint": [1, 17, 20], "insert": [1, 15], "except": [1, 2, 20, 22], "debug": [1, 20, 22], "mode": [1, 15, 18, 22], "process": [1, 16, 18, 22], "consum": [1, 5, 7, 15, 16, 18, 19, 22], "written": [1, 2, 7, 17], "write": [1, 2, 3, 5, 15, 22], "onto": [1, 2], "most": [1, 2, 7, 16, 18, 19, 22], "effect": 1, "batch": 1, "especi": 1, "our": [1, 2, 20], "pass": [1, 2], "onc": [1, 5, 21, 22], "By": 1, "properli": [1, 2, 3, 21, 22], "select": [1, 2, 4, 9, 17, 22], "dedupl": 1, "achiev": [1, 22], "exactli": [1, 20], "accept": [1, 16, 22], "simplest": 1, "system": [1, 2, 5, 16, 18, 19, 22], "express": [1, 5, 9, 20], "snql": [1, 18, 19, 22], "sent": [1, 20, 22], "post": [1, 22], "http": [1, 22], "call": [1, 5, 22], "transform": [1, 2, 5, 9, 14, 15], "done": [1, 3, 5, 21], "receiv": [1, 2, 22], "result": [1, 2, 3, 5, 14, 15, 20, 22], "push": 1, "regist": [1, 5], "Then": 1, "relev": [1, 3, 5, 22], "updat": [1, 5, 6, 15], "period": [1, 5], "produc": [1, 2, 5, 20], "coexist": 1, "default": [1, 3, 17, 20, 21, 22], "monoton": 1, "multi": [1, 20, 21], "leader": 1, "hit": [1, 22], "replica": [1, 2, 21], "up": [1, 2, 3, 5, 7, 16, 17, 21], "date": 1, "reach": 1, "state": [1, 14, 15], "forc": [1, 21, 22], "befor": [1, 2, 20], "final": [1, 14, 15, 21, 22], "keyword": [1, 20], "singl": [1, 14, 15, 18, 19, 20, 22], "sequenti": [1, 22], "role": 1, "plai": 1, "show": 1, "If": [1, 14, 15, 16, 20, 21], "deploi": [1, 7], "stand": 1, "alon": 1, "won": 1, "legend": 1, "top": [1, 5, 18, 19], "diagram": [1, 7], "illustr": 1, "entiti": [1, 2, 3, 8, 13, 20, 22], "issu": [1, 2, 5, 17, 18, 19, 22], "featur": [1, 3], "whole": 1, "between": [1, 2, 5, 7, 20, 21], "feed": 1, "messag": [1, 2, 5, 15, 22], "upon": 1, "commit": [1, 3, 15], "record": [1, 3, 22], "log": [1, 3, 9, 15], "alert": 1, "synchron": 1, "proce": 1, "lockstep": 1, "ident": 1, "exist": [1, 2, 3, 5, 7], "addit": [1, 2, 22], "step": [1, 2, 5, 16, 22], "replac": [1, 
2, 3, 14, 15, 18, 19, 22], "mutat": 1, "unmerg": 1, "reprocess": 1, "etc": [1, 5, 20], "forward": [1, 21], "project": [1, 20, 22], "id": [1, 3, 20, 22], "order": [1, 2, 3, 5, 16, 21, 22], "veri": [1, 2, 5, 22], "simpler": 1, "power": [1, 18, 19], "releas": [1, 14, 15, 18, 19], "health": 1, "while": [1, 2, 5], "mainli": [1, 2], "stat": [1, 18, 19, 22], "page": [1, 18, 19], "still": [1, 5, 9, 22], "under": [1, 18], "construct": [1, 5], "cdc": 1, "start": [2, 14, 15, 16, 18, 21, 22], "pars": 2, "ast": 2, "ands": 2, "sql": [2, 5, 18, 19, 20, 22], "being": [2, 5, 22], "appli": [2, 9, 12, 14, 15, 18, 19, 20, 22], "prevent": [2, 22], "danger": 2, "infrastructur": 2, "data": [2, 7, 16, 17, 18, 19, 20, 21], "focus": [2, 22], "ensur": [2, 3, 14, 15, 16, 21], "match": [2, 5, 9, 22], "custom": [2, 3, 5], "function": [2, 6, 9, 16, 18, 19, 20, 22], "promot": 2, "tag": 2, "introduct": [2, 22], "pointer": 2, "code": [2, 20, 22], "exampl": [2, 3, 5, 20, 21, 22], "json": [2, 20], "base": [2, 3, 18, 19, 20, 22], "new": [2, 3, 5, 6, 7, 14, 15, 21], "With": [2, 20], "chang": [2, 3, 5, 17, 18, 19, 21], "whether": 2, "structur": [2, 3, 5, 20, 22], "here": [2, 3, 5, 14, 15, 18, 19, 20, 21], "modul": [2, 22], "yet": [2, 21], "catch": 2, "invalid": [2, 22], "respons": [2, 9, 14, 15, 20], "return": [2, 5, 20, 22], "http400": 2, "proper": 2, "sub": 2, "check": [2, 14, 15], "right": [2, 5, 9, 20, 21, 22], "after": [2, 5, 20, 22], "happen": [2, 3, 20], "alia": [2, 20], "shadow": 2, "signatur": 2, "column": [2, 3, 9, 14, 15, 20, 22], "reject": 2, "condit": [2, 4, 5, 14, 15, 20, 22], "rang": [2, 3, 20, 22], "stateless": 2, "object": [2, 9, 14, 15, 22], "place": [2, 5, 21], "implement": 2, "sequenc": [2, 7, 9], "apdex": 2, "bucket": [2, 20], "respond": [2, 22], "faster": [2, 5], "At": 2, "end": [2, 5], "entir": [2, 5, 20], "inspect": 2, "appropri": 2, "consist": [2, 20, 22], "rout": [2, 3], "node": [2, 3, 5, 14, 15, 18, 19, 20, 22], "reduc": 2, "notabl": 2, "abc": 2, "access": [2, 5, 17, 18, 19, 
22], "valu": [2, 3, 5, 9, 14, 15, 20, 21, 22], "indexof": 2, "been": [2, 4, 5, 6, 22], "rule": [2, 9, 18, 19], "contrarili": [2, 22], "full": [2, 5], "context": [2, 5, 17], "u": [2, 7, 22], "easili": 2, "reus": 2, "across": [2, 3, 5, 20], "compar": [2, 20], "Their": 2, "oper": [2, 5, 20], "For": [2, 3, 7, 20, 22], "find": [2, 20], "equal": [2, 3, 22], "equival": [2, 9, 20], "hashmap": 2, "bloom": 2, "filter": [2, 14, 15, 20, 22], "index": 2, "make": [2, 3, 5, 16, 21, 22], "split": [2, 14, 15], "assembl": 2, "hare": 2, "file": [2, 3, 5, 11, 13, 21], "sort": 2, "ones": [2, 20], "variabl": [2, 16, 21], "increas": [2, 22], "size": 2, "progress": [2, 5], "stop": [2, 3, 5], "soon": 2, "enough": 2, "fetch": 2, "minim": [2, 22], "number": [2, 3, 5, 10, 20, 22], "fewer": 2, "second": [2, 5, 20, 22], "miss": [2, 22], "first": [2, 3, 5, 22], "simpli": [2, 3], "format": [2, 7, 14, 15], "string": [2, 8, 9, 10, 12, 14, 15, 20, 22], "follow": [2, 3, 4, 5, 6, 7, 16, 17, 21, 22], "slightli": 2, "path": 2, "take": [2, 4, 5, 20, 21, 22], "account": [2, 5], "multipli": 2, "solv": 2, "prefer": 2, "type": [2, 3, 5, 7, 9, 14, 15, 20, 22], "few": [2, 7], "respect": [2, 20], "creat": [2, 5, 7], "incredibli": 2, "ineffici": 2, "basic": 2, "overal": [2, 5], "turn": [2, 21], "semi": 2, "activ": [3, 16], "subject": [3, 5], "higher": [3, 18, 19], "volum": [3, 18, 19, 21], "out": [3, 14, 15, 21], "resourc": [3, 14, 15], "redi": [3, 16, 17, 18, 19], "postgr": [3, 18, 19], "assign": [3, 20, 21], "organization_id": 3, "maintain": [3, 22], "logical_partition_map": 3, "futur": [3, 6], "revis": 3, "along": 3, "sliced_storage_set": 3, "incom": [3, 5, 15], "storagesetkei": 3, "slice_id": 3, "pair": [3, 5], "add": [3, 17, 20], "repartit": 3, "increment": 3, "count": [3, 17, 20], "0": [3, 5, 17, 20, 22], "storage_set": 3, "given": [3, 5, 7, 20], "mega": 3, "partial": [3, 14, 15], "resid": 3, "could": [3, 6], "scenario": 3, "now": [3, 5, 21], "usual": 3, "2": [3, 16, 20, 22], "3": [3, 6, 22], "definit": 
[3, 5], "sliced_clust": 3, "desir": 3, "regular": [3, 5], "storage_set_slic": 3, "calcul": 3, "ultim": 3, "destin": [3, 22], "partition_key_column_nam": [3, 9], "properti": [3, 22], "non": [3, 5], "yaml": [3, 11, 13], "see": [3, 5, 20, 22], "might": [3, 5, 6, 7], "generic_metrics_set": 3, "generic_metrics_distribut": 3, "sliced_kafka_topic_map": 3, "tupl": [3, 9, 20, 22], "logical_topic_nam": 3, "sure": [3, 16, 21], "broker": 3, "sliced_kafka_broker_config": 3, "config": [3, 9, 14, 15, 21], "info": 3, "metric": 3, "broker_config": 3, "note": [3, 5, 17, 20, 21], "boundari": 3, "your": [3, 5, 6, 16, 21], "extra": [3, 15], "flag": [3, 12, 14, 15], "shown": 4, "segfault": 4, "20": [4, 6, 20], "7": [4, 6], "minimum": 4, "version": [4, 5, 8, 9, 10, 12, 14, 15, 16, 18, 20, 22], "snuba": [4, 5, 6, 7, 11, 13, 18, 19, 20], "trace": 4, "tool": 4, "unless": 4, "realli": 4, "want": [4, 5, 16, 20, 21], "down": [4, 22], "environ": [4, 5, 14, 15, 17, 18, 19, 21], "prewher": [4, 22], "fix": [4, 5], "21": [4, 6], "8": [4, 6], "upgrad": [4, 16], "complet": [4, 14, 15, 16, 22], "document": [5, 20, 21, 22], "collect": [5, 14, 15], "experi": [5, 17], "scale": 5, "someth": [5, 22], "doesn": [5, 20], "seem": 5, "submit": 5, "pr": 5, "columnar": 5, "datastor": 5, "demand": 5, "referenc": 5, "ed": 5, "those": [5, 18, 19], "claus": [5, 10, 20], "abil": 5, "advantag": 5, "tradit": 5, "rdbm": 5, "postgresql": 5, "commonli": 5, "nest": [5, 9, 14, 15], "arrai": [5, 8, 9, 10, 12, 14, 15, 20], "exact": 5, "larg": 5, "NOT": [5, 20], "often": 5, "howev": 5, "substr": 5, "usabl": 5, "attribut": [5, 22], "necessit": 5, "separ": [5, 7, 18, 19, 20, 22], "low": 5, "mean": [5, 22], "small": 5, "proport": 5, "high": 5, "break": 5, "style": 5, "neg": 5, "search": [5, 17, 18, 19], "penalti": 5, "bit": 5, "unrel": 5, "effort": 5, "made": 5, "avoid": 5, "facilit": 5, "tend": [5, 7], "hash": 5, "uniqu": [5, 14, 15, 20, 22], "stochast": 5, "quickli": [5, 17], "determin": [5, 12, 14, 15, 20, 22], "unbound": 5, 
"keyspac": 5, "amount": 5, "memori": 5, "fals": [5, 21, 22], "posit": 5, "test": [5, 6, 21], "alwai": [5, 22], "mayb": 5, "occur": 5, "scan": 5, "due": [5, 16], "prefix": 5, "regex": 5, "dure": [5, 22], "necessari": [5, 21], "old": 5, "goe": 5, "ttl": 5, "storag": [5, 9, 13, 17, 21], "processor": [5, 9, 14, 15], "duplic": 5, "automat": 5, "roll": 5, "dimens": 5, "let": 5, "owner": 5, "simplifi": 5, "get": [5, 16, 18], "benefit": 5, "ll": 5, "sampl": [5, 22], "usag": 5, "offici": 5, "contrari": 5, "trigger": 5, "convent": [5, 7, 20], "widgets_raw_loc": 5, "widgets_raw_dist": 5, "local": [5, 14, 15, 16, 17, 22], "widgets_aggregation_mv": 5, "widgets_aggregated_loc": 5, "widgets_aggregated_dist": 5, "immut": 5, "normal": 5, "behavior": 5, "suffix": [5, 7], "widgets_aggregation_mv_v1": 5, "widgets_aggregation_mv_v2": 5, "next": 5, "discard": 5, "longer": 5, "prior": 5, "continu": 5, "without": [5, 20, 21], "loss": 5, "control": [5, 21], "mv": 5, "last": 5, "plu": 5, "click_events_raw_loc": 5, "click_dur": 5, "float64": 5, "certain": 5, "click": 5, "click_events_aggregation_mv": 5, "quantilesst": 5, "click_events_aggregated_loc": 5, "zero": 5, "surpris": 5, "p90": 5, "p95": 5, "p99": 5, "durat": [5, 20, 22], "sens": 5, "resolv": 5, "confus": 5, "click_events_aggregation_mv_v1": 5, "via": [5, 7, 21], "__init": 5, "py__": 5, "mention": [5, 21], "remov": [5, 14, 15, 22], "unus": 5, "known": 6, "clikhous": 6, "outsid": 6, "potenti": 6, "broken": [6, 22], "risk": 6, "plan": 6, "recent": [6, 16], "inher": 7, "intend": 7, "shard": [7, 21], "directli": [7, 18, 19], "refer": [7, 16, 20, 22], "themselv": 7, "deleg": 7, "mainten": 7, "invis": 7, "applic": [7, 11, 13], "act": 7, "proxi": 7, "kept": 7, "indefinit": 7, "astut": 7, "notic": 7, "migrat": [7, 13, 18, 19], "_local": 7, "_dist": 7, "distinguish": 7, "mergetre": 7, "deriv": 7, "hopefulli": 7, "combin": [7, 14, 15, 18, 19, 20], "understand": [7, 22], "quick": [7, 17], "conta": [9, 14, 15], "arg": [9, 14, 15], "translat": 9, 
"mapper": 9, "is_writ": 9, "boolean": [9, 12, 20, 21], "mark": 9, "translation_mapp": 9, "instanti": [9, 14, 15], "curried_funct": 9, "join_relationship": 9, "join": [9, 20, 22], "relationship": [9, 20, 22], "specifi": [9, 15, 20, 22], "x": 9, "y": 9, "rhs_entiti": 9, "rh": [9, 20], "join_typ": 9, "inner": [9, 20], "track": 9, "storage_selector": 9, "selector": 9, "querystorageselector": 9, "query_processor": [9, 14, 15], "logicalqueryprocessor": 9, "queryprocessor": [9, 14, 15], "validate_data_model": 9, "null": [9, 10, 15, 20, 22], "mismatch": [9, 17], "required_time_column": 9, "slice": [9, 18], "subscription_processor": 9, "subscription_valid": 9, "max_allowed_aggreg": 10, "integ": [10, 20], "disallowed_aggreg": 10, "option": [12, 20], "identifi": [14, 15, 20, 22], "set_kei": [14, 15], "locat": [14, 15], "cluster": [14, 15, 17, 21], "readiness_st": [14, 15], "readi": [14, 15, 22], "avail": [14, 15, 16], "variou": [14, 15], "label": [14, 15], "four": [14, 15], "limit": [14, 15, 22], "deprecr": [14, 15], "ci": [14, 15], "develop": [14, 15, 17, 18, 19, 21], "deprec": [14, 15], "local_table_nam": [14, 15], "dist_table_nam": [14, 15], "not_deleted_mandatory_condit": [14, 15], "delet": [14, 15, 22], "eg": [14, 15, 20], "explicitli": [14, 15], "partition_format": [14, 15], "cleanup": [14, 15], "job": [14, 15], "clickhousequeryprocessor": [14, 15], "query_splitt": [14, 15], "splitter": [14, 15], "querysplitstrategi": [14, 15], "runtim": [14, 15], "mandatory_condition_check": [14, 15], "conditioncheck": [14, 15], "haven": [14, 15], "secur": [14, 15], "allocation_polici": [14, 15], "allocationpolici": [14, 15], "alloc": [14, 15], "stream_load": 15, "loader": 15, "argument": 15, "convert": 15, "bodi": [15, 20], "default_top": 15, "commit_log_top": 15, "subscription_scheduled_top": 15, "schedul": [15, 22], "subscription_scheduler_mod": 15, "e": [15, 20, 22], "g": [15, 20], "global": 15, "subscription_result_top": 15, "replacement_top": 15, "dlq_topic": 15, "dlq": 15, 
"pre_filt": 15, "streammessagefilt": 15, "replacer_processor": 15, "replacerprocessor": 15, "writer_opt": 15, "sourc": 16, "pyenv": 16, "assum": [16, 17], "dev": 16, "homebrew": 16, "m1": 16, "mac": 16, "packag": 16, "ve": 16, "export": 16, "cpath": 16, "opt": 16, "library_path": 16, "lib": 16, "clone": 16, "repo": 16, "workspac": 16, "git": 16, "github": 16, "com": 16, "getsentri": 16, "command": [16, 22], "python": [16, 22], "virtual": 16, "cd": 16, "setup": 16, "m": 16, "venv": 16, "bin": 16, "pip": 16, "22": 16, "capabl": 16, "devserv": 16, "integr": 16, "alreadi": [16, 21, 22], "docker": [16, 17, 21], "p": 16, "grep": 16, "server": [16, 17, 21], "checkout": 16, "branch": 16, "anoth": [16, 20], "termin": 16, "master": 16, "pull": 16, "devservic": [16, 17, 21], "exclud": [16, 17], "bring": 16, "clickhouse_host": 17, "127": [17, 22], "redis_host": 17, "On": 17, "port": 17, "6379": 17, "9092": 17, "servic": [17, 18, 19], "line": [17, 22], "conf": [17, 21], "py": [17, 21], "sentry_eventstream": 17, "eventstream": 17, "kafkaeventstream": 17, "And": 17, "everyth": [17, 20], "utc": 17, "timezon": 17, "sentry_search": 17, "eventsdatasetsnubasearchbackend": 17, "sentry_tsdb": 17, "tsdb": 17, "redissnuba": 17, "redissnubatsdb": 17, "snubaeventstream": 17, "psql": 17, "exec": 17, "sentry_clickhous": 17, "sentry_loc": 17, "found": [17, 20, 21, 22], "hostnam": 17, "v": 17, "host": 17, "rich": [18, 19], "fast": [18, 19], "origin": [18, 19], "evolv": [18, 19], "layer": [18, 19], "graph": [18, 19, 20], "instal": [18, 19], "monitor": [18, 19], "architectur": 18, "overview": 18, "within": 18, "configur": [18, 21], "Of": 18, "death": 18, "topologi": 18, "send": 20, "subqueri": 20, "AND": [20, 22], "OR": 20, "asc": 20, "desc": 20, "n": 20, "encod": 20, "bool": 20, "turbo": [20, 22], "impli": 20, "url": [20, 22], "pattern": 20, "subgraph": 20, "ad": [20, 21], "insid": 20, "curli": 20, "brace": 20, "entireti": 20, "anyth": 20, "outer": 20, "alias": 20, "avg": 20, "AS": [20, 22], 
"avg_d": 20, "max": 20, "direct": 20, "short": 20, "hand": 20, "than": 20, "comma": 20, "groupassigne": [20, 22], "tot": 20, "user_id": 20, "somebodi": 20, "hard": 20, "safe": 20, "10": [20, 22], "11": 20, "output": 20, "treat": 20, "noth": 20, "empti": 20, "arithmet": 20, "qualifi": 20, "infix": 20, "lh": 20, "op": [20, 22], "liter": [20, 22], "IN": [20, 22], "mandatori": 20, "declar": [20, 22], "pretti": 20, "self": 20, "explanatori": 20, "1000": [20, 22], "magic": 20, "floor": 20, "minut": 20, "hour": 20, "dai": 20, "timeseriesprocessor": 20, "timestamp": [20, 22], "event_id": [20, 22], "event_count": 20, "todatetim": [20, 22], "2022": 20, "01": 20, "15t00": 20, "00": [20, 22], "000000": 20, "21t00": 20, "3600": [20, 22], "true": [20, 21], "rate": [20, 22], "isn": 20, "float": 20, "percentag": 20, "Or": 20, "greater": 20, "try": 21, "experiment": 21, "purpos": 21, "moment": 21, "switch": 21, "live": 21, "envrion": 21, "sentry_distributed_clickhouse_t": 21, "whenev": 21, "off": 21, "alter": 21, "inform": 21, "zookeep": 21, "link": 21, "immedi": [21, 22], "snuba_set": 21, "drive": 22, "author": 22, "architect": 22, "know": 22, "about": 22, "interest": 22, "offset": 22, "uint64": 22, "record_delet": 22, "uint8": 22, "statu": 22, "nullabl": 22, "last_seen": 22, "datetim": 22, "first_seen": 22, "active_at": 22, "first_release_id": 22, "group_id": 22, "go": 22, "sdk": 22, "titl": 22, "uniq": 22, "uniq_ev": 22, "groupbi": 22, "gt": 22, "2021": 22, "granular": 22, "import": 22, "recommend": 22, "care": 22, "cach": 22, "retri": 22, "bulk": 22, "method": 22, "dictionari": 22, "metadata": 22, "bad": 22, "meta": 22, "infer": 22, "1218": 22, "sorri": 22, "payload": 22, "screenshot": 22, "exhaust": 22, "statist": 22, "thread": 22, "in_ord": 22, "turbo_sample_r": 22, "correct": 22, "4": 22, "200": 22, "success": 22, "400": 22, "500": 22, "timeout": 22, "advanc": 22, "429": 22, "1621038379": 22, "duration_m": 22, "95": 22, "marks_m": 22, "cache_get": 22, "cache_set": 22, "39": 
22, "get_config": 22, "prepare_queri": 22, "rate_limit": 22, "validate_schema": 22, "34": 22, "clickhouse_t": 22, "errors_loc": 22, "referr": 22, "project_r": 22, "project_concurr": 22, "global_r": 22, "global_concurr": 22, "result_row": 22, "result_col": 22, "query_id": 22, "f09f3f9e1c632f395792c6a4bfe7c4f": 22, "_snuba_titl": 22, "_snuba_project_id": 22, "greaterorequ": 22, "_snuba_timestamp": 22, "05": 22, "01t00": 22, "univers": 22, "11t00": 22, "phase": 22, "tell": 22, "decid": 22, "tree": 22, "awai": 22, "concurr": 22, "involv": 22, "invalid_queri": 22, "2023": 22, "16": 22, "sai": 22, "around": 22, "42": 22, "time_window": 22, "150": 22, "resolut": 22, "60": 22, "alongsid": 22, "vari": 22, "paramet": 22, "becom": 22, "past": 22, "window": 22, "fall": 22, "larger": 22, "task": 22, "executor": 22, "jitter": 22, "jitteredtaskbuild": 22, "defint": 22}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"snuba": [0, 1, 2, 3, 16, 17, 21, 22], "data": [0, 1, 3, 5, 22], "model": [0, 22], "dataset": [0, 8, 11, 13], "entiti": [0, 9, 10], "type": 0, "relationship": 0, "between": 0, "consist": [0, 1], "storag": [0, 1, 2, 3, 7, 14, 15], "exampl": 0, "singl": 0, "multi": 0, "join": [0, 2], "architectur": 1, "overview": 1, "ingest": 1, "queri": [1, 2, 4, 5, 7, 20, 22], "within": 1, "sentri": [1, 16, 17, 18, 19, 22], "deploy": 1, "error": 1, "transact": 1, "flow": 1, "session": 1, "outcom": 1, "chang": 1, "captur": 1, "pipelin": [1, 2], "process": 2, "phase": 2, "legaci": 2, "snql": [2, 20], "parser": 2, "valid": 2, "logic": [2, 3], "processor": 2, "selector": 2, "translat": 2, "physic": [2, 3], "splitter": 2, "formatt": 2, "composit": 2, "subqueri": 2, "gener": 2, "express": 2, "push": 2, "down": 2, "simpl": 2, "optim": 2, "slice": 3, "under": 3, "develop": [3, 16], "configur": [3, 11, 13], "map": 3, "partit": 3, "defin": 3, "clickhous": [3, 4, 5, 6, 7], "cluster": 3, "environ": [3, 16], "prepar": [3, 22], "shard": 3, "ad": 3, "kafka": 3, "topic": 3, "work": 3, 
"start": [3, 17], "consum": 3, "todo": 3, "handl": 3, "subscript": [3, 10, 22], "schedul": 3, "executor": 3, "etc": 3, "Of": 4, "death": 4, "countif": 4, "doom": 4, "schema": [5, 8, 9, 10, 12, 13, 14, 15], "design": 5, "best": [5, 7], "practic": [5, 7], "column": 5, "base": 5, "dictionari": 5, "tag": 5, "promot": 5, "select": [5, 20], "indic": 5, "bloom": 5, "filter": 5, "index": 5, "like": 5, "aggreg": 5, "tabl": [5, 7], "materi": 5, "migrat": [5, 12, 21], "us": [5, 18, 19], "materialization_vers": 5, "support": 6, "version": 6, "topologi": 7, "node": 7, "v": 7, "distribut": [7, 21], "local": [7, 21], "ty": 7, "all": 7, "togeth": 7, "properti": [8, 9, 10, 12, 14, 15], "group": 12, "readabl": 14, "writabl": 15, "prerequisit": 16, "instal": 16, "run": 16, "test": [16, 22], "against": 16, "get": 17, "requir": 17, "set": 17, "featur": [18, 19], "some": [18, 19], "case": [18, 19], "content": 18, "The": 20, "languag": 20, "match": 20, "BY": 20, "where": 20, "have": 20, "order": 20, "limit": 20, "offset": 20, "granular": 20, "total": 20, "sampl": 20, "mode": 21, "enabl": 21, "explor": 22, "send": 22, "through": 22, "web": 22, "ui": 22, "via": 22, "curl": 22, "request": 22, "respons": 22, "format": 22, "creat": 22}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 6, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx": 56}}) \ No newline at end of file