diff --git a/src/sentry/api/endpoints/organization_measurements_meta.py b/src/sentry/api/endpoints/organization_measurements_meta.py index 2611ed5a22c83..ae6c448472941 100644 --- a/src/sentry/api/endpoints/organization_measurements_meta.py +++ b/src/sentry/api/endpoints/organization_measurements_meta.py @@ -6,7 +6,7 @@ from sentry.api.bases import NoProjects, OrganizationEventsEndpointBase from sentry.models import Organization from sentry.search.events.constants import METRIC_FUNCTION_LIST_BY_TYPE -from sentry.sentry_metrics.configuration import UseCaseKey +from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.snuba.metrics.datasource import get_custom_measurements @@ -24,7 +24,7 @@ def get(self, request: Request, organization: Organization) -> Response: organization_id=organization.id, start=params["start"], end=params["end"], - use_case_id=UseCaseKey.PERFORMANCE, + use_case_id=UseCaseID.TRANSACTIONS, ) with start_span(op="transform", description="metric meta"): diff --git a/src/sentry/api/endpoints/organization_metrics.py b/src/sentry/api/endpoints/organization_metrics.py index 676c7f2403ba1..1bcc5f8c82374 100644 --- a/src/sentry/api/endpoints/organization_metrics.py +++ b/src/sentry/api/endpoints/organization_metrics.py @@ -7,7 +7,7 @@ from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.paginator import GenericOffsetPaginator from sentry.api.utils import InvalidParams -from sentry.sentry_metrics.configuration import UseCaseKey +from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.snuba.metrics import ( QueryDefinition, get_metrics, @@ -21,17 +21,17 @@ from sentry.utils.cursors import Cursor, CursorResult -def get_use_case_id(request: Request) -> UseCaseKey: +def get_use_case_id(request: Request) -> UseCaseID: """ - Get useCase from query params and validate it against UseCaseKey enum type + Get useCase from query params and validate it against UseCaseID enum type Raise a ParseError if the use_case parameter is invalid. """ try: - return UseCaseKey(request.GET.get("useCase", "release-health")) + return UseCaseID(request.GET.get("useCase", "sessions")) except ValueError: raise ParseError( - detail=f"Invalid useCase parameter. Please use one of: {[uc.value for uc in UseCaseKey]}" + detail=f"Invalid useCase parameter. 
Please use one of: {[uc.value for uc in UseCaseID]}" ) diff --git a/src/sentry/snuba/metrics/datasource.py b/src/sentry/snuba/metrics/datasource.py index da45bd4daec6a..e949d15d62df5 100644 --- a/src/sentry/snuba/metrics/datasource.py +++ b/src/sentry/snuba/metrics/datasource.py @@ -24,7 +24,7 @@ from sentry.api.utils import InvalidParams from sentry.models import Project from sentry.sentry_metrics import indexer -from sentry.sentry_metrics.configuration import UseCaseKey +from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.sentry_metrics.utils import ( MetricIndexNotFound, resolve_tag_key, @@ -87,7 +87,7 @@ def _get_metrics_for_entity( def get_available_derived_metrics( projects: Sequence[Project], supported_metric_ids_in_entities: Dict[MetricType, Sequence[int]], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, ) -> Set[str]: """ Function that takes as input a dictionary of the available ids in each entity, and in turn @@ -135,7 +135,7 @@ def get_available_derived_metrics( return found_derived_metrics.intersection(public_derived_metrics) -def get_metrics(projects: Sequence[Project], use_case_id: UseCaseKey) -> Sequence[MetricMeta]: +def get_metrics(projects: Sequence[Project], use_case_id: UseCaseID) -> Sequence[MetricMeta]: ENTITY_TO_DATASET = { "sessions": { "c": "metrics_counters", @@ -179,7 +179,7 @@ def get_custom_measurements( organization_id: int, start: Optional[datetime] = None, end: Optional[datetime] = None, - use_case_id: UseCaseKey = UseCaseKey.PERFORMANCE, + use_case_id: UseCaseID = UseCaseID.TRANSACTIONS, ) -> Sequence[MetricMeta]: assert project_ids @@ -212,7 +212,7 @@ def get_custom_measurements( def _get_metrics_filter_ids( - projects: Sequence[Project], metric_mris: Sequence[str], use_case_id: UseCaseKey + projects: Sequence[Project], metric_mris: Sequence[str], use_case_id: UseCaseID ) -> Set[int]: """ Returns a set of metric_ids that map to input metric names and raises an exception if @@ -251,7 +251,7 @@ def _validate_requested_derived_metrics_in_input_metrics( projects: Sequence[Project], metric_mris: Sequence[str], supported_metric_ids_in_entities: Dict[MetricType, Sequence[int]], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, ) -> None: """ Function that takes metric_mris list and a mapping of entity to its metric ids, and ensures @@ -280,7 +280,7 @@ def _fetch_tags_or_values_for_metrics( metric_names: Optional[Sequence[str]], referrer: str, column: str, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, ) -> Tuple[Union[Sequence[Tag], Sequence[TagValue]], Optional[str]]: assert len({p.organization_id for p in projects}) == 1 @@ -295,7 +295,7 @@ def _fetch_tags_or_values_for_mri( metric_mris: Optional[Sequence[str]], referrer: str, column: str, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, ) -> Tuple[Union[Sequence[Tag], Sequence[TagValue]], Optional[str]]: """ Function that takes as input projects, metric_mris, and a column, and based on the column @@ -333,7 +333,7 @@ def _fetch_tags_or_values_for_mri( release_health_metric_types = ("counter", "set", "distribution") performance_metric_types = ("generic_counter", "generic_set", "generic_distribution") - if use_case_id == UseCaseKey.RELEASE_HEALTH: + if use_case_id == UseCaseID.SESSIONS: metric_types = release_health_metric_types else: metric_types = performance_metric_types @@ -426,7 +426,7 @@ def _fetch_tags_or_values_for_mri( def get_single_metric_info( - projects: Sequence[Project], metric_name: str, use_case_id: UseCaseKey + projects: Sequence[Project], metric_name: 
str, use_case_id: UseCaseID ) -> MetricMetaWithTagKeys: assert projects @@ -462,7 +462,7 @@ def get_single_metric_info( def get_tags( - projects: Sequence[Project], metrics: Optional[Sequence[str]], use_case_id: UseCaseKey + projects: Sequence[Project], metrics: Optional[Sequence[str]], use_case_id: UseCaseID ) -> Sequence[Tag]: """Get all metric tags for the given projects and metric_names""" assert projects @@ -493,7 +493,7 @@ def get_tag_values( projects: Sequence[Project], tag_name: str, metric_names: Optional[Sequence[str]], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, ) -> Sequence[TagValue]: """Get all known values for a specific tag""" assert projects @@ -552,7 +552,7 @@ class GroupLimitFilters: def _get_group_limit_filters( - metrics_query: MetricsQuery, results: List[Mapping[str, int]], use_case_id: UseCaseKey + metrics_query: MetricsQuery, results: List[Mapping[str, int]], use_case_id: UseCaseID ) -> Optional[GroupLimitFilters]: if not metrics_query.groupby or not results: return None @@ -682,7 +682,7 @@ def _prune_extra_groups(results: dict, filters: GroupLimitFilters) -> None: def get_series( projects: Sequence[Project], metrics_query: MetricsQuery, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, include_meta: bool = False, tenant_ids: dict[str, Any] | None = None, ) -> dict: diff --git a/src/sentry/snuba/metrics/fields/base.py b/src/sentry/snuba/metrics/fields/base.py index a7e360411d68b..1ebeb3cd2251e 100644 --- a/src/sentry/snuba/metrics/fields/base.py +++ b/src/sentry/snuba/metrics/fields/base.py @@ -31,7 +31,7 @@ from sentry.models import Project from sentry.search.events.constants import MISERY_ALPHA, MISERY_BETA from sentry.sentry_metrics import indexer -from sentry.sentry_metrics.configuration import UseCaseKey +from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.sentry_metrics.utils import resolve_weak from sentry.snuba.dataset import Dataset, EntityKey from sentry.snuba.metrics.fields.histogram import ClickhouseHistogram, rebucket_histogram @@ -156,7 +156,7 @@ def run_metrics_query( def _get_known_entity_of_metric_mri(metric_mri: str) -> Optional[EntityKey]: - # ToDo(ahmed): Add an abstraction that returns relevant data based on usecasekey without repeating code + # ToDo(ahmed): Add an abstraction that returns relevant data based on UseCaseID without repeating code try: SessionMRI(metric_mri) entity_prefix = metric_mri.split(":")[0] @@ -181,7 +181,7 @@ def _get_known_entity_of_metric_mri(metric_mri: str) -> Optional[EntityKey]: def _get_entity_of_metric_mri( - projects: Sequence[Project], metric_mri: str, use_case_id: UseCaseKey + projects: Sequence[Project], metric_mri: str, use_case_id: UseCaseID ) -> EntityKey: known_entity = _get_known_entity_of_metric_mri(metric_mri) if known_entity is not None: @@ -195,11 +195,11 @@ def _get_entity_of_metric_mri( raise InvalidParams entity_keys_set: frozenset[EntityKey] - if use_case_id == UseCaseKey.PERFORMANCE: + if use_case_id == UseCaseID.TRANSACTIONS: entity_keys_set = frozenset( {EntityKey.GenericMetricsSets, EntityKey.GenericMetricsDistributions} ) - elif use_case_id == UseCaseKey.RELEASE_HEALTH: + elif use_case_id == UseCaseID.SESSIONS: entity_keys_set = frozenset( {EntityKey.MetricsCounters, EntityKey.MetricsSets, EntityKey.MetricsDistributions} ) @@ -245,11 +245,11 @@ class MetricObject(MetricObjectDefinition, ABC): """ @abstractmethod - def generate_filter_snql_conditions(self, org_id: int, use_case_id: UseCaseKey) -> Function: + def generate_filter_snql_conditions(self, org_id: 
int, use_case_id: UseCaseID) -> Function: raise NotImplementedError @abstractmethod - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[int]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[int]: raise NotImplementedError @@ -259,10 +259,10 @@ class RawMetric(MetricObject): metric """ - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[int]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[int]: return {resolve_weak(use_case_id, org_id_from_projects(projects), self.metric_mri)} - def generate_filter_snql_conditions(self, org_id: int, use_case_id: UseCaseKey) -> Function: + def generate_filter_snql_conditions(self, org_id: int, use_case_id: UseCaseID) -> Function: return Function( "equals", [Column("metric_id"), resolve_weak(use_case_id, org_id, self.metric_mri)], @@ -275,10 +275,10 @@ class AliasedDerivedMetric(AliasedDerivedMetricDefinition, MetricObject): for a raw metric name """ - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[int]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[int]: return {resolve_weak(use_case_id, org_id_from_projects(projects), self.raw_metric_mri)} - def generate_filter_snql_conditions(self, org_id: int, use_case_id: UseCaseKey) -> Function: + def generate_filter_snql_conditions(self, org_id: int, use_case_id: UseCaseID) -> Function: conditions = [ Function( "equals", @@ -327,7 +327,7 @@ def run_post_query_function( def generate_snql_function( self, entity: MetricEntity, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, aggregate_filter: Function, org_id: int, @@ -404,13 +404,13 @@ def _wrap_quantiles(self, function: Function, alias: str) -> Function: def generate_snql_function( self, entity: MetricEntity, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, aggregate_filter: Function, org_id: int, params: Optional[MetricOperationParams] = None, ) -> Function: - if use_case_id is UseCaseKey.PERFORMANCE: + if use_case_id is UseCaseID.TRANSACTIONS: snuba_function = GENERIC_OP_TO_SNUBA_FUNCTION[entity][self.op] else: snuba_function = OP_TO_SNUBA_FUNCTION[entity][self.op] @@ -482,14 +482,14 @@ def run_post_query_function( def generate_snql_function( self, entity: MetricEntity, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, aggregate_filter: Function, org_id: int, params: Optional[MetricOperationParams] = None, ) -> Function: metrics_query_args = inspect.signature(self.snql_func).parameters.keys() - kwargs: MutableMapping[str, Union[float, int, str, UseCaseKey, Function]] = {} + kwargs: MutableMapping[str, Union[float, int, str, UseCaseID, Function]] = {} if "alias" in metrics_query_args: kwargs["alias"] = alias @@ -526,7 +526,7 @@ def validate_can_orderby(self) -> None: @abstractmethod def get_entity( - self, projects: Sequence[Project], use_case_id: UseCaseKey + self, projects: Sequence[Project], use_case_id: UseCaseID ) -> Union[MetricEntity, Dict[MetricEntity, Sequence[str]]]: """ Method that generates the entity of an instance of MetricsFieldBase. 
@@ -536,7 +536,7 @@ def get_entity( raise NotImplementedError @abstractmethod - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[int]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[int]: """ Method that generates all the metric ids required to query an instance of MetricsFieldBase @@ -547,7 +547,7 @@ def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseK def generate_select_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -562,7 +562,7 @@ def generate_orderby_clause( self, direction: Direction, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[OrderBy]: @@ -648,7 +648,7 @@ def generate_bottom_up_derived_metrics_dependencies( def generate_groupby_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -662,7 +662,7 @@ def generate_groupby_statements( def generate_where_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -699,13 +699,13 @@ def __str__(self) -> str: def validate_can_orderby(self) -> None: self.metric_operation.validate_can_orderby() - def get_entity(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> MetricEntity: + def get_entity(self, projects: Sequence[Project], use_case_id: UseCaseID) -> MetricEntity: return _get_entity_of_metric_mri(projects, self.metric_object.metric_mri, use_case_id).value def generate_select_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -724,7 +724,7 @@ def generate_orderby_clause( self, direction: Direction, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[OrderBy]: @@ -744,7 +744,7 @@ def generate_available_operations(self) -> Collection[MetricOperationType]: def generate_default_null_values(self) -> Optional[Union[int, List[Tuple[float]]]]: return self.metric_operation.get_default_null_values() - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[int]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[int]: return self.metric_object.generate_metric_ids(projects, use_case_id) def run_post_query_function( @@ -772,7 +772,7 @@ def build_conditional_aggregate_for_metric( self, org_id: int, entity: MetricEntity, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> Function: @@ -794,7 +794,7 @@ def build_conditional_aggregate_for_metric( def generate_groupby_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -813,7 +813,7 @@ def generate_groupby_statements( def generate_where_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) 
-> List[Function]: @@ -885,7 +885,7 @@ def validate_can_orderby(self) -> None: @classmethod def __recursively_get_all_entities_in_derived_metric_dependency_tree( - cls, derived_metric_mri: str, projects: Sequence[Project], use_case_id: UseCaseKey + cls, derived_metric_mri: str, projects: Sequence[Project], use_case_id: UseCaseID ) -> Set[MetricEntity]: """ Method that gets the entity of a derived metric by traversing down its dependency tree @@ -904,7 +904,7 @@ def __recursively_get_all_entities_in_derived_metric_dependency_tree( ) return entities - def get_entity(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> MetricEntity: + def get_entity(self, projects: Sequence[Project], use_case_id: UseCaseID) -> MetricEntity: if not projects: self._raise_entity_validation_exception("get_entity") try: @@ -923,7 +923,7 @@ def get_entity(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Me @classmethod def __recursively_generate_metric_ids( - cls, org_id: int, derived_metric_mri: str, use_case_id: UseCaseKey + cls, org_id: int, derived_metric_mri: str, use_case_id: UseCaseID ) -> Set[int]: """ Method that traverses a derived metric dependency tree to return a set of the metric ids @@ -940,7 +940,7 @@ def __recursively_generate_metric_ids( ids |= cls.__recursively_generate_metric_ids(org_id, metric_mri, use_case_id) return ids - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[int]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[int]: org_id = org_id_from_projects(projects) return self.__recursively_generate_metric_ids( org_id, derived_metric_mri=self.metric_mri, use_case_id=use_case_id @@ -952,7 +952,7 @@ def __recursively_generate_select_snql( project_ids: Sequence[int], org_id: int, derived_metric_mri: str, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: Optional[str] = None, ) -> List[Function]: """ @@ -988,7 +988,7 @@ def __recursively_generate_select_snql( def generate_select_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -1016,7 +1016,7 @@ def generate_orderby_clause( self, direction: Direction, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[OrderBy]: @@ -1070,7 +1070,7 @@ def generate_bottom_up_derived_metrics_dependencies( def generate_groupby_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -1079,7 +1079,7 @@ def generate_groupby_statements( def generate_where_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -1094,13 +1094,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: def validate_can_orderby(self) -> None: raise NotSupportedOverCompositeEntityException() - def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseKey) -> Set[Any]: + def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> Set[Any]: raise NotSupportedOverCompositeEntityException() def generate_select_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: 
Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -1110,7 +1110,7 @@ def generate_orderby_clause( self, direction: Direction, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[OrderBy]: @@ -1129,7 +1129,7 @@ def generate_default_null_values(self) -> Optional[Union[int, List[Tuple[float]] return default_null_value def get_entity( - self, projects: Sequence[Project], use_case_id: UseCaseKey + self, projects: Sequence[Project], use_case_id: UseCaseID ) -> Dict[MetricEntity, List[str]]: if not projects: self._raise_entity_validation_exception("get_entity") @@ -1145,7 +1145,7 @@ def __recursively_generate_singular_entity_constituents( cls, projects: Optional[Sequence[Project]], derived_metric_obj: DerivedMetricExpression, - use_case_id: UseCaseKey, + use_case_id: UseCaseID, is_naive: bool = False, ) -> Dict[MetricEntity, List[str]]: entities_and_metric_mris: Dict[MetricEntity, List[str]] = {} @@ -1220,7 +1220,7 @@ def generate_bottom_up_derived_metrics_dependencies( metric_nodes.append(DERIVED_METRICS[metric]) return reversed(results) - def naively_generate_singular_entity_constituents(self, use_case_id: UseCaseKey) -> Set[str]: + def naively_generate_singular_entity_constituents(self, use_case_id: UseCaseID) -> Set[str]: single_entity_constituents = set( list( self.__recursively_generate_singular_entity_constituents( @@ -1259,7 +1259,7 @@ def run_post_query_function( def generate_groupby_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: @@ -1268,7 +1268,7 @@ def generate_groupby_statements( def generate_where_statements( self, projects: Sequence[Project], - use_case_id: UseCaseKey, + use_case_id: UseCaseID, alias: str, params: Optional[MetricOperationParams] = None, ) -> List[Function]: diff --git a/tests/sentry/api/endpoints/test_organization_metric_data.py b/tests/sentry/api/endpoints/test_organization_metric_data.py index 81f63a0c08ddb..6a12113d25988 100644 --- a/tests/sentry/api/endpoints/test_organization_metric_data.py +++ b/tests/sentry/api/endpoints/test_organization_metric_data.py @@ -70,7 +70,7 @@ def test_incorrect_use_case_id_value(self): assert response.status_code == 400 assert ( response.json()["detail"] - == "Invalid useCase parameter. Please use one of: ['release-health', 'performance']" + == f"Invalid useCase parameter. 
Please use one of: {[uc.value for uc in UseCaseID]}" ) def test_invalid_field(self): @@ -438,7 +438,7 @@ def test_pagination_limit_without_orderby(self): field=f"count({TransactionMetricKey.MEASUREMENTS_LCP.value})", groupBy="transaction", per_page=2, - useCase="performance", + useCase="transactions", ) assert response.status_code == 200 @@ -476,7 +476,7 @@ def test_pagination_offset_without_orderby(self): groupBy="transaction", cursor=Cursor(0, 1), statsPeriod="1h", - useCase="performance", + useCase="transactions", ) assert response.status_code == 200, response.data @@ -536,7 +536,7 @@ def test_max_and_min_on_distributions(self): statsPeriod="1h", interval="1h", per_page=3, - useCase="performance", + useCase="transactions", includeSeries="0", ) groups = response.data["groups"] @@ -572,7 +572,7 @@ def test_orderby(self): groupBy="transaction", orderBy=f"-count({TransactionMetricKey.MEASUREMENTS_LCP.value})", per_page=2, - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 2 @@ -617,7 +617,7 @@ def test_multi_field_orderby(self): f"-count({TransactionMetricKey.MEASUREMENTS_FCP.value})", ], per_page=2, - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 2 @@ -657,7 +657,7 @@ def test_orderby_percentile(self): interval="1h", groupBy="tag1", orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 2 @@ -696,7 +696,7 @@ def test_orderby_percentile_with_pagination(self): groupBy="tag1", orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", per_page=1, - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 1 @@ -712,7 +712,7 @@ def test_orderby_percentile_with_pagination(self): orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", per_page=1, cursor=Cursor(0, 1), - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 1 @@ -743,7 +743,7 @@ def test_limit_with_orderby_is_overridden_by_paginator_limit(self): groupBy="tag1", orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", per_page=1, - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 1 @@ -768,7 +768,7 @@ def test_orderby_percentile_with_many_fields_one_entity_no_data(self): interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 0 @@ -810,7 +810,7 @@ def test_orderby_percentile_with_many_fields_one_entity(self): interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 2 @@ -868,7 +868,7 @@ def test_multi_field_orderby_percentile_with_many_fields_one_entity(self): statsPeriod="1h", interval="1h", groupBy=["project_id", "transaction"], - useCase="performance", + useCase="transactions", ) # Test order by DESC @@ -968,7 +968,7 @@ def test_orderby_percentile_with_many_fields_multiple_entities(self): interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert 
len(groups) == 2 @@ -1032,7 +1032,7 @@ def test_orderby_percentile_with_many_fields_multiple_entities_with_paginator(se "groupBy": ["project_id", "transaction"], "orderBy": f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", "per_page": 1, - "useCase": "performance", + "useCase": "transactions", } response = self.get_success_response(self.organization.slug, **request_args) @@ -1174,7 +1174,7 @@ def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] assert len(groups) == 2 @@ -1226,7 +1226,7 @@ def test_limit_without_orderby(self): interval="1h", groupBy="tag3", per_page=2, - useCase="performance", + useCase="transactions", ) groups = response.data["groups"] @@ -1549,7 +1549,7 @@ def test_derived_metric_incorrectly_defined_as_singular_entity( field=["crash_free_fake"], statsPeriod="6m", interval="1m", - useCase="release-health", + useCase="sessions", ) assert response.status_code == 400 assert response.json()["detail"] == ( @@ -2087,7 +2087,7 @@ def test_failure_rate_transaction(self): field=["transaction.failure_rate"], statsPeriod="1m", interval="1m", - useCase="performance", + useCase="transactions", ) assert len(response.data["groups"]) == 1 @@ -2121,7 +2121,7 @@ def test_failure_rate_without_transactions(self): field=["transaction.failure_rate"], statsPeriod="1m", interval="1m", - useCase="performance", + useCase="transactions", ) assert response.data["groups"] == [ @@ -2174,7 +2174,7 @@ def test_apdex_transactions(self): field=["transaction.apdex"], statsPeriod="1m", interval="1m", - useCase="performance", + useCase="transactions", ) assert len(response.data["groups"]) == 1 @@ -2204,7 +2204,7 @@ def test_miserable_users(self): field=["transaction.miserable_user"], statsPeriod="1m", interval="1m", - useCase="performance", + useCase="transactions", ) assert len(response.data["groups"]) == 1 @@ -2234,7 +2234,7 @@ def test_user_misery(self): field=["transaction.user_misery"], statsPeriod="1m", interval="1m", - useCase="performance", + useCase="transactions", ) assert len(response.data["groups"]) == 1 assert response.data["groups"][0]["totals"] == { diff --git a/tests/sentry/api/endpoints/test_organization_metric_tags.py b/tests/sentry/api/endpoints/test_organization_metric_tags.py index 3b164b87e621a..82da027c61dc8 100644 --- a/tests/sentry/api/endpoints/test_organization_metric_tags.py +++ b/tests/sentry/api/endpoints/test_organization_metric_tags.py @@ -79,7 +79,7 @@ def test_mri_metric_tags(self): response = self.get_success_response( self.organization.slug, metric=["d:transactions/duration@millisecond", "d:sessions/duration.exited@second"], - useCase="performance", + useCase="transactions", ) assert response.data == [] diff --git a/tests/sentry/api/endpoints/test_organization_metrics.py b/tests/sentry/api/endpoints/test_organization_metrics.py index 9efd67da43d78..3af889d0ea605 100644 --- a/tests/sentry/api/endpoints/test_organization_metrics.py +++ b/tests/sentry/api/endpoints/test_organization_metrics.py @@ -172,7 +172,7 @@ def test_metrics_index(self): because the setUp bypasses it. """ response = self.get_success_response( - self.organization.slug, project=[self.project.id], useCase=["release-health"] + self.organization.slug, project=[self.project.id], useCase=["sessions"] ) assert type(response.data) == list
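
Reviewer note, for readers skimming the rename: every call site above swaps sentry.sentry_metrics.configuration.UseCaseKey for sentry.sentry_metrics.use_case_id_registry.UseCaseID, and the public query-parameter values move from "performance"/"release-health" to "transactions"/"sessions". The sketch below is illustrative only and is not code from this PR: the UseCaseID enum here is a stand-in containing just the two members visible in this diff, get_use_case_id mirrors the endpoint logic rather than importing it, and metric_types_for condenses the branch from _fetch_tags_or_values_for_mri.

from enum import Enum
from typing import Tuple


class UseCaseID(Enum):
    # Stand-in for sentry.sentry_metrics.use_case_id_registry.UseCaseID;
    # only the members that appear in this diff are listed here.
    TRANSACTIONS = "transactions"  # replaces UseCaseKey.PERFORMANCE ("performance")
    SESSIONS = "sessions"          # replaces UseCaseKey.RELEASE_HEALTH ("release-health")


def get_use_case_id(query_params: dict) -> UseCaseID:
    # Mirrors get_use_case_id in organization_metrics.py: the default moves
    # from "release-health" to "sessions", and unknown values are rejected
    # with the list of valid ones. The real endpoint raises DRF's ParseError;
    # ValueError is used here to keep the sketch dependency-free.
    raw = query_params.get("useCase", "sessions")
    try:
        return UseCaseID(raw)
    except ValueError:
        raise ValueError(
            f"Invalid useCase parameter. Please use one of: {[uc.value for uc in UseCaseID]}"
        )


RELEASE_HEALTH_METRIC_TYPES: Tuple[str, ...] = ("counter", "set", "distribution")
PERFORMANCE_METRIC_TYPES: Tuple[str, ...] = (
    "generic_counter",
    "generic_set",
    "generic_distribution",
)


def metric_types_for(use_case_id: UseCaseID) -> Tuple[str, ...]:
    # Condenses the branch in _fetch_tags_or_values_for_mri: SESSIONS keeps the
    # release-health storages, anything else falls through to generic metrics.
    if use_case_id is UseCaseID.SESSIONS:
        return RELEASE_HEALTH_METRIC_TYPES
    return PERFORMANCE_METRIC_TYPES


# Callers that previously sent useCase=performance now send useCase=transactions.
assert get_use_case_id({"useCase": "transactions"}) is UseCaseID.TRANSACTIONS
assert get_use_case_id({}) is UseCaseID.SESSIONS
assert metric_types_for(UseCaseID.TRANSACTIONS) == PERFORMANCE_METRIC_TYPES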