diff --git a/src/sentry/tasks/post_process.py b/src/sentry/tasks/post_process.py
index c534836f339da..2d17a49a2ea9e 100644
--- a/src/sentry/tasks/post_process.py
+++ b/src/sentry/tasks/post_process.py
@@ -16,6 +16,8 @@
 from sentry.issues.grouptype import GroupCategory
 from sentry.issues.issue_occurrence import IssueOccurrence
 from sentry.killswitches import killswitch_matches_context
+from sentry.sentry_metrics.kafka import KafkaMetricsBackend
+from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.signals import event_processed, issue_unignored, transaction_processed
 from sentry.tasks.base import instrumented_task
 from sentry.utils import metrics
@@ -115,6 +117,38 @@ def _capture_event_stats(event: Event) -> None:
     metrics.timing("events.size.data", event.size, tags=tags)
 
 
+def _update_escalating_metrics(event: Event) -> None:
+    """
+    Emit an ``event_ingested`` counter (escalating-issues use
+    case) for the event's group, keyed by org and project.
+
+    Metrics emission is best-effort: any failure is swallowed
+    and reported via a statsd counter so a metrics/Kafka
+    outage can never fail event post-processing.
+    """
+    try:
+        # NOTE(review): building a producer per event is
+        # expensive; consider hoisting a shared backend to
+        # module scope if this path is hot.
+        metrics_backend = KafkaMetricsBackend()
+        try:
+            metrics_backend.counter(
+                UseCaseID.ESCALATING_ISSUES,
+                org_id=event.project.organization_id,
+                project_id=event.project.id,
+                metric_name="event_ingested",
+                value=1,
+                tags={"group": str(event.group_id)},
+                unit=None,
+            )
+        finally:
+            # Always close the producer, even if counter()
+            # raises, so buffered messages are flushed.
+            metrics_backend.close()
+    except Exception:
+        metrics.incr("post_process.update_escalating_metrics.error")
+
+
 def _capture_group_stats(job: PostProcessJob) -> None:
     event = job["event"]
     if not job["group_state"]["is_new"] or not should_write_event_stats(event):
@@ -571,6 +605,7 @@ def get_event_raise_exception() -> Event:
     update_event_groups(event, group_states)
     bind_organization_context(event.project.organization)
     _capture_event_stats(event)
+    _update_escalating_metrics(event)
     group_events: Mapping[int, GroupEvent] = {
         ge.group_id: ge for ge in list(event.build_group_events())