diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4424f0123b..717dc48e72 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -2,10 +2,11 @@ ## Which issue(s) this PR closes -Closes [issue link here] +Related to [issue link here] diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml index fcc9e03069..675d11ec5d 100644 --- a/.github/workflows/e2e-tests.yml +++ b/.github/workflows/e2e-tests.yml @@ -150,7 +150,7 @@ jobs: if: inputs.run-expensive-tests shell: bash env: - E2E_TESTS_CMD: "cd grafana-plugin && yarn test:e2e-expensive" + E2E_TESTS_CMD: "cd ../../grafana-plugin && yarn test:e2e-expensive" GRAFANA_VERSION: ${{ inputs.grafana_version }} GRAFANA_ADMIN_USERNAME: "irm" GRAFANA_ADMIN_PASSWORD: "irm" diff --git a/.tilt/backend/Tiltfile b/.tilt/backend/Tiltfile new file mode 100644 index 0000000000..52493436c8 --- /dev/null +++ b/.tilt/backend/Tiltfile @@ -0,0 +1,16 @@ +label = "OnCall.Backend" + +k8s_resource( + workload="celery", + resource_deps=["mariadb", "redis-master"], + labels=[label], +) + +k8s_resource( + workload="engine", + port_forwards=8080, + resource_deps=["mariadb", "redis-master"], + labels=[label], +) + +k8s_resource(workload="engine-migrate", labels=[label]) \ No newline at end of file diff --git a/.tilt/deps/Tiltfile b/.tilt/deps/Tiltfile new file mode 100644 index 0000000000..acd4e44eef --- /dev/null +++ b/.tilt/deps/Tiltfile @@ -0,0 +1,11 @@ +label = "OnCall.Deps" + +k8s_resource(workload="redis-master", labels=[label]) + +k8s_resource(workload="prometheus-server", labels=[label]) + +k8s_resource( + workload="mariadb", + port_forwards='3307:3306', # : + labels=[label], +) \ No newline at end of file diff --git a/.tilt/plugin/Tiltfile b/.tilt/plugin/Tiltfile new file mode 100644 index 0000000000..858ecffd74 --- /dev/null +++ b/.tilt/plugin/Tiltfile @@ -0,0 +1,26 @@ +label = "OnCall.Plugin" + +is_ci=config.tilt_subcommand == "ci" 
+grafana_plugin_dir="../../grafana-plugin" + +# On CI dependencies are installed separately so we just build prod bundle to be consumed by Grafana dev server +if is_ci: + local_resource( + "build-ui", + labels=[label], + dir=grafana_plugin_dir, + cmd="yarn build", + allow_parallel=True, + ) + +# Locally we install dependencies and we run watch mode +if not is_ci: + local_resource( + "build-ui", + labels=[label], + dir=grafana_plugin_dir, + cmd="yarn install", + serve_dir=grafana_plugin_dir, + serve_cmd="yarn watch", + allow_parallel=True, + ) \ No newline at end of file diff --git a/.tilt/tests/Tiltfile b/.tilt/tests/Tiltfile new file mode 100644 index 0000000000..7fec693baf --- /dev/null +++ b/.tilt/tests/Tiltfile @@ -0,0 +1,80 @@ +label = "OnCall.AllTests" + +load('ext://uibutton', 'cmd_button', 'location', 'text_input', 'bool_input') + +e2e_tests_cmd=os.getenv("E2E_TESTS_CMD", "cd ../../grafana-plugin && yarn test:e2e") +is_ci=config.tilt_subcommand == "ci" + +local_resource( + "e2e-tests", + labels=[label], + cmd=e2e_tests_cmd, + trigger_mode=TRIGGER_MODE_MANUAL, + auto_init=is_ci, + resource_deps=["build-ui", "grafana", "grafana-oncall-app-provisioning-configmap", "engine", "celery"] +) + +cmd_button( + name="E2E Tests - headless run", + argv=["sh", "-c", "yarn --cwd ./grafana-plugin test:e2e $STOP_ON_FIRST_FAILURE $TESTS_FILTER"], + text="Restart headless run", + resource="e2e-tests", + icon_name="replay", + inputs=[ + text_input("BROWSERS", "Browsers (e.g. \"chromium,firefox,webkit\")", "chromium", "chromium,firefox,webkit"), + text_input("TESTS_FILTER", "Test filter (e.g. 
\"timezones.test quality.test\")", "", "Test file names to run"), + bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), + ] +) + +cmd_button( + name="E2E Tests - open watch mode", + argv=["sh", "-c", "yarn --cwd grafana-plugin test:e2e:watch"], + text="Open watch mode", + resource="e2e-tests", + icon_name="visibility", +) + +cmd_button( + name="E2E Tests - show report", + argv=["sh", "-c", "yarn --cwd grafana-plugin playwright show-report"], + text="Show last HTML report", + resource="e2e-tests", + icon_name="assignment", +) + +cmd_button( + name="E2E Tests - stop current run", + argv=["sh", "-c", "kill -9 $(pgrep -f test:e2e)"], + text="Stop", + resource="e2e-tests", + icon_name="dangerous", +) + +# Inspired by https://github.com/grafana/slo/blob/main/Tiltfile#L72 +pod_engine_pytest_script = ''' +set -eu +# get engine k8s pod name from tilt resource name +POD_NAME="$(tilt get kubernetesdiscovery "engine" -ojsonpath='{.status.pods[0].name}')" +kubectl exec "$POD_NAME" -- pytest . $STOP_ON_FIRST_FAILURE $TESTS_FILTER +''' +local_resource( + "pytest-tests", + labels=[label], + cmd=['sh', '-c', pod_engine_pytest_script], + trigger_mode=TRIGGER_MODE_MANUAL, + auto_init=False, + resource_deps=["engine"] +) + +cmd_button( + name="pytest Tests - headless run", + argv=['sh', '-c', pod_engine_pytest_script], + text="Run pytest", + resource="pytest-tests", + icon_name="replay", + inputs=[ + text_input("TESTS_FILTER", "pytest optional arguments (e.g. 
\"apps/webhooks/tests/test_webhook.py::test_build_url_private_raises\")", "", "Test file names to run"), + bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), + ] +) \ No newline at end of file diff --git a/Tiltfile b/Tiltfile index df0e53fa29..ab4a1f64f8 100644 --- a/Tiltfile +++ b/Tiltfile @@ -1,9 +1,7 @@ load('ext://uibutton', 'cmd_button', 'location', 'text_input', 'bool_input') +load("ext://configmap", "configmap_create") + running_under_parent_tiltfile = os.getenv("TILT_PARENT", "false") == "true" -# The user/pass that you will login to Grafana with -grafana_admin_user_pass = os.getenv("GRAFANA_ADMIN_USER_PASS", "oncall") -grafana_version = os.getenv("GRAFANA_VERSION", "latest") -e2e_tests_cmd=os.getenv("E2E_TESTS_CMD", "cd grafana-plugin && yarn test:e2e") twilio_values=[ "oncall.twilio.accountSid=" + os.getenv("TWILIO_ACCOUNT_SID", ""), "oncall.twilio.authToken=" + os.getenv("TWILIO_AUTH_TOKEN", ""), @@ -16,25 +14,11 @@ HELM_PREFIX = "oncall-dev" # Use docker registery generated by ctlptl (dev/kind-config.yaml) DOCKER_REGISTRY = "localhost:63628/" -if not running_under_parent_tiltfile: - # Load the custom Grafana extensions - v1alpha1.extension_repo( - name="grafana-tilt-extensions", - ref="v1.2.0", - url="https://github.com/grafana/tilt-extensions", - ) -v1alpha1.extension( - name="grafana", repo_name="grafana-tilt-extensions", repo_path="grafana" -) - -load("ext://grafana", "grafana") -load("ext://configmap", "configmap_create") load("ext://docker_build_sub", "docker_build_sub") # Tell ops-devenv/Tiltifle where our plugin.json file lives plugin_file = os.path.abspath("grafana-plugin/src/plugin.json") - def plugin_json(): return plugin_file @@ -59,108 +43,15 @@ docker_build_sub( ], ) -# On CI dependencies are installed separately so we just build prod bundle to be consumed by Grafana dev server -if is_ci: - local_resource( - "build-ui", - labels=["OnCallUI"], - dir="grafana-plugin", - cmd="yarn build", - allow_parallel=True, - ) - 
-# Locally we install dependencies and we run watch mode -if not is_ci: - local_resource( - "build-ui", - labels=["OnCallUI"], - dir="grafana-plugin", - cmd="yarn install", - serve_dir="grafana-plugin", - serve_cmd="yarn watch", - allow_parallel=True, - ) - -local_resource( - "e2e-tests", - labels=["allTests"], - cmd=e2e_tests_cmd, - trigger_mode=TRIGGER_MODE_MANUAL, - auto_init=is_ci, - resource_deps=["build-ui", "grafana", "grafana-oncall-app-provisioning-configmap", "engine", "celery"] -) - -cmd_button( - name="E2E Tests - headless run", - argv=["sh", "-c", "yarn --cwd ./grafana-plugin test:e2e $STOP_ON_FIRST_FAILURE $TESTS_FILTER"], - text="Restart headless run", - resource="e2e-tests", - icon_name="replay", - inputs=[ - text_input("BROWSERS", "Browsers (e.g. \"chromium,firefox,webkit\")", "chromium", "chromium,firefox,webkit"), - text_input("TESTS_FILTER", "Test filter (e.g. \"timezones.test quality.test\")", "", "Test file names to run"), - bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), - ] -) - -cmd_button( - name="E2E Tests - open watch mode", - argv=["sh", "-c", "yarn --cwd grafana-plugin test:e2e:watch"], - text="Open watch mode", - resource="e2e-tests", - icon_name="visibility", -) - -cmd_button( - name="E2E Tests - show report", - argv=["sh", "-c", "yarn --cwd grafana-plugin playwright show-report"], - text="Show last HTML report", - resource="e2e-tests", - icon_name="assignment", -) - -cmd_button( - name="E2E Tests - stop current run", - argv=["sh", "-c", "kill -9 $(pgrep -f test:e2e)"], - text="Stop", - resource="e2e-tests", - icon_name="dangerous", -) -# Inspired by https://github.com/grafana/slo/blob/main/Tiltfile#L72 -pod_engine_pytest_script = ''' -set -eu -# get engine k8s pod name from tilt resource name -POD_NAME="$(tilt get kubernetesdiscovery "engine" -ojsonpath='{.status.pods[0].name}')" -kubectl exec "$POD_NAME" -- pytest . 
$STOP_ON_FIRST_FAILURE $TESTS_FILTER -''' -local_resource( - "pytest-tests", - labels=["allTests"], - cmd=['sh', '-c', pod_engine_pytest_script], - trigger_mode=TRIGGER_MODE_MANUAL, - auto_init=False, - resource_deps=["engine"] -) - -cmd_button( - name="pytest Tests - headless run", - argv=['sh', '-c', pod_engine_pytest_script], - text="Run pytest", - resource="pytest-tests", - icon_name="replay", - inputs=[ - text_input("TESTS_FILTER", "pytest optional arguments (e.g. \"apps/webhooks/tests/test_webhook.py::test_build_url_private_raises\")", "", "Test file names to run"), - bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), - ] -) +def load_oncall_helm(): + helm_oncall_values = ["./dev/helm-local.yml", "./dev/helm-local.dev.yml"] + if is_ci: + helm_oncall_values = helm_oncall_values + ["./.github/helm-ci.yml"] + yaml = helm("helm/oncall", name=HELM_PREFIX, values=helm_oncall_values, set=twilio_values, namespace="default") + k8s_yaml(yaml) -helm_oncall_values = ["./dev/helm-local.yml", "./dev/helm-local.dev.yml"] -if is_ci: - helm_oncall_values = helm_oncall_values + ["./.github/helm-ci.yml"] -yaml = helm("helm/oncall", name=HELM_PREFIX, values=helm_oncall_values, set=twilio_values, namespace="default") - -k8s_yaml(yaml) +# --- GRAFANA START ---- # Generate and load the grafana deploy yaml configmap_create( @@ -169,49 +60,63 @@ configmap_create( from_file="dev/grafana/provisioning/plugins/grafana-oncall-app-provisioning.yaml", ) -k8s_resource( - objects=["grafana-oncall-app-provisioning:configmap"], - new_name="grafana-oncall-app-provisioning-configmap", - resource_deps=["build-ui"], - labels=["Grafana"], -) - -# Use separate grafana helm chart if not running_under_parent_tiltfile: - grafana( - grafana_version=grafana_version, - context="grafana-plugin", - plugin_files=["grafana-plugin/src/plugin.json"], - namespace="default", - deps=["grafana-oncall-app-provisioning-configmap", "build-ui"], - extra_env={ - "GF_SECURITY_ADMIN_PASSWORD": 
"oncall", - "GF_SECURITY_ADMIN_USER": "oncall", - "GF_AUTH_ANONYMOUS_ENABLED": "false", - }, + # Load the custom Grafana extensions + v1alpha1.extension_repo( + name="grafana-tilt-extensions", + ref="v1.2.0", + url="https://github.com/grafana/tilt-extensions", ) - -k8s_resource( - workload="celery", - resource_deps=["mariadb", "redis-master"], - labels=["OnCallBackend"], -) -k8s_resource( - workload="engine", - port_forwards=8080, - resource_deps=["mariadb", "redis-master"], - labels=["OnCallBackend"], -) -k8s_resource(workload="engine-migrate", labels=["OnCallBackend"]) - -k8s_resource(workload="redis-master", labels=["OnCallDeps"]) -k8s_resource(workload="prometheus-server", labels=["OnCallDeps"]) -k8s_resource( - workload="mariadb", - port_forwards='3307:3306', # : - labels=["OnCallDeps"], +v1alpha1.extension( + name="grafana", repo_name="grafana-tilt-extensions", repo_path="grafana" ) +load("ext://grafana", "grafana") + +def load_grafana(): + # The user/pass that you will login to Grafana with + grafana_admin_user_pass = os.getenv("GRAFANA_ADMIN_USER_PASS", "oncall") + grafana_version = os.getenv("GRAFANA_VERSION", "latest") + + + k8s_resource( + objects=["grafana-oncall-app-provisioning:configmap"], + new_name="grafana-oncall-app-provisioning-configmap", + resource_deps=["build-ui"], + labels=["Grafana"], + ) + # Use separate grafana helm chart + if not running_under_parent_tiltfile: + grafana( + grafana_version=grafana_version, + context="grafana-plugin", + plugin_files=["grafana-plugin/src/plugin.json"], + namespace="default", + deps=["grafana-oncall-app-provisioning-configmap", "build-ui"], + extra_env={ + "GF_SECURITY_ADMIN_PASSWORD": "oncall", + "GF_SECURITY_ADMIN_USER": "oncall", + "GF_AUTH_ANONYMOUS_ENABLED": "false", + }, + ) +# --- GRAFANA END ---- + + +def get_profiles(): + profiles = os.getenv('ONCALL_PROFILES', 'grafana,plugin,backend,tests') + return profiles.split(',') +profiles = get_profiles() + +if 'grafana' in profiles: + load_grafana() +if 
'plugin' in profiles: + include(".tilt/plugin/Tiltfile") +if 'backend' in profiles: + load_oncall_helm() + include(".tilt/backend/Tiltfile") + include(".tilt/deps/Tiltfile") +if 'tests' in profiles: + include(".tilt/tests/Tiltfile") # name all tilt resources after the k8s object namespace + name def resource_name(id): diff --git a/dev/README.md b/dev/README.md index c6f82c5abf..7b0b308f4e 100644 --- a/dev/README.md +++ b/dev/README.md @@ -36,6 +36,14 @@ Related: [How to develop integrations](/engine/config_integrations/README.md) value: "True" ``` + You can also choose set of resources that will be installed in your local cluster, e.g.: + + ```bash + ONCALL_PROFILES=grafana,plugin,backend tilt up + ``` + + Available profiles are: `grafana,plugin,backend,tests`, by default all the profiles are enabled. + 3. Wait until all resources are green and open (user: oncall, password: oncall) 4. Modify source code, backend and frontend will be hot reloaded diff --git a/engine/apps/alerts/models/alert_group.py b/engine/apps/alerts/models/alert_group.py index 8eea0c0e23..3429361cd4 100644 --- a/engine/apps/alerts/models/alert_group.py +++ b/engine/apps/alerts/models/alert_group.py @@ -554,7 +554,7 @@ def get_paged_users(self) -> typing.List[PagedUser]: log_records = self.log_records.filter( type__in=(AlertGroupLogRecord.TYPE_DIRECT_PAGING, AlertGroupLogRecord.TYPE_UNPAGE_USER) - ) + ).order_by("created_at") for log_record in log_records: # filter paging events, track still active escalations @@ -592,7 +592,9 @@ def get_paged_users(self) -> typing.List[PagedUser]: } else: # user was unpaged at some point, remove them - del users[user_id] + # there could be multiple unpage log records if API was hit several times + if user_id in users: + del users[user_id] return list(users.values()) diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py index bbcec36d0b..8258da1956 100644 --- a/engine/apps/alerts/tasks/notify_user.py +++ 
b/engine/apps/alerts/tasks/notify_user.py @@ -626,6 +626,7 @@ def send_bundled_notification(user_notification_bundle_id: int): schedule_perform_notification_task, log_record_notification_triggered.pk, log_record_notification_triggered.alert_group_id, + False, ) ) notifications.delete() diff --git a/engine/apps/alerts/tests/test_alert_group.py b/engine/apps/alerts/tests/test_alert_group.py index caecaa8e6d..ae2aeceedd 100644 --- a/engine/apps/alerts/tests/test_alert_group.py +++ b/engine/apps/alerts/tests/test_alert_group.py @@ -589,6 +589,24 @@ def _make_log_record(alert_group, user, log_type, important=False): assert len(paged_users) == 1 assert alert_group.get_paged_users()[0]["pk"] == user.public_primary_key + # user was paged and then paged again, then unpaged - they should not show up + alert_group = make_alert_group(alert_receive_channel) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER) + + paged_users = alert_group.get_paged_users() + assert len(paged_users) == 0 + + # adding extra unpage events should not break things + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING) + + paged_users = alert_group.get_paged_users() + assert len(paged_users) == 1 + assert alert_group.get_paged_users()[0]["pk"] == user.public_primary_key + @patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None) @pytest.mark.django_db diff --git a/engine/apps/api/serializers/channel_filter.py b/engine/apps/api/serializers/channel_filter.py index 7ff0f0f12e..408759fb91 100644 --- a/engine/apps/api/serializers/channel_filter.py +++ b/engine/apps/api/serializers/channel_filter.py @@ -147,7 +147,8 @@ def 
get_filtering_term_as_jinja2(self, obj): elif obj.filtering_term_type == ChannelFilter.FILTERING_TERM_TYPE_REGEX: # Four curly braces will result in two curly braces in the final string # rf"..." is a raw f string, to keep original filtering_term - return rf'{{{{ payload | json_dumps | regex_search("{obj.filtering_term}") }}}}' + escaped_quotes = obj.filtering_term.replace('"', '\\"') if obj.filtering_term else "" + return rf'{{{{ payload | json_dumps | regex_search("{escaped_quotes}") }}}}' elif obj.filtering_labels and obj.filtering_term_type == ChannelFilter.FILTERING_TERM_TYPE_LABELS: # required labels labels = [ diff --git a/engine/apps/api/serializers/organization.py b/engine/apps/api/serializers/organization.py index 6279c0a19d..cac0edd12a 100644 --- a/engine/apps/api/serializers/organization.py +++ b/engine/apps/api/serializers/organization.py @@ -13,7 +13,7 @@ class FastSlackTeamIdentitySerializer(serializers.ModelSerializer): class Meta: model = SlackTeamIdentity - fields = ["cached_name"] + fields = ["cached_name", "needs_reinstall"] class OrganizationSerializer(EagerLoadingMixin, serializers.ModelSerializer): diff --git a/engine/apps/api/tests/test_alert_group.py b/engine/apps/api/tests/test_alert_group.py index fa4417ee99..85a9af0988 100644 --- a/engine/apps/api/tests/test_alert_group.py +++ b/engine/apps/api/tests/test_alert_group.py @@ -24,6 +24,7 @@ from apps.api.serializers.alert import AlertFieldsCacheSerializerMixin from apps.api.serializers.alert_group import AlertGroupFieldsCacheSerializerMixin from apps.base.models import UserNotificationPolicyLogRecord +from common.api_helpers.filters import DateRangeFilterMixin alert_raw_request_data = { "evalMatches": [ @@ -975,6 +976,62 @@ def test_get_filter_labels( assert response.json()["results"][0]["pk"] == alert_groups[0].public_primary_key +@pytest.mark.django_db +def test_get_title_search( + settings, + make_organization_and_user_with_plugin_token, + make_alert_receive_channel, + 
make_channel_filter, + make_alert_group, + make_alert, + make_user_auth_headers, +): + settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED = True + settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS = 30 + organization, user, token = make_organization_and_user_with_plugin_token() + + alert_receive_channel = make_alert_receive_channel(organization) + channel_filter = make_channel_filter(alert_receive_channel, is_default=True) + + alert_groups = [] + for i in range(3): + alert_group = make_alert_group( + alert_receive_channel, channel_filter=channel_filter, web_title_cache=f"testing {i+1}" + ) + # alert groups starting every months going back + alert_group.started_at = timezone.now() - datetime.timedelta(days=10 + 30 * i) + alert_group.save(update_fields=["started_at"]) + make_alert(alert_group=alert_group, raw_request_data=alert_raw_request_data) + alert_groups.append(alert_group) + + client = APIClient() + url = reverse("api-internal:alertgroup-list") + + # check that the search returns alert groups started in the last 30 days + response = client.get( + url + "?search=testing", + format="json", + **make_user_auth_headers(user, token), + ) + assert response.status_code == status.HTTP_200_OK + assert len(response.json()["results"]) == 1 + assert response.json()["results"][0]["pk"] == alert_groups[0].public_primary_key + + # check that the search returns alert groups started in the last 30 days of specified date range + response = client.get( + url + + "?search=testing&started_at={}_{}".format( + (timezone.now() - datetime.timedelta(days=500)).strftime(DateRangeFilterMixin.DATE_FORMAT), + (timezone.now() - datetime.timedelta(days=30)).strftime(DateRangeFilterMixin.DATE_FORMAT), + ), + format="json", + **make_user_auth_headers(user, token), + ) + assert response.status_code == status.HTTP_200_OK + assert len(response.json()["results"]) == 1 + assert response.json()["results"][0]["pk"] == alert_groups[1].public_primary_key + + @pytest.mark.django_db @pytest.mark.parametrize( 
"role,expected_status", diff --git a/engine/apps/api/tests/test_channel_filter.py b/engine/apps/api/tests/test_channel_filter.py index 9cd4e08806..f36fec634d 100644 --- a/engine/apps/api/tests/test_channel_filter.py +++ b/engine/apps/api/tests/test_channel_filter.py @@ -509,8 +509,10 @@ def test_channel_filter_convert_from_regex_to_jinja2( make_channel_filter(alert_receive_channel, is_default=True) - # r"..." used to keep this string as raw string - regex_filtering_term = r"\".*\": \"This alert was sent by user for demonstration purposes\"" + # regex as set by Terraform/API (not a raw string, but a string with escaped characters) + # see ChannelFilterSerializer in apps.public_api.serializers.routes.py + regex_filtering_term = '".*": "This alert was sent by user for demonstration purposes"' + # r"..." to define the expected jinja2 template translation final_filtering_term = r'{{ payload | json_dumps | regex_search("\".*\": \"This alert was sent by user for demonstration purposes\"") }}' payload = {"description": "This alert was sent by user for demonstration purposes"} diff --git a/engine/apps/api/tests/test_oncall_shift.py b/engine/apps/api/tests/test_oncall_shift.py index 3e47100902..9ebaa9f6f1 100644 --- a/engine/apps/api/tests/test_oncall_shift.py +++ b/engine/apps/api/tests/test_oncall_shift.py @@ -1778,7 +1778,7 @@ def test_on_call_shift_preview_without_users( "is_override": False, "is_empty": True, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "source": "web", diff --git a/engine/apps/api/tests/test_schedules.py b/engine/apps/api/tests/test_schedules.py index 7755ad1f10..e0477ec003 100644 --- a/engine/apps/api/tests/test_schedules.py +++ b/engine/apps/api/tests/test_schedules.py @@ -1201,7 +1201,7 @@ def test_filter_events_overrides( } ], "missing_users": [], - "priority_level": None, + "priority_level": 0, "source": "api", "calendar_type": OnCallSchedule.OVERRIDES, "is_empty": False, @@ -1307,7 +1307,7 @@ 
def test_filter_events_final_schedule( "end": start_date + timezone.timedelta(hours=start + duration), "is_gap": is_gap, "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start), "user": user, } diff --git a/engine/apps/api/views/alert_group.py b/engine/apps/api/views/alert_group.py index 96d49aded3..5fd6f511fd 100644 --- a/engine/apps/api/views/alert_group.py +++ b/engine/apps/api/views/alert_group.py @@ -1,8 +1,10 @@ import typing +from datetime import timedelta from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.db.models import Count, Max, Q +from django.utils import timezone from django_filters import rest_framework as filters from drf_spectacular.utils import extend_schema, inline_serializer from rest_framework import mixins, serializers, status, viewsets @@ -228,6 +230,30 @@ def retrieve(self, request, *args, **kwargs): return Response(data={"error_code": "wrong_team"}, status=status.HTTP_403_FORBIDDEN) +class AlertGroupSearchFilter(SearchFilter): + def filter_queryset(self, request, queryset, view): + search_fields = self.get_search_fields(view, request) + search_terms = self.get_search_terms(request) + if not search_fields or not search_terms: + return queryset + + if settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS: + started_at = request.query_params.get("started_at") + end = DateRangeFilterMixin.parse_custom_datetime_range(started_at)[1] if started_at else timezone.now() + queryset = queryset.filter( + started_at__gte=end - timedelta(days=settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS) + ) + + return super().filter_queryset(request, queryset, view) + + def get_search_fields(self, view, request): + return ( + ["=public_primary_key", "=inside_organization_number", "web_title_cache"] + if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED + else [] + ) + + class AlertGroupView( PreviewTemplateMixin, AlertGroupTeamFilteringMixin, 
@@ -275,12 +301,7 @@ class AlertGroupView( pagination_class = AlertGroupCursorPaginator - filter_backends = [SearchFilter, filters.DjangoFilterBackend] - search_fields = ( - ["=public_primary_key", "=inside_organization_number", "web_title_cache"] - if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED - else [] - ) + filter_backends = [AlertGroupSearchFilter, filters.DjangoFilterBackend] filterset_class = AlertGroupFilter def get_serializer_class(self): @@ -311,6 +332,13 @@ def get_queryset(self, ignore_filtering_by_available_teams=False): alert_receive_channels_ids = list(alert_receive_channels_qs.values_list("id", flat=True)) queryset = AlertGroup.objects.filter(channel__in=alert_receive_channels_ids) + if settings.ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX: + # workaround related to MySQL "ORDER BY LIMIT Query Optimizer Bug" + # read more: https://hackmysql.com/infamous-order-by-limit-query-optimizer-bug/ + # this achieves the same effect as "FORCE INDEX (alert_group_list_index)" when + # paired with "ORDER BY started_at_optimized DESC" (ordering is performed in AlertGroupCursorPaginator). + queryset = queryset.extra({"started_at_optimized": "alerts_alertgroup.started_at + 0"}) + # Filter by labels. Since alert group labels are "static" filter by names, not IDs. label_query = self.request.query_params.getlist("label", []) kv_pairs = parse_label_query(label_query) @@ -747,7 +775,6 @@ def filters(self, request): "href": api_root + "teams/", "global": True, }, - {"name": "search", "type": "search"}, {"name": "integration", "type": "options", "href": api_root + "alert_receive_channels/?filters=true"}, {"name": "escalation_chain", "type": "options", "href": api_root + "escalation_chains/?filters=true"}, { @@ -811,6 +838,16 @@ def filters(self, request): }, ] + if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED: + description = "Search by alert group ID, number or title." 
+ if settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS: + description += ( + f" The search is limited to alert groups started in the last " + f"{settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS} days of the specified date range." + ) + + filter_options = [{"name": "search", "type": "search", "description": description}] + filter_options + if is_labels_feature_enabled(self.request.auth.organization): filter_options.append( { diff --git a/engine/apps/api/views/alert_receive_channel.py b/engine/apps/api/views/alert_receive_channel.py index 21ea9209f5..c4103e9d2d 100644 --- a/engine/apps/api/views/alert_receive_channel.py +++ b/engine/apps/api/views/alert_receive_channel.py @@ -474,6 +474,7 @@ def filters(self, request): api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", diff --git a/engine/apps/api/views/escalation_chain.py b/engine/apps/api/views/escalation_chain.py index af670c2d88..952da9223b 100644 --- a/engine/apps/api/views/escalation_chain.py +++ b/engine/apps/api/views/escalation_chain.py @@ -183,10 +183,10 @@ def details(self, request, pk): @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -195,7 +195,4 @@ def filters(self, request): }, ] - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) diff --git a/engine/apps/api/views/schedule.py b/engine/apps/api/views/schedule.py index a14fc427a5..261851d6e6 100644 --- a/engine/apps/api/views/schedule.py +++ b/engine/apps/api/views/schedule.py @@ -577,10 +577,10 @@ def mention_options(self, request): @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = 
"/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -610,7 +610,4 @@ def filters(self, request): }, ] - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) diff --git a/engine/apps/api/views/user.py b/engine/apps/api/views/user.py index 85b8182578..3bbc50ac07 100644 --- a/engine/apps/api/views/user.py +++ b/engine/apps/api/views/user.py @@ -870,10 +870,10 @@ def export_token(self, request, pk) -> Response: ) @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -882,9 +882,6 @@ def filters(self, request): }, ] - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) diff --git a/engine/apps/api/views/webhooks.py b/engine/apps/api/views/webhooks.py index 0deb30dd34..f32b31ff55 100644 --- a/engine/apps/api/views/webhooks.py +++ b/engine/apps/api/views/webhooks.py @@ -140,10 +140,10 @@ def get_object_from_organization(self): @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -161,9 +161,6 @@ def filters(self, request): } ) - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) @action(methods=["get"], detail=True) diff --git a/engine/apps/chatops_proxy/client.py b/engine/apps/chatops_proxy/client.py index 1a0b63bcb1..1c78a21312 100644 --- a/engine/apps/chatops_proxy/client.py +++ b/engine/apps/chatops_proxy/client.py @@ 
-6,7 +6,7 @@ import requests from django.conf import settings -SERVICE_TYPE_ONCALL = "oncall" +APP_TYPE_ONCALL = "oncall" PROVIDER_TYPE_SLACK = "slack" @@ -172,16 +172,14 @@ def get_oauth_installation( return OAuthInstallation(**response.json()["oauth_installation"]), response def delete_oauth_installation( - self, - stack_id: int, - provider_type: str, - grafana_user_id: int, + self, stack_id: int, provider_type: str, grafana_user_id: int, app_type: str ) -> tuple[bool, requests.models.Response]: url = f"{self.api_base_url}/oauth_installations/uninstall" d = { "stack_id": stack_id, "provider_type": provider_type, "grafana_user_id": grafana_user_id, + "app_type": app_type, } response = requests.post(url=url, json=d, headers=self._headers) self._check_response(response) diff --git a/engine/apps/chatops_proxy/register_oncall_tenant.py b/engine/apps/chatops_proxy/register_oncall_tenant.py index 5aaa45c414..c0daeb5641 100644 --- a/engine/apps/chatops_proxy/register_oncall_tenant.py +++ b/engine/apps/chatops_proxy/register_oncall_tenant.py @@ -1,7 +1,7 @@ # register_oncall_tenant moved to separate file from engine/apps/chatops_proxy/utils.py to avoid circular imports. 
from django.conf import settings -from apps.chatops_proxy.client import SERVICE_TYPE_ONCALL, ChatopsProxyAPIClient +from apps.chatops_proxy.client import APP_TYPE_ONCALL, ChatopsProxyAPIClient def register_oncall_tenant(org): @@ -12,7 +12,7 @@ def register_oncall_tenant(org): client.register_tenant( str(org.uuid), settings.ONCALL_BACKEND_REGION, - SERVICE_TYPE_ONCALL, + APP_TYPE_ONCALL, org.stack_id, org.stack_slug, ) diff --git a/engine/apps/chatops_proxy/utils.py b/engine/apps/chatops_proxy/utils.py index 06e6f14ec3..1c242bf608 100644 --- a/engine/apps/chatops_proxy/utils.py +++ b/engine/apps/chatops_proxy/utils.py @@ -3,10 +3,11 @@ """ import logging import typing +from urllib.parse import urljoin from django.conf import settings -from .client import PROVIDER_TYPE_SLACK, SERVICE_TYPE_ONCALL, ChatopsProxyAPIClient, ChatopsProxyAPIException +from .client import APP_TYPE_ONCALL, PROVIDER_TYPE_SLACK, ChatopsProxyAPIClient, ChatopsProxyAPIException from .register_oncall_tenant import register_oncall_tenant from .tasks import ( link_slack_team_async, @@ -29,8 +30,8 @@ def get_installation_link_from_chatops_proxy(user) -> typing.Optional[str]: link, _ = client.get_slack_oauth_link( org.stack_id, user.user_id, - org.web_link, - SERVICE_TYPE_ONCALL, + urljoin(org.web_link, "settings?tab=ChatOps&chatOpsTab=Slack"), + APP_TYPE_ONCALL, ) return link except ChatopsProxyAPIException as api_exc: @@ -63,7 +64,7 @@ def register_oncall_tenant_with_async_fallback(org): kwargs={ "service_tenant_id": str(org.uuid), "cluster_slug": settings.ONCALL_BACKEND_REGION, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, "stack_id": org.stack_id, "stack_slug": org.stack_slug, "org_id": org.id, @@ -80,7 +81,7 @@ def unregister_oncall_tenant(service_tenant_id: str, cluster_slug: str): kwargs={ "service_tenant_id": service_tenant_id, "cluster_slug": cluster_slug, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, }, countdown=2, ) @@ -97,7 +98,7 @@ 
def can_link_slack_team( """ client = ChatopsProxyAPIClient(settings.ONCALL_GATEWAY_URL, settings.ONCALL_GATEWAY_API_TOKEN) try: - response = client.can_slack_link(service_tenant_id, cluster_slug, slack_team_id, SERVICE_TYPE_ONCALL) + response = client.can_slack_link(service_tenant_id, cluster_slug, slack_team_id, APP_TYPE_ONCALL) return response.status_code == 200 except Exception as e: logger.error( @@ -111,7 +112,7 @@ def can_link_slack_team( def link_slack_team(service_tenant_id: str, slack_team_id: str): client = ChatopsProxyAPIClient(settings.ONCALL_GATEWAY_URL, settings.ONCALL_GATEWAY_API_TOKEN) try: - client.link_slack_team(service_tenant_id, slack_team_id, SERVICE_TYPE_ONCALL) + client.link_slack_team(service_tenant_id, slack_team_id, APP_TYPE_ONCALL) except Exception as e: logger.error( f'msg="Failed to link slack team: {e}"' @@ -121,7 +122,7 @@ def link_slack_team(service_tenant_id: str, slack_team_id: str): kwargs={ "service_tenant_id": service_tenant_id, "slack_team_id": slack_team_id, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, }, countdown=2, ) @@ -132,7 +133,7 @@ def unlink_slack_team(service_tenant_id: str, slack_team_id: str): kwargs={ "service_tenant_id": service_tenant_id, "slack_team_id": slack_team_id, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, } ) @@ -144,7 +145,9 @@ def uninstall_slack(stack_id: int, grafana_user_id: int) -> bool: """ client = ChatopsProxyAPIClient(settings.ONCALL_GATEWAY_URL, settings.ONCALL_GATEWAY_API_TOKEN) try: - removed, response = client.delete_oauth_installation(stack_id, PROVIDER_TYPE_SLACK, grafana_user_id) + removed, response = client.delete_oauth_installation( + stack_id, PROVIDER_TYPE_SLACK, grafana_user_id, APP_TYPE_ONCALL + ) except ChatopsProxyAPIException as api_exc: if api_exc.status == 404: return True diff --git a/engine/apps/grafana_plugin/tasks/sync.py b/engine/apps/grafana_plugin/tasks/sync.py index c33c85414c..829fb923e5 100644 --- 
a/engine/apps/grafana_plugin/tasks/sync.py +++ b/engine/apps/grafana_plugin/tasks/sync.py @@ -166,26 +166,13 @@ def cleanup_empty_deleted_integrations(organization_pk, dry_run=True): integration.hard_delete() -@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=0) -def start_cleanup_organizations(): - # TODO: Remove next release after tasks revoked - cleanup_threshold = timezone.now() - INACTIVE_PERIOD - organization_qs = Organization.objects.filter(last_time_synced__lte=cleanup_threshold) - organization_pks = organization_qs.values_list("pk", flat=True) - logger.debug(f"Found {len(organization_pks)} organizations") - max_countdown = CLEANUP_PERIOD.seconds - for idx, organization_pk in enumerate(organization_pks): - countdown = idx % max_countdown # Spread orgs evenly - cleanup_organization_async.apply_async((organization_pk,), countdown=countdown) - - @shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=0) def start_cleanup_deleted_integrations(): cleanup_threshold = timezone.now() - CLEANUP_PERIOD channels_qs = AlertReceiveChannel.objects_with_deleted.filter(deleted_at__gte=cleanup_threshold) organization_pks = set(channels_qs.values_list("organization_id", flat=True)) logger.debug(f"Found {len(organization_pks)} organizations") - for organization_pk in enumerate(organization_pks): + for _, organization_pk in enumerate(organization_pks): cleanup_empty_deleted_integrations.apply_async( (organization_pk, False), ) diff --git a/engine/apps/metrics_exporter/metrics_collectors.py b/engine/apps/metrics_exporter/metrics_collectors.py index 282e57180d..720cdea9c6 100644 --- a/engine/apps/metrics_exporter/metrics_collectors.py +++ b/engine/apps/metrics_exporter/metrics_collectors.py @@ -1,3 +1,4 @@ +import logging import re import typing @@ -28,6 +29,7 @@ application_metrics_registry = CollectorRegistry() +logger = logging.getLogger(__name__) # _RE_BASE_PATTERN allows for optional curly-brackets around the metric name as in 
some cases this may occur # see common.cache.ensure_cache_key_allocates_to_the_same_hash_slot for more details regarding this @@ -92,6 +94,10 @@ def _get_alert_groups_total_metric(self, org_ids): ) for org_key, ag_states in org_ag_states.items(): for _, integration_data in ag_states.items(): + if "services" not in integration_data: + logger.warning(f"Deleting stale metrics cache for {org_key}") + cache.delete(org_key) + break # Labels values should have the same order as _integration_labels_with_state labels_values = [ integration_data["integration_name"], # integration @@ -125,6 +131,10 @@ def _get_response_time_metric(self, org_ids): ) for org_key, ag_response_time in org_ag_response_times.items(): for _, integration_data in ag_response_time.items(): + if "services" not in integration_data: + logger.warning(f"Deleting stale metrics cache for {org_key}") + cache.delete(org_key) + break # Labels values should have the same order as _integration_labels labels_values = [ integration_data["integration_name"], # integration diff --git a/engine/apps/metrics_exporter/tests/conftest.py b/engine/apps/metrics_exporter/tests/conftest.py index 07dda37d49..b3204bce79 100644 --- a/engine/apps/metrics_exporter/tests/conftest.py +++ b/engine/apps/metrics_exporter/tests/conftest.py @@ -113,6 +113,60 @@ def _mock_cache_get_many(keys, *args, **kwargs): monkeypatch.setattr(cache, "get_many", _mock_cache_get_many) +@pytest.fixture() +def mock_cache_get_old_metrics_for_collector(monkeypatch): + def _mock_cache_get(key, *args, **kwargs): + if ALERT_GROUPS_TOTAL in key: + key = ALERT_GROUPS_TOTAL + elif ALERT_GROUPS_RESPONSE_TIME in key: + key = ALERT_GROUPS_RESPONSE_TIME + elif USER_WAS_NOTIFIED_OF_ALERT_GROUPS in key: + key = USER_WAS_NOTIFIED_OF_ALERT_GROUPS + test_metrics = { + ALERT_GROUPS_TOTAL: { + 1: { + "integration_name": "Test metrics integration", + "team_name": "Test team", + "team_id": 1, + "org_id": 1, + "slug": "Test stack", + "id": 1, + "firing": 2, + "silenced": 4, + 
"acknowledged": 3, + "resolved": 5, + }, + }, + ALERT_GROUPS_RESPONSE_TIME: { + 1: { + "integration_name": "Test metrics integration", + "team_name": "Test team", + "team_id": 1, + "org_id": 1, + "slug": "Test stack", + "id": 1, + "response_time": [2, 10, 200, 650], + }, + }, + USER_WAS_NOTIFIED_OF_ALERT_GROUPS: { + 1: { + "org_id": 1, + "slug": "Test stack", + "id": 1, + "user_username": "Alex", + "counter": 4, + } + }, + } + return test_metrics.get(key) + + def _mock_cache_get_many(keys, *args, **kwargs): + return {key: _mock_cache_get(key) for key in keys if _mock_cache_get(key)} + + monkeypatch.setattr(cache, "get", _mock_cache_get) + monkeypatch.setattr(cache, "get_many", _mock_cache_get_many) + + @pytest.fixture() def mock_get_metrics_cache(monkeypatch): def _mock_cache_get(key, *args, **kwargs): diff --git a/engine/apps/metrics_exporter/tests/test_metrics_collectors.py b/engine/apps/metrics_exporter/tests/test_metrics_collectors.py index 24b54a5c04..640ac57bc7 100644 --- a/engine/apps/metrics_exporter/tests/test_metrics_collectors.py +++ b/engine/apps/metrics_exporter/tests/test_metrics_collectors.py @@ -1,6 +1,7 @@ from unittest.mock import patch import pytest +from django.core.cache import cache from django.test import override_settings from prometheus_client import CollectorRegistry, generate_latest @@ -11,6 +12,7 @@ NO_SERVICE_VALUE, USER_WAS_NOTIFIED_OF_ALERT_GROUPS, ) +from apps.metrics_exporter.helpers import get_metric_alert_groups_response_time_key, get_metric_alert_groups_total_key from apps.metrics_exporter.metrics_collectors import ApplicationMetricsCollector from apps.metrics_exporter.tests.conftest import METRICS_TEST_SERVICE_NAME @@ -75,3 +77,38 @@ def get_expected_labels(service_name=NO_SERVICE_VALUE, **kwargs): # Since there is no recalculation timer for test org in cache, start_calculate_and_cache_metrics must be called assert mocked_start_calculate_and_cache_metrics.called test_metrics_registry.unregister(collector) + + 
+@patch("apps.metrics_exporter.metrics_collectors.get_organization_ids", return_value=[1]) +@patch("apps.metrics_exporter.metrics_collectors.start_calculate_and_cache_metrics.apply_async") +@pytest.mark.django_db +def test_application_metrics_collector_with_old_metrics_without_services( + mocked_org_ids, mocked_start_calculate_and_cache_metrics, mock_cache_get_old_metrics_for_collector +): + """Test that ApplicationMetricsCollector generates expected metrics from cache""" + + org_id = 1 + collector = ApplicationMetricsCollector() + test_metrics_registry = CollectorRegistry() + test_metrics_registry.register(collector) + for metric in test_metrics_registry.collect(): + if metric.name == ALERT_GROUPS_TOTAL: + alert_groups_total_metrics_cache = cache.get(get_metric_alert_groups_total_key(org_id)) + assert alert_groups_total_metrics_cache and "services" not in alert_groups_total_metrics_cache[1] + assert len(metric.samples) == 0 + elif metric.name == ALERT_GROUPS_RESPONSE_TIME: + alert_groups_response_time_metrics_cache = cache.get(get_metric_alert_groups_response_time_key(org_id)) + assert ( + alert_groups_response_time_metrics_cache + and "services" not in alert_groups_response_time_metrics_cache[1] + ) + assert len(metric.samples) == 0 + elif metric.name == USER_WAS_NOTIFIED_OF_ALERT_GROUPS: + # metric with labels for each notified user + assert len(metric.samples) == 1 + result = generate_latest(test_metrics_registry).decode("utf-8") + assert result is not None + assert mocked_org_ids.called + # Since there is no recalculation timer for test org in cache, start_calculate_and_cache_metrics must be called + assert mocked_start_calculate_and_cache_metrics.called + test_metrics_registry.unregister(collector) diff --git a/engine/apps/schedules/models/on_call_schedule.py b/engine/apps/schedules/models/on_call_schedule.py index 5dcd83e462..78636333f9 100644 --- a/engine/apps/schedules/models/on_call_schedule.py +++ b/engine/apps/schedules/models/on_call_schedule.py @@ 
-412,7 +412,7 @@ def filter_events( for user in shift["users"] ], "missing_users": shift["missing_users"], - "priority_level": shift["priority"] if shift["priority"] != 0 else None, + "priority_level": shift["priority"] or 0, "source": shift["source"], "calendar_type": shift["calendar_type"], "is_empty": len(shift["users"]) == 0 and not is_gap, diff --git a/engine/apps/schedules/tests/test_on_call_schedule.py b/engine/apps/schedules/tests/test_on_call_schedule.py index 6baa33fdf8..4e249d67c6 100644 --- a/engine/apps/schedules/tests/test_on_call_schedule.py +++ b/engine/apps/schedules/tests/test_on_call_schedule.py @@ -120,7 +120,7 @@ def test_filter_events(make_organization, make_user_for_organization, make_sched "is_override": True, "is_empty": False, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [ { @@ -178,7 +178,7 @@ def test_filter_events_include_gaps(make_organization, make_user_for_organizatio "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -213,7 +213,7 @@ def test_filter_events_include_gaps(make_organization, make_user_for_organizatio "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -263,7 +263,7 @@ def test_filter_events_include_shift_info( "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -302,7 +302,7 @@ def test_filter_events_include_shift_info( "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -512,7 +512,7 @@ def test_final_schedule_events( "end": start_date + timezone.timedelta(hours=start + duration), "is_gap": is_gap, "is_override": 
is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start), "user": user, "shift": ( @@ -599,7 +599,7 @@ def test_final_schedule_override_no_priority_shift( "calendar_type": 1 if is_override else 0, "end": start_date + timezone.timedelta(hours=start + duration), "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0), "user": user, } @@ -679,7 +679,7 @@ def test_final_schedule_override_split( "calendar_type": 1 if is_override else 0, "end": start_date + timezone.timedelta(hours=start + duration), "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0), "user": user, } @@ -1051,7 +1051,7 @@ def test_preview_shift_no_user(make_organization, make_user_for_organization, ma "is_override": False, "is_empty": True, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": new_shift.public_primary_key}, @@ -1130,7 +1130,7 @@ def test_preview_override_shift(make_organization, make_user_for_organization, m "is_override": True, "is_empty": False, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [ { @@ -1156,7 +1156,7 @@ def test_preview_override_shift(make_organization, make_user_for_organization, m expected_events = [ { "end": start_date + timezone.timedelta(hours=start + duration), - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0), "user": user, "is_override": is_override, diff --git a/engine/apps/slack/installation.py b/engine/apps/slack/installation.py index a48784bc2f..393a2a9a70 100644 --- a/engine/apps/slack/installation.py +++ 
b/engine/apps/slack/installation.py @@ -32,13 +32,11 @@ def install_slack_integration(organization, user, oauth_response): """ from apps.slack.models import SlackTeamIdentity - if organization.slack_team_identity is not None: + if organization.slack_team_identity and not organization.slack_team_identity.needs_reinstall: raise SlackInstallationExc("Organization already has Slack integration") slack_team_id = oauth_response["team"]["id"] - slack_team_identity, is_slack_team_identity_created = SlackTeamIdentity.objects.get_or_create( - slack_id=slack_team_id, - ) + slack_team_identity, _ = SlackTeamIdentity.objects.get_or_create(slack_id=slack_team_id) # update slack oauth fields by data from response slack_team_identity.update_oauth_fields(user, organization, oauth_response) write_chatops_insight_log( diff --git a/engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py b/engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py new file mode 100644 index 0000000000..f4798876b7 --- /dev/null +++ b/engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.11 on 2024-07-16 16:11 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('slack', '0004_auto_20230913_1020'), + ] + + operations = [ + migrations.AddField( + model_name='slackteamidentity', + name='_unified_slack_app_installed', + field=models.BooleanField(default=False, null=True), + ), + ] diff --git a/engine/apps/slack/models/slack_team_identity.py b/engine/apps/slack/models/slack_team_identity.py index a5e24c12f9..79e603a742 100644 --- a/engine/apps/slack/models/slack_team_identity.py +++ b/engine/apps/slack/models/slack_team_identity.py @@ -1,6 +1,7 @@ import logging import typing +from django.conf import settings from django.db import models from django.db.models import JSONField @@ -48,6 +49,9 @@ class 
SlackTeamIdentity(models.Model): # response after oauth.access. This field is used to reinstall app to another OnCall workspace cached_reinstall_data = JSONField(null=True, default=None) + # Do not use directly, use the "needs_reinstall" property instead + _unified_slack_app_installed = models.BooleanField(null=True, default=False) + class Meta: ordering = ("datetime",) @@ -74,6 +78,10 @@ def update_oauth_fields(self, user, organization, oauth_response): self.installed_by = slack_user_identity self.cached_reinstall_data = None self.installed_via_granular_permissions = True + + if settings.UNIFIED_SLACK_APP_ENABLED: + self._unified_slack_app_installed = True + self.save() def get_cached_channels(self, search_term=None, slack_id=None): @@ -129,6 +137,10 @@ def app_id(self): self.save(update_fields=["cached_app_id"]) return self.cached_app_id + @property + def needs_reinstall(self): + return settings.UNIFIED_SLACK_APP_ENABLED and not self._unified_slack_app_installed + def get_users_from_slack_conversation_for_organization(self, channel_id, organization): sc = SlackClient(self) members = self.get_conversation_members(sc, channel_id) diff --git a/engine/apps/slack/scenarios/alertgroup_appearance.py b/engine/apps/slack/scenarios/alertgroup_appearance.py index 431e1a211b..b07bd85b5e 100644 --- a/engine/apps/slack/scenarios/alertgroup_appearance.py +++ b/engine/apps/slack/scenarios/alertgroup_appearance.py @@ -18,6 +18,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class OpenAlertAppearanceDialogStep(AlertGroupActionsMixin, scenario_step.ScenarioStep): @@ -27,7 +28,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not 
self.is_authorized(alert_group): @@ -75,7 +77,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import AlertGroup diff --git a/engine/apps/slack/scenarios/alertgroup_timeline.py b/engine/apps/slack/scenarios/alertgroup_timeline.py index 87976b493c..08f74b8802 100644 --- a/engine/apps/slack/scenarios/alertgroup_timeline.py +++ b/engine/apps/slack/scenarios/alertgroup_timeline.py @@ -13,6 +13,7 @@ PayloadType, ScenarioRoute, ) +from apps.user_management.models import Organization from .step_mixins import AlertGroupActionsMixin @@ -28,6 +29,7 @@ def process_scenario( slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", payload: EventPayload, + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): diff --git a/engine/apps/slack/scenarios/declare_incident.py b/engine/apps/slack/scenarios/declare_incident.py index 7a7589df5b..cf50e20d35 100644 --- a/engine/apps/slack/scenarios/declare_incident.py +++ b/engine/apps/slack/scenarios/declare_incident.py @@ -5,6 +5,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class DeclareIncidentStep(scenario_step.ScenarioStep): @@ -12,7 +13,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Slack sends a POST request to the backend upon clicking a button with a redirect link to Incident. 
diff --git a/engine/apps/slack/scenarios/distribute_alerts.py b/engine/apps/slack/scenarios/distribute_alerts.py index 6eb5586067..c4fb8890e6 100644 --- a/engine/apps/slack/scenarios/distribute_alerts.py +++ b/engine/apps/slack/scenarios/distribute_alerts.py @@ -50,6 +50,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization ATTACH_TO_ALERT_GROUPS_LIMIT = 20 @@ -222,7 +223,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: pass @@ -239,7 +241,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.user_management.models import User @@ -275,7 +278,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -304,7 +308,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -324,7 +329,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = 
self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -490,7 +496,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: # submit selection in modal window if payload["type"] == PayloadType.VIEW_SUBMISSION: @@ -542,7 +549,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -567,7 +575,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -594,7 +603,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: ResolutionNoteModalStep = scenario_step.ScenarioStep.get_step("resolution_note", "ResolutionNoteModalStep") @@ -635,7 +645,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -655,7 +666,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - 
payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -675,7 +687,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -736,7 +749,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import AlertGroup diff --git a/engine/apps/slack/scenarios/invited_to_channel.py b/engine/apps/slack/scenarios/invited_to_channel.py index e56ab9f41e..acdd5fecb6 100644 --- a/engine/apps/slack/scenarios/invited_to_channel.py +++ b/engine/apps/slack/scenarios/invited_to_channel.py @@ -9,6 +9,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -19,7 +20,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: if payload["event"]["user"] == slack_team_identity.bot_user_id: channel_id = payload["event"]["channel"] diff --git a/engine/apps/slack/scenarios/manage_responders.py b/engine/apps/slack/scenarios/manage_responders.py index 63b8bd7fe1..8068475a09 100644 --- a/engine/apps/slack/scenarios/manage_responders.py +++ b/engine/apps/slack/scenarios/manage_responders.py @@ -18,7 
+18,8 @@ if typing.TYPE_CHECKING: from apps.alerts.models import AlertGroup from apps.slack.models import SlackTeamIdentity, SlackUserIdentity - from apps.user_management.models import User + from apps.user_management.models import Organization, User + MANAGE_RESPONDERS_USER_SELECT_ID = "responders_user_select" @@ -35,7 +36,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -53,7 +55,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = _get_alert_group_from_payload(payload) selected_user = _get_selected_user_from_payload(payload) @@ -99,7 +102,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = _get_alert_group_from_payload(payload) selected_user = _get_selected_user_from_payload(payload) @@ -132,7 +136,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = _get_alert_group_from_payload(payload) selected_user = _get_selected_user_from_payload(payload) diff --git a/engine/apps/slack/scenarios/notified_user_not_in_channel.py b/engine/apps/slack/scenarios/notified_user_not_in_channel.py index d3f045d498..f1af583135 100644 --- a/engine/apps/slack/scenarios/notified_user_not_in_channel.py +++ 
b/engine/apps/slack/scenarios/notified_user_not_in_channel.py @@ -6,6 +6,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) @@ -20,7 +21,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: logger.info("Gracefully handle NotifiedUserNotInChannelStep. Do nothing.") pass diff --git a/engine/apps/slack/scenarios/onboarding.py b/engine/apps/slack/scenarios/onboarding.py index d84dc1ef5d..9ca5305b1a 100644 --- a/engine/apps/slack/scenarios/onboarding.py +++ b/engine/apps/slack/scenarios/onboarding.py @@ -6,6 +6,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) @@ -19,7 +20,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: logger.info("InOpenStep, doing nothing.") @@ -29,7 +31,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: pass diff --git a/engine/apps/slack/scenarios/paging.py b/engine/apps/slack/scenarios/paging.py index b9191234e1..7a2b2a3251 100644 --- a/engine/apps/slack/scenarios/paging.py +++ b/engine/apps/slack/scenarios/paging.py @@ -15,6 +15,7 @@ from apps.slack.constants import DIVIDER, PRIVATE_METADATA_MAX_LENGTH from apps.slack.errors import SlackAPIChannelNotFoundError from apps.slack.scenarios import scenario_step +from 
apps.slack.slash_command import SlashCommand from apps.slack.types import ( Block, BlockActionType, @@ -115,13 +116,20 @@ def get_current_items( class StartDirectPaging(scenario_step.ScenarioStep): """Handle slash command invocation and show initial dialog.""" - command_name = [settings.SLACK_DIRECT_PAGING_SLASH_COMMAND] + @staticmethod + def matcher(slash_command: SlashCommand) -> bool: + # Check if command is /escalate. It's a legacy command we keep for smooth transition. + is_legacy_command = slash_command.command == settings.SLACK_DIRECT_PAGING_SLASH_COMMAND + # Check if command is /grafana escalate. It's a new command from unified app. + is_unified_app_command = slash_command.is_grafana_command and slash_command.subcommand == "escalate" + return is_legacy_command or is_unified_app_command def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: input_id_prefix = _generate_input_id_prefix() @@ -136,6 +144,11 @@ def process_scenario( "submit_routing_uid": FinishDirectPaging.routing_uid(), DataKey.USERS: {}, } + # We have access to predefined org only in StartDirectPaging, since it's a slash command. + # Chatops-Proxy adds a special header to slash commands payload to define the organization. + # Other Paging steps are triggered by buttons and actions, + # so we don't have access to predefined org and use private metadata instead. 
+ private_metadata = _inject_predefined_org_to_private_metadata(predefined_org, private_metadata) initial_payload = {"view": {"private_metadata": json.dumps(private_metadata)}} view = render_dialog(slack_user_identity, slack_team_identity, initial_payload, initial=True) self._slack_client.views_open( @@ -153,13 +166,15 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: message = _get_message_from_payload(payload) private_metadata = json.loads(payload["view"]["private_metadata"]) + predefined_org = _get_predefined_org_from_private_metadata(private_metadata, slack_team_identity) channel_id = private_metadata["channel_id"] input_id_prefix = private_metadata["input_id_prefix"] - selected_organization = _get_selected_org_from_payload( + selected_organization = predefined_org or _get_selected_org_from_payload( payload, input_id_prefix, slack_team_identity, slack_user_identity ) @@ -245,7 +260,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: updated_payload = reset_items(payload) view = render_dialog(slack_user_identity, slack_team_identity, updated_payload) @@ -263,7 +279,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: view = render_dialog(slack_user_identity, slack_team_identity, payload) self._slack_client.views_update( @@ -283,7 +300,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + 
predefined_org: typing.Optional["Organization"] = None, ) -> None: private_metadata = json.loads(payload["view"]["private_metadata"]) selected_user = _get_selected_user_from_payload(payload, private_metadata["input_id_prefix"]) @@ -294,19 +312,20 @@ def process_scenario( if not user_is_oncall(selected_user): # display additional confirmation modal metadata = json.loads(payload["view"]["private_metadata"]) - private_metadata = make_private_metadata( - { - "state": payload["view"]["state"], - "input_id_prefix": metadata["input_id_prefix"], - "channel_id": metadata["channel_id"], - "submit_routing_uid": metadata["submit_routing_uid"], - DataKey.USERS: metadata[DataKey.USERS], - }, - selected_user.organization, - ) + private_metadata = { + "state": payload["view"]["state"], + "input_id_prefix": metadata["input_id_prefix"], + "channel_id": metadata["channel_id"], + "submit_routing_uid": metadata["submit_routing_uid"], + DataKey.USERS: metadata[DataKey.USERS], + } + # keep predefined organization in private metadata + if "organization_id" in metadata: + private_metadata["organization_id"] = metadata["organization_id"] view = _display_confirm_participant_invitation_view( - OnPagingConfirmUserChange.routing_uid(), private_metadata + OnPagingConfirmUserChange.routing_uid(), + make_private_metadata(private_metadata, selected_user.organization), ) self._slack_client.views_push(trigger_id=payload["trigger_id"], view=view) else: @@ -336,7 +355,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: policy, key, user_pk = self._parse_action(payload) @@ -361,7 +381,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: 
metadata = json.loads(payload["view"]["private_metadata"]) @@ -372,6 +393,10 @@ def process_scenario( "submit_routing_uid": metadata["submit_routing_uid"], DataKey.USERS: metadata[DataKey.USERS], } + # keep predefined organization in private metadata + if "organization_id" in metadata: + private_metadata["organization_id"] = metadata["organization_id"] + previous_view_payload = { "view": { "state": metadata["state"], @@ -409,7 +434,7 @@ def render_dialog( private_metadata = json.loads(payload["view"]["private_metadata"]) submit_routing_uid = private_metadata.get("submit_routing_uid") - # Get organizations available to user + predefined_org = _get_predefined_org_from_private_metadata(private_metadata, slack_team_identity) available_organizations = _get_available_organizations(slack_team_identity, slack_user_identity) if initial: @@ -417,15 +442,17 @@ def render_dialog( new_input_id_prefix = _generate_input_id_prefix() new_private_metadata = private_metadata new_private_metadata["input_id_prefix"] = new_input_id_prefix - selected_organization = available_organizations.first() + selected_organization = predefined_org if predefined_org else available_organizations.first() is_team_selected, selected_team = False, None else: # setup form using data/state old_input_id_prefix, new_input_id_prefix, new_private_metadata = _get_and_change_input_id_prefix_from_metadata( private_metadata ) - selected_organization = _get_selected_org_from_payload( - payload, old_input_id_prefix, slack_team_identity, slack_user_identity + selected_organization = ( + predefined_org + if predefined_org + else _get_selected_org_from_payload(payload, old_input_id_prefix, slack_team_identity, slack_user_identity) ) is_team_selected, selected_team = _get_selected_team_from_payload(payload, old_input_id_prefix) @@ -441,8 +468,9 @@ def render_dialog( blocks.append(_get_message_input(payload)) - # Add organization select if more than one organization available for user - if len(available_organizations) > 
1: + # Add organization select if org is not defined on chatops-proxy (it should happen only in OSS) + # and user has access to multiple orgs. + if not predefined_org and len(available_organizations) > 1: organization_select = _get_organization_select( available_organizations, selected_organization, new_input_id_prefix ) @@ -570,6 +598,32 @@ def _get_selected_org_from_payload( return Organization.objects.filter(pk=selected_org_id).first() +def _inject_predefined_org_to_private_metadata( + predefined_org: typing.Optional["Organization"], private_metadata: dict +) -> dict: + """ + Injects predefined organization to private metadata. + Predefined org is org defined by chatops-proxy for slash commands. + """ + if predefined_org: + private_metadata["organization_id"] = predefined_org.pk + return private_metadata + + +def _get_predefined_org_from_private_metadata( + private_metadata: dict, + slack_team_identity: "SlackTeamIdentity", +) -> typing.Optional["Organization"]: + """ + Returns organization from private metadata. 
+ """ + org_id = private_metadata.get("organization_id") + if not org_id: + return None + + return slack_team_identity.organizations.filter(pk=org_id).first() + + def _get_team_select_blocks( slack_user_identity: "SlackUserIdentity", organization: "Organization", @@ -953,8 +1007,8 @@ def _generate_input_id_prefix() -> str: }, { "payload_type": PayloadType.SLASH_COMMAND, - "command_name": StartDirectPaging.command_name, "step": StartDirectPaging, + "matcher": StartDirectPaging.matcher, }, { "payload_type": PayloadType.VIEW_SUBMISSION, diff --git a/engine/apps/slack/scenarios/profile_update.py b/engine/apps/slack/scenarios/profile_update.py index 3c46d9baa0..fa7cd9838b 100644 --- a/engine/apps/slack/scenarios/profile_update.py +++ b/engine/apps/slack/scenarios/profile_update.py @@ -6,6 +6,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class ProfileUpdateStep(scenario_step.ScenarioStep): @@ -13,7 +14,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Any update in Slack Profile. 
diff --git a/engine/apps/slack/scenarios/resolution_note.py b/engine/apps/slack/scenarios/resolution_note.py index 67ddea664e..50f907e79b 100644 --- a/engine/apps/slack/scenarios/resolution_note.py +++ b/engine/apps/slack/scenarios/resolution_note.py @@ -38,6 +38,8 @@ if typing.TYPE_CHECKING: from apps.alerts.models import AlertGroup, ResolutionNote, ResolutionNoteSlackMessage from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization + logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -65,7 +67,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import ResolutionNote, ResolutionNoteSlackMessage from apps.slack.models import SlackMessage, SlackUserIdentity @@ -357,7 +360,8 @@ def process_scenario( slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", payload: EventPayload, - data: ScenarioData | None = None, + # TODO: data is incompatible override, parent class has a different set of arguments + data: ScenarioData | None = None, # type: ignore ) -> None: if data: # Argument "data" is used when step is called from other step, e.g. 
AddRemoveThreadMessageStep @@ -642,7 +646,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import AlertGroup, ResolutionNote, ResolutionNoteSlackMessage diff --git a/engine/apps/slack/scenarios/scenario_step.py b/engine/apps/slack/scenarios/scenario_step.py index 37fa2911b8..99811ebba2 100644 --- a/engine/apps/slack/scenarios/scenario_step.py +++ b/engine/apps/slack/scenarios/scenario_step.py @@ -48,7 +48,18 @@ def process_scenario( slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: + """ + process_scenario executes the logic of the step on slack interaction. + Args: + slack_user_identity: SlackUserIdentity who interacted with slack + slack_team_identity: Slack Workspace where interaction happened + payload: EventPayload from slack + predefined_org: + Organization where interaction happened. 
+ It's optionally defined by chatops-proxy for slash commands and should be used only in SlashCommands steps + """ pass @classmethod diff --git a/engine/apps/slack/scenarios/schedules.py b/engine/apps/slack/scenarios/schedules.py index 4762d90ce9..105b8c47cd 100644 --- a/engine/apps/slack/scenarios/schedules.py +++ b/engine/apps/slack/scenarios/schedules.py @@ -20,6 +20,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class EditScheduleShiftNotifyStep(scenario_step.ScenarioStep): @@ -32,7 +33,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: action_type = payload["actions"][0]["type"] if action_type == BlockActionType.BUTTON: diff --git a/engine/apps/slack/scenarios/shift_swap_requests.py b/engine/apps/slack/scenarios/shift_swap_requests.py index d9c54bf198..d114f615b4 100644 --- a/engine/apps/slack/scenarios/shift_swap_requests.py +++ b/engine/apps/slack/scenarios/shift_swap_requests.py @@ -15,6 +15,7 @@ if typing.TYPE_CHECKING: from apps.schedules.models import ShiftSwapRequest from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -197,7 +198,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.schedules import exceptions from apps.schedules.models import ShiftSwapRequest diff --git a/engine/apps/slack/scenarios/slack_channel.py b/engine/apps/slack/scenarios/slack_channel.py index 882815e128..b9d8aa1955 100644 --- 
a/engine/apps/slack/scenarios/slack_channel.py +++ b/engine/apps/slack/scenarios/slack_channel.py @@ -9,6 +9,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class SlackChannelCreatedOrRenamedEventStep(scenario_step.ScenarioStep): @@ -16,7 +17,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Create or rename channel @@ -41,7 +43,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Delete channel @@ -63,7 +66,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Archive channel @@ -84,7 +88,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: UnArchive channel diff --git a/engine/apps/slack/scenarios/slack_channel_integration.py b/engine/apps/slack/scenarios/slack_channel_integration.py index a4d1f63760..a0bbc13bae 100644 --- a/engine/apps/slack/scenarios/slack_channel_integration.py +++ b/engine/apps/slack/scenarios/slack_channel_integration.py @@ -7,6 +7,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = 
logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -17,7 +18,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Any new message in channel. diff --git a/engine/apps/slack/scenarios/slack_usergroup.py b/engine/apps/slack/scenarios/slack_usergroup.py index 865ec688e4..79f409fdab 100644 --- a/engine/apps/slack/scenarios/slack_usergroup.py +++ b/engine/apps/slack/scenarios/slack_usergroup.py @@ -7,6 +7,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class SlackUserGroupEventStep(scenario_step.ScenarioStep): @@ -14,7 +15,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: creation user groups or changes in user groups except its members. @@ -45,7 +47,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: changed members in user group. 
diff --git a/engine/apps/slack/slash_command.py b/engine/apps/slack/slash_command.py index 89131d5bae..1bb349da25 100644 --- a/engine/apps/slack/slash_command.py +++ b/engine/apps/slack/slash_command.py @@ -22,7 +22,8 @@ def __init__(self, command, args): @property def subcommand(self): """ - Return first arg as subcommand + Return first arg as action subcommand: part of command which defines action + Example: /grafana escalate -> escalate """ return self.args[0] if len(self.args) > 0 else None @@ -34,3 +35,7 @@ def parse(payload: SlashCommandPayload): command = payload["command"].lstrip("/") args = payload["text"].split() return SlashCommand(command, args) + + @property + def is_grafana_command(self): + return self.command in ["grafana", "grafana-dev", "grafana-ops", "grafana-prod"] diff --git a/engine/apps/slack/tests/test_installation.py b/engine/apps/slack/tests/test_installation.py index 7075c276e8..23f313e1f1 100644 --- a/engine/apps/slack/tests/test_installation.py +++ b/engine/apps/slack/tests/test_installation.py @@ -95,6 +95,26 @@ def test_install_slack_integration_raises_exception_for_existing_integration( install_slack_integration(organization, user, SLACK_OAUTH_ACCESS_RESPONSE) +@pytest.mark.django_db +def test_install_slack_integration_legacy(settings, make_organization_and_user, make_slack_team_identity): + settings.UNIFIED_SLACK_APP_ENABLED = True + + slack_team_identity = make_slack_team_identity( + slack_id=SLACK_OAUTH_ACCESS_RESPONSE["team"]["id"], _unified_slack_app_installed=False + ) + organization, user = make_organization_and_user() + organization.slack_team_identity = slack_team_identity + organization.save() + + install_slack_integration(organization, user, SLACK_OAUTH_ACCESS_RESPONSE) + slack_team_identity.refresh_from_db() + assert slack_team_identity.needs_reinstall is False + + # raises exception if organization already re-installed the app + with pytest.raises(SlackInstallationExc): + install_slack_integration(organization, user, 
SLACK_OAUTH_ACCESS_RESPONSE) + + @patch("apps.slack.tasks.clean_slack_integration_leftovers.apply_async", return_value=None) @pytest.mark.django_db def test_uninstall_slack_integration( diff --git a/engine/apps/slack/tests/test_interactive_api_endpoint.py b/engine/apps/slack/tests/test_interactive_api_endpoint.py index 2c117f78bd..a8be48c3a2 100644 --- a/engine/apps/slack/tests/test_interactive_api_endpoint.py +++ b/engine/apps/slack/tests/test_interactive_api_endpoint.py @@ -29,15 +29,18 @@ SLACK_USER_ID = "iurtiurituritu" -def _make_request(payload): +def _make_request(payload, predefined_org=None): + headers = { + "HTTP_X_SLACK_SIGNATURE": "asdfasdf", + "HTTP_X_SLACK_REQUEST_TIMESTAMP": "xxcxcvx", + } + if predefined_org: + headers["HTTP_X_CHATOPS_STACK_ID"] = predefined_org.stack_id return APIClient().post( "/slack/interactive_api_endpoint/", format="json", data=payload, - **{ - "HTTP_X_SLACK_SIGNATURE": "asdfasdf", - "HTTP_X_SLACK_REQUEST_TIMESTAMP": "xxcxcvx", - }, + **headers, ) @@ -312,7 +315,52 @@ def test_grafana_escalate( response = _make_request(payload) assert response.status_code == status.HTTP_200_OK - mock_process_scenario.assert_called_once_with(slack_user_identity, slack_team_identity, payload) + mock_process_scenario.assert_called_once_with( + slack_user_identity, slack_team_identity, payload, predefined_org=None + ) + + +@patch("apps.slack.views.SlackEventApiEndpointView.verify_signature", return_value=True) +@patch.object(StartDirectPaging, "process_scenario") +@pytest.mark.django_db +def test_grafana_escalate_with_org_from_chatops_proxy_defines_org( + mock_process_scenario, + _mock_verify_signature, + make_organization, + make_slack_user_identity, + make_user, + slack_team_identity, +): + """ + Check StartDirectPaging.process_scenario gets called when a user types /grafana escalate. + UnifiedSlackApp commands are prefixed with /grafana. 
+ """ + organization = make_organization(slack_team_identity=slack_team_identity) + slack_user_identity = make_slack_user_identity(slack_team_identity=slack_team_identity, slack_id=SLACK_USER_ID) + make_user(organization=organization, slack_user_identity=slack_user_identity) + + payload = { + "token": "gIkuvaNzQIHg97ATvDxqgjtO", + "team_id": slack_team_identity.slack_id, + "team_domain": "example", + "enterprise_id": "E0001", + "enterprise_name": "Globular%20Construct%20Inc", + "channel_id": "C2147483705", + "channel_name": "test", + "user_id": slack_user_identity.slack_id, + "user_name": "Steve", + "command": "/grafana", + "text": "escalate", + "response_url": "https://hooks.slack.com/commands/1234/5678", + "trigger_id": "13345224609.738474920.8088930838d88f008e0", + "api": "api_value", + } + response = _make_request(payload, predefined_org=organization) + + assert response.status_code == status.HTTP_200_OK + mock_process_scenario.assert_called_once_with( + slack_user_identity, slack_team_identity, payload, predefined_org=organization + ) @patch("apps.slack.views.SlackEventApiEndpointView.verify_signature", return_value=True) @@ -353,4 +401,6 @@ def test_escalate( response = _make_request(payload) assert response.status_code == status.HTTP_200_OK - mock_process_scenario.assert_called_once_with(slack_user_identity, slack_team_identity, payload) + mock_process_scenario.assert_called_once_with( + slack_user_identity, slack_team_identity, payload, predefined_org=None + ) diff --git a/engine/apps/slack/tests/test_scenario_steps/test_paging.py b/engine/apps/slack/tests/test_scenario_steps/test_paging.py index a46ce6abdf..0e80ac322b 100644 --- a/engine/apps/slack/tests/test_scenario_steps/test_paging.py +++ b/engine/apps/slack/tests/test_scenario_steps/test_paging.py @@ -27,26 +27,43 @@ from apps.user_management.models import Organization -def make_slack_payload(organization, team=None, user=None, current_users=None, actions=None): +def make_paging_view_slack_payload( + 
selected_org=None, predefined_org=None, team=None, user=None, current_users=None, actions=None +): + """ + Helper function to create a payload for paging view. + Args: + selected_org: selected organization + predefined_org: predefined organization parsed from chatops-proxy headers + team: selected team object. + user: selected user object. + current_users: Dictionary of current users. + actions: List of actions. + """ + organization = selected_org or predefined_org + if organization is None: + raise Exception("either selected or predefined org must be defined") + private_metadata = { + "input_id_prefix": "", + "channel_id": "123", + "submit_routing_uid": "FinishStepUID", + DataKey.USERS: current_users or {}, + } + if predefined_org: + private_metadata["organization_id"] = str(predefined_org.pk) payload = { "channel_id": "123", "trigger_id": "111", "view": { "id": "view-id", "private_metadata": make_private_metadata( - { - "input_id_prefix": "", - "channel_id": "123", - "submit_routing_uid": "FinishStepUID", - DataKey.USERS: current_users or {}, - }, - organization, - ), + "private_metadata": make_private_metadata(private_metadata, organization), "state": { "values": { DIRECT_PAGING_ORG_SELECT_ID: { OnPagingOrgChange.routing_uid(): { - "selected_option": {"value": make_value({"id": organization.pk}, organization)} + "selected_option": { + "value": make_value({"id": organization.pk if selected_org else None}, organization) + } } }, DIRECT_PAGING_TEAM_SELECT_ID: { @@ -84,6 +101,50 @@ def test_initial_state( assert metadata[DataKey.USERS] == {} + +@pytest.mark.django_db +def test_org_predefined( + make_organization_and_user_with_slack_identities, +): + """ + See get_org_from_chatops_proxy_header function. 
+ """ + org, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() + payload = {"channel_id": "123", "trigger_id": "111"} + + step = StartDirectPaging(slack_team_identity, user=user) + with patch.object(step._slack_client, "views_open") as mock_slack_api_call: + step.process_scenario(slack_user_identity, slack_team_identity, payload, predefined_org=org) + + view = mock_slack_api_call.call_args.kwargs["view"] + metadata = json.loads(view["private_metadata"]) + # Test that organization is injected to private metadata if it is defined by chatops-proxy. + assert metadata["organization_id"] == org.pk + # Test that organization select is not present if org defined by chatops-proxy. + for block in view["blocks"]: + if block.get("block_id") == DIRECT_PAGING_ORG_SELECT_ID: + raise AssertionError("Organization select block should not be present in the view") + + +@pytest.mark.django_db +def test_page_team_with_predefined_org(make_organization_and_user_with_slack_identities, make_team): + organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() + team = make_team(organization) + payload = make_paging_view_slack_payload(predefined_org=organization, team=team) + + step = FinishDirectPaging(slack_team_identity) + with patch("apps.slack.scenarios.paging.direct_paging") as mock_direct_paging: + with patch.object(step._slack_client, "api_call"): + step.process_scenario(slack_user_identity, slack_team_identity, payload) + + mock_direct_paging.assert_called_once_with( + organization=organization, + from_user=user, + message="The Message", + team=team, + users=[], + ) + + @pytest.mark.parametrize("role", (LegacyAccessControlRole.VIEWER, LegacyAccessControlRole.NONE)) @pytest.mark.django_db def test_initial_unauthorized(make_organization_and_user_with_slack_identities, role): @@ -126,7 +187,7 @@ def test_add_user_no_warning(make_organization_and_user_with_slack_identities, m 
on_call_shift.add_rolling_users([[user]]) schedule.refresh_ical_file() - payload = make_slack_payload(organization=organization, user=user) + payload = make_paging_view_slack_payload(selected_org=organization, user=user) step = OnPagingUserChange(slack_team_identity) with patch.object(step._slack_client, "views_update") as mock_slack_api_call: @@ -161,7 +222,7 @@ def test_add_user_maximum_exceeded(make_organization_and_user_with_slack_identit on_call_shift.add_rolling_users([[user]]) schedule.refresh_ical_file() - payload = make_slack_payload(organization=organization, user=user) + payload = make_paging_view_slack_payload(selected_org=organization, user=user) step = OnPagingUserChange(slack_team_identity) with patch("apps.slack.scenarios.paging.PRIVATE_METADATA_MAX_LENGTH", 100): @@ -188,7 +249,7 @@ def test_add_user_maximum_exceeded(make_organization_and_user_with_slack_identit def test_add_user_raise_warning(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() # user is not on call - payload = make_slack_payload(organization=organization, user=user) + payload = make_paging_view_slack_payload(selected_org=organization, user=user) step = OnPagingUserChange(slack_team_identity) with patch.object(step._slack_client, "views_push") as mock_slack_api_call: @@ -209,8 +270,8 @@ def test_add_user_raise_warning(make_organization_and_user_with_slack_identities @pytest.mark.django_db def test_change_user_policy(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() - payload = make_slack_payload( - organization=organization, + payload = make_paging_view_slack_payload( + selected_org=organization, actions=[ { "selected_option": { @@ -231,8 +292,8 @@ def test_change_user_policy(make_organization_and_user_with_slack_identities): @pytest.mark.django_db def 
test_remove_user(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() - payload = make_slack_payload( - organization=organization, + payload = make_paging_view_slack_payload( + selected_org=organization, actions=[ { "selected_option": { @@ -255,7 +316,7 @@ def test_remove_user(make_organization_and_user_with_slack_identities): @pytest.mark.django_db def test_trigger_paging_no_team_or_user_selected(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() - payload = make_slack_payload(organization=organization) + payload = make_paging_view_slack_payload(selected_org=organization) step = FinishDirectPaging(slack_team_identity, user=user) @@ -277,7 +338,7 @@ def test_trigger_paging_unauthorized(make_organization_and_user_with_slack_ident organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities( role=role ) - payload = make_slack_payload(organization=organization) + payload = make_paging_view_slack_payload(selected_org=organization) step = FinishDirectPaging(slack_team_identity) with patch.object(step._slack_client, "api_call"): @@ -294,7 +355,9 @@ def test_trigger_paging_unauthorized(make_organization_and_user_with_slack_ident def test_trigger_paging_additional_responders(make_organization_and_user_with_slack_identities, make_team): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() team = make_team(organization) - payload = make_slack_payload(organization=organization, team=team, current_users={str(user.pk): Policy.IMPORTANT}) + payload = make_paging_view_slack_payload( + selected_org=organization, team=team, current_users={str(user.pk): Policy.IMPORTANT} + ) step = FinishDirectPaging(slack_team_identity) with 
patch("apps.slack.scenarios.paging.direct_paging") as mock_direct_paging: @@ -314,7 +377,7 @@ def test_trigger_paging_additional_responders(make_organization_and_user_with_sl def test_page_team(make_organization_and_user_with_slack_identities, make_team): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() team = make_team(organization) - payload = make_slack_payload(organization=organization, team=team) + payload = make_paging_view_slack_payload(selected_org=organization, team=team) step = FinishDirectPaging(slack_team_identity) with patch("apps.slack.scenarios.paging.direct_paging") as mock_direct_paging: diff --git a/engine/apps/slack/tests/test_slash_command.py b/engine/apps/slack/tests/test_slash_command.py index a821a8a44d..ffe1bb6902 100644 --- a/engine/apps/slack/tests/test_slash_command.py +++ b/engine/apps/slack/tests/test_slash_command.py @@ -14,6 +14,7 @@ def test_parse(): assert slash_command.command == "grafana" assert slash_command.args == ["escalate"] assert slash_command.subcommand == "escalate" + assert slash_command.is_grafana_command def test_parse_command_without_subcommand(): diff --git a/engine/apps/slack/types/scenario_routes.py b/engine/apps/slack/types/scenario_routes.py index 503a62efd5..6458ac5105 100644 --- a/engine/apps/slack/types/scenario_routes.py +++ b/engine/apps/slack/types/scenario_routes.py @@ -1,11 +1,15 @@ import typing +from apps.slack.slash_command import SlashCommand + from .common import EventType, PayloadType if typing.TYPE_CHECKING: from apps.slack.scenarios.scenario_step import ScenarioStep from apps.slack.types import BlockActionType, InteractiveMessageActionType +MatcherType = typing.Callable[[SlashCommand], bool] + class ScenarioRoute: class _Base(typing.TypedDict): @@ -32,7 +36,7 @@ class MessageActionScenarioRoute(_Base): class SlashCommandScenarioRoute(_Base): payload_type: typing.Literal[PayloadType.SLASH_COMMAND] - command_name: typing.List[str] + 
matcher: MatcherType class ViewSubmissionScenarioRoute(_Base): payload_type: typing.Literal[PayloadType.VIEW_SUBMISSION] diff --git a/engine/apps/slack/views.py b/engine/apps/slack/views.py index 7f23b70c4b..1812927a48 100644 --- a/engine/apps/slack/views.py +++ b/engine/apps/slack/views.py @@ -361,11 +361,12 @@ def post(self, request): cmd = SlashCommand.parse(payload) # Check both command and subcommand for backward compatibility # So both /grafana escalate and /escalate will work. - if cmd.command in route["command_name"] or cmd.subcommand in route["command_name"]: + if route["matcher"](cmd): Step = route["step"] logger.info("Routing to {}".format(Step)) step = Step(slack_team_identity, organization, user) - step.process_scenario(slack_user_identity, slack_team_identity, payload) + org = get_org_from_chatops_proxy_header(request, slack_team_identity) + step.process_scenario(slack_user_identity, slack_team_identity, payload, predefined_org=org) step_was_found = True if payload_type == route_payload_type: @@ -592,3 +593,19 @@ def post(self, request): return Response({"error": e.error_message}, status=400) return Response(status=200) + + +def get_org_from_chatops_proxy_header(request, slack_team_identity) -> Organization | None: + """ + get_org_from_chatops_proxy_header extracts organization from the X-Chatops-Stack-ID header injected by chatops-proxy + """ + stack_id = request.META.get("HTTP_X_CHATOPS_STACK_ID") + if not stack_id: + return None + + try: + # get only orgs linked to the slack workspace to avoid tampering + return slack_team_identity.organizations.get(stack_id=stack_id) + except Organization.DoesNotExist: + logger.info(f"SlackEventApiEndpointView: get_org_from_header: organization with stack_id {stack_id} not found") + return None diff --git a/engine/common/api_helpers/paginators.py b/engine/common/api_helpers/paginators.py index 3020b9d057..3692fde7aa 100644 --- a/engine/common/api_helpers/paginators.py +++ b/engine/common/api_helpers/paginators.py 
@@ -1,5 +1,6 @@ import typing +from django.conf import settings from rest_framework.pagination import BasePagination, CursorPagination, PageNumberPagination from rest_framework.response import Response @@ -85,4 +86,4 @@ class FifteenPageSizePaginator(PathPrefixedPagePagination): class AlertGroupCursorPaginator(PathPrefixedCursorPagination): page_size = 25 - ordering = "-started_at" # ordering by "-started_at", so it uses the right index (see AlertGroup.Meta.indexes) + ordering = "-started_at_optimized" if settings.ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX else "-started_at" diff --git a/engine/common/ordered_model/serializer.py b/engine/common/ordered_model/serializer.py index d5c36ed162..b8aeaa3872 100644 --- a/engine/common/ordered_model/serializer.py +++ b/engine/common/ordered_model/serializer.py @@ -1,4 +1,5 @@ from rest_framework import serializers +from rest_framework.utils import model_meta from common.api_helpers.exceptions import BadRequest @@ -28,6 +29,38 @@ def create(self, validated_data): return instance + def _update(self, instance, validated_data): + # customize the update method to make sure order field is not saved again + # (which could trigger an integrity error if there was another concurrent update changing order) + serializers.raise_errors_on_nested_writes("update", self, validated_data) + info = model_meta.get_field_info(instance) + + # Simply set each attribute on the instance, and then save it. + # Note that unlike `.create()` we don't need to treat many-to-many + # relationships as being a special case. During updates we already + # have an instance pk for the relationships to be associated with. 
+ m2m_fields = [] + update_fields = [] + for attr, value in validated_data.items(): + if attr in info.relations and info.relations[attr].to_many: + m2m_fields.append((attr, value)) + else: + setattr(instance, attr, value) + update_fields.append(attr) + + # NOTE: this is the only difference, update changed fields to avoid saving order field again + if update_fields: + instance.save(update_fields=update_fields) + + # Note that many-to-many fields are set after updating instance. + # Setting m2m fields triggers signals which could potentially change + # updated instance and we do not want it to collide with .update() + for attr, value in m2m_fields: + field = getattr(instance, attr) + field.set(value) + + return instance + def update(self, instance, validated_data): # Remove "manual_order" and "order" fields from validated_data, so they are not passed to update method. manual_order = validated_data.pop("manual_order", False) @@ -38,7 +71,7 @@ def update(self, instance, validated_data): self._adjust_order(instance, manual_order, order, created=False) # Proceed with the update. 
- return super().update(instance, validated_data) + return self._update(instance, validated_data) @staticmethod def _adjust_order(instance, manual_order, order, created): diff --git a/engine/common/tests/test_ordered_model.py b/engine/common/tests/test_ordered_model.py index b9efd98fea..77a58051b1 100644 --- a/engine/common/tests/test_ordered_model.py +++ b/engine/common/tests/test_ordered_model.py @@ -5,6 +5,7 @@ from django.db import models from common.ordered_model.ordered_model import OrderedModel +from common.ordered_model.serializer import OrderedModelSerializer class TestOrderedModel(OrderedModel): @@ -436,3 +437,44 @@ def delete(idx): assert not exceptions assert _orders_are_sequential() assert list(TestOrderedModel.objects.values_list("extra_field", flat=True)) == expected_extra_field_values + + +class TestOrderedModelSerializer(OrderedModelSerializer): + class Meta: + model = TestOrderedModel + fields = OrderedModelSerializer.Meta.fields + ["test_field", "extra_field"] + + +@pytest.mark.skipif(SKIP_CONCURRENT, reason="OrderedModel concurrent tests are skipped to speed up tests") +@pytest.mark.django_db(transaction=True) +def test_ordered_model_swap_all_to_zero_via_serializer(): + THREADS = 300 + exceptions = [] + + TestOrderedModel.objects.all().delete() # clear table + instances = [TestOrderedModel.objects.create(test_field="test") for _ in range(THREADS)] + + # generate random non-unique orders + random.seed(42) + positions = [random.randint(0, THREADS - 1) for _ in range(THREADS)] + + def update_order_to_zero(idx): + try: + instance = instances[idx] + serializer = TestOrderedModelSerializer(instance, data={"order": 0, "extra_field": idx}, partial=True) + serializer.is_valid(raise_exception=True) + serializer.save() + instance.swap(positions[idx]) + except Exception as e: + exceptions.append(e) + + threads = [threading.Thread(target=update_order_to_zero, args=(0,)) for _ in range(THREADS)] + for thread in threads: + thread.start() + for thread in 
threads: + thread.join() + + # can only check that orders are still sequential and that there are no exceptions + # can't check the exact order because it changes depending on the order of execution + assert not exceptions + assert _orders_are_sequential() diff --git a/engine/common/utils.py b/engine/common/utils.py index b902fe219e..395a1306bd 100644 --- a/engine/common/utils.py +++ b/engine/common/utils.py @@ -129,7 +129,7 @@ def getenv_boolean(variable_name: str, default: bool) -> bool: return value.lower() in ("true", "1") -def getenv_integer(variable_name: str, default: int) -> int: +def getenv_integer(variable_name: str, default: int | None) -> int | None: value = os.environ.get(variable_name) if value is None: return default diff --git a/engine/settings/base.py b/engine/settings/base.py index fea5c687a4..f5b793dd75 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -72,6 +72,7 @@ # Enable labels feature for organizations from the list. Use OnCall organization ID, for this flag FEATURE_LABELS_ENABLED_PER_ORG = getenv_list("FEATURE_LABELS_ENABLED_PER_ORG", default=list()) FEATURE_ALERT_GROUP_SEARCH_ENABLED = getenv_boolean("FEATURE_ALERT_GROUP_SEARCH_ENABLED", default=False) +FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS = getenv_integer("FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS", default=None) FEATURE_NOTIFICATION_BUNDLE_ENABLED = getenv_boolean("FEATURE_NOTIFICATION_BUNDLE_ENABLED", default=False) TWILIO_API_KEY_SID = os.environ.get("TWILIO_API_KEY_SID") @@ -188,6 +189,10 @@ class DatabaseTypes: pymysql.install_as_MySQLdb() +ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX = DATABASE_TYPE == DatabaseTypes.MYSQL and getenv_boolean( + "ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX", default=False +) + # Redis REDIS_USERNAME = os.getenv("REDIS_USERNAME", "") REDIS_PASSWORD = os.getenv("REDIS_PASSWORD") diff --git a/engine/settings/celery_task_routes.py b/engine/settings/celery_task_routes.py index ea866c1a7a..cce98b6fe4 100644 --- 
a/engine/settings/celery_task_routes.py +++ b/engine/settings/celery_task_routes.py @@ -145,7 +145,6 @@ "apps.chatops_proxy.tasks.sync_org_with_chatops_proxy": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.cleanup_organization_async": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.cleanup_empty_deleted_integrations": {"queue": "long"}, - "apps.grafana_plugin.tasks.sync.start_cleanup_organizations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_cleanup_deleted_integrations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_cleanup_deleted_organizations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_sync_organizations": {"queue": "long"}, diff --git a/grafana-plugin/e2e-tests/utils/alertGroup.ts b/grafana-plugin/e2e-tests/utils/alertGroup.ts index 77696e5507..b346da71e8 100644 --- a/grafana-plugin/e2e-tests/utils/alertGroup.ts +++ b/grafana-plugin/e2e-tests/utils/alertGroup.ts @@ -49,7 +49,7 @@ export const filterAlertGroupsTableByIntegrationAndGoToDetailPage = async ( const selectElement = await selectDropdownValue({ page, selectType: 'grafanaSelect', - placeholderText: 'Search or filter results...', + placeholderText: 'Filter results...', value: 'Integration', }); await selectElement.type(integrationName); diff --git a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx index 7ca7d07ddc..dead50d3c4 100644 --- a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx +++ b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx @@ -26,6 +26,7 @@ import { ApiSchemas } from 'network/oncall-api/api.types'; import { components } from 'network/oncall-api/autogenerated-api.types'; import { useStore } from 'state/useStore'; import { UserActions } from 'utils/authorization/authorization'; +import { PROCESSING_REQUEST_ERROR } from 'utils/consts'; import { WrapWithGlobalNotification } from 'utils/decorators'; import { 
useDebouncedCallback, useIsLoading } from 'utils/hooks'; import { pluralize } from 'utils/utils'; @@ -142,7 +143,7 @@ export const ColumnsModal: React.FC = observer( variant="primary" onClick={WrapWithGlobalNotification(onAddNewColumns, { success: 'New column has been added to the list.', - failure: 'There was an error processing your request. Please try again.', + failure: PROCESSING_REQUEST_ERROR, })} > {isLoading ? : 'Add'} diff --git a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx index fe7ccc9d1a..eb346615b9 100644 --- a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx +++ b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx @@ -13,6 +13,7 @@ import { ActionKey } from 'models/loader/action-keys'; import { ApiSchemas } from 'network/oncall-api/api.types'; import { useStore } from 'state/useStore'; import { UserActions } from 'utils/authorization/authorization'; +import { PROCESSING_REQUEST_ERROR } from 'utils/consts'; import { WrapAutoLoadingState, WrapWithGlobalNotification } from 'utils/decorators'; import { useIsLoading } from 'utils/hooks'; @@ -81,7 +82,7 @@ export const ColumnsSelectorWrapper: React.FC = obs onClick={WrapAutoLoadingState( WrapWithGlobalNotification(onColumnRemovalClick, { success: 'Column has been removed from the list.', - failure: 'There was an error processing your request. 
Please try again', + failure: PROCESSING_REQUEST_ERROR, }), ActionKey.REMOVE_COLUMN_FROM_ALERT_GROUP )} diff --git a/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx b/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx index f55317cee5..38fd9d06e1 100644 --- a/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx +++ b/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx @@ -55,32 +55,42 @@ const RouteHeadingDisplay: React.FC<{ channelFilter: ChannelFilter }> = ({ chann const styles = useStyles2(getStyles); const hasLabels = store.hasFeature(AppFeature.Labels); + const renderWarning = (text: string) => ( + <> +
+ +
+ {text} + + ); + if (channelFilter?.filtering_term || channelFilter?.filtering_labels) { return ( <> - - {channelFilter.filtering_term} - + {channelFilter.filtering_term && ( + + {channelFilter.filtering_term} + + )} + + {/* Show missing template warning */} + {!channelFilter.filtering_term && renderWarning('Routing template not set')} - + {channelFilter.filtering_labels?.length > 0 && } + + {/* Show missing labels warning */} + {!channelFilter.filtering_labels?.length && renderWarning('Routing labels not set')} ); } - return ( - <> -
- -
- Routing template not set - - ); + return renderWarning('Routing template not set'); }; const getStyles = (theme: GrafanaTheme2) => { diff --git a/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx b/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx index bee1cb6227..e5dd7b26e0 100644 --- a/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx +++ b/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx @@ -284,32 +284,16 @@ const CustomLabels = (props: CustomLabelsProps) => { }; const onLoadKeys = async (search?: string) => { - let result = undefined; - - try { - result = await labelsStore.loadKeys(search); - } catch (error) { - openErrorNotification('There was an error processing your request. Please try again'); - } - - const groups = splitToGroups(result); - - return groups; + const result = await labelsStore.loadKeys(search); + return splitToGroups(result); }; const onLoadValuesForKey = async (key: string, search?: string) => { - let result = undefined; - - try { - const { values } = await labelsStore.loadValuesForKey(key, search); - result = values; - } catch (error) { - openErrorNotification('There was an error processing your request. 
Please try again'); + if (!key) { + return []; } - - const groups = splitToGroups(result); - - return groups; + const { values } = await labelsStore.loadValuesForKey(key, search); + return splitToGroups(values); }; return ( diff --git a/grafana-plugin/src/containers/Labels/Labels.tsx b/grafana-plugin/src/containers/Labels/Labels.tsx index d7a76ff812..02625f461b 100644 --- a/grafana-plugin/src/containers/Labels/Labels.tsx +++ b/grafana-plugin/src/containers/Labels/Labels.tsx @@ -49,30 +49,16 @@ const _Labels = observer( ); const onLoadKeys = async (search?: string) => { - let result = undefined; - try { - result = await labelsStore.loadKeys(search); - } catch (error) { - openErrorNotification('There was an error processing your request. Please try again'); - } - - const groups = splitToGroups(result); - - return groups; + const result = await labelsStore.loadKeys(search); + return splitToGroups(result); }; const onLoadValuesForKey = async (key: string, search?: string) => { - let result = undefined; - try { - const { values } = await labelsStore.loadValuesForKey(key, search); - result = values; - } catch (error) { - openErrorNotification('There was an error processing your request. 
Please try again'); + if (!key) { + return []; } - - const groups = splitToGroups(result); - - return groups; + const { values } = await labelsStore.loadValuesForKey(key, search); + return splitToGroups(values); }; const isValid = () => { diff --git a/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx b/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx index 416bbcfd58..51d5c11198 100644 --- a/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx +++ b/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx @@ -5,7 +5,6 @@ import { GrafanaTheme2, KeyValue, SelectableValue, TimeRange } from '@grafana/da import { InlineSwitch, MultiSelect, - TimeRangeInput, Select, LoadingPlaceholder, Input, @@ -19,33 +18,53 @@ import { capitalCase } from 'change-case'; import { debounce, isUndefined, omitBy, pickBy } from 'lodash-es'; import { observer } from 'mobx-react'; import moment from 'moment-timezone'; +import ReactDOM from 'react-dom'; import Emoji from 'react-emoji-render'; +import { RenderConditionally } from 'components/RenderConditionally/RenderConditionally'; import { Text } from 'components/Text/Text'; import { LabelsFilter } from 'containers/Labels/LabelsFilter'; import { RemoteSelect } from 'containers/RemoteSelect/RemoteSelect'; import { TeamName } from 'containers/TeamName/TeamName'; -import { FiltersValues } from 'models/filters/filters.types'; +import { FilterExtraInformation, FilterExtraInformationValues } from 'models/filters/filters.types'; import { GrafanaTeamStore } from 'models/grafana_team/grafana_team'; import { SelectOption, WithStoreProps } from 'state/types'; import { withMobXProviderContext } from 'state/withStore'; import { LocationHelper } from 'utils/LocationHelper'; import { PAGE } from 'utils/consts'; import { convertTimerangeToFilterValue, getValueForDateRangeFilterType } from 'utils/datetime'; -import { allFieldsEmpty } from 'utils/utils'; import { parseFilters } from './RemoteFilters.helpers'; import { 
FilterOption } from './RemoteFilters.types'; +import { TimeRangePickerWrapper } from './TimeRangePickerWrapper'; interface RemoteFiltersProps extends WithStoreProps, Themeable2 { onChange: (filters: Record, isOnMount: boolean, invalidateFn: () => boolean) => void; query: KeyValue; page: PAGE; - defaultFilters?: FiltersValues; - extraFilters?: (state, setState, onFiltersValueChange) => React.ReactNode; grafanaTeamStore: GrafanaTeamStore; + extraInformation?: FilterExtraInformation; + extraFilters?: (state, setState, onFiltersValueChange) => React.ReactNode; skipFilterOptionFn?: (filterOption: FilterOption) => boolean; } + +export function filterExtraInformation(object: FilterExtraInformationValues): FilterExtraInformationValues { + const defaultValues: Partial = { + isClearable: true, + showInputLabel: true, + }; + + const result = { ...object }; + + Object.keys(defaultValues).forEach((key) => { + if (!result.hasOwnProperty(key)) { + result[key] = defaultValues[key]; + } + }); + + return result; +} + export interface RemoteFiltersState { filterOptions?: FilterOption[]; filters: FilterOption[]; @@ -86,12 +105,12 @@ class _RemoteFilters extends Component { query, page, store: { filtersStore }, - defaultFilters, skipFilterOptionFn, } = this.props; let filterOptions = await filtersStore.updateOptionsForPage(page); const currentTablePageNum = parseInt(filtersStore.currentTablePageNum[page] || query.p || 1, 10); + const defaultFilters = this.extractDefaultValuesFromExtraInformation(); if (skipFilterOptionFn) { filterOptions = filterOptions.filter((option: FilterOption) => !skipFilterOptionFn(option)); @@ -100,11 +119,11 @@ class _RemoteFilters extends Component { // set the current page from filters/query or default it to 1 filtersStore.setCurrentTablePageNum(page, currentTablePageNum); - let { filters, values } = parseFilters({ ...query, ...filtersStore.globalValues }, filterOptions, query); - - if (allFieldsEmpty(values)) { - ({ filters, values } = 
parseFilters(defaultFilters, filterOptions, query)); - } + let { filters, values } = parseFilters( + { ...defaultFilters, ...query, ...filtersStore.globalValues }, + filterOptions, + query + ); this.setState({ filterOptions, filters, values }, () => this.onChange(true)); } @@ -147,33 +166,13 @@ class _RemoteFilters extends Component { return (
- {filters.map((filterOption: FilterOption) => ( -
- - {filterOption.display_name || capitalCase(filterOption.name)} - {filterOption.description && ( - - - - - - )} - - {this.renderFilterOption(filterOption)} -
- ))} + {filters.map(this.renderFilterBlock)} +