diff --git a/public/pages/TopNQueries/TopNQueries.test.tsx b/public/pages/TopNQueries/TopNQueries.test.tsx
new file mode 100644
index 0000000..f642c39
--- /dev/null
+++ b/public/pages/TopNQueries/TopNQueries.test.tsx
@@ -0,0 +1,208 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import React from 'react';
+import { render, screen, fireEvent, waitFor } from '@testing-library/react';
+import '@testing-library/jest-dom/extend-expect';
+import { MemoryRouter } from 'react-router-dom';
+import TopNQueries, { QUERY_INSIGHTS, CONFIGURATION } from './TopNQueries';
+import { CoreStart } from 'opensearch-dashboards/public';
+
+jest.mock('../QueryInsights/QueryInsights', () => () => <div>Mocked QueryInsights</div>);
+jest.mock('../Configuration/Configuration', () => () => <div>Mocked Configuration</div>);
+jest.mock('../QueryDetails/QueryDetails', () => () => <div>Mocked QueryDetails</div>);
+
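+// Minimal CoreStart stub: only http.get and http.put are mocked here; the cast through
+// `unknown` satisfies the CoreStart type without stubbing the rest of the interface.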
+const mockCore = ({
+ http: {
+ get: jest.fn(),
+ put: jest.fn(),
+ },
+} as unknown) as CoreStart;
+
+const setUpDefaultEnabledSettings = () => {
+ const mockLatencyResponse = { response: { top_queries: [{ id: '1' }, { id: '2' }] } };
+ const mockCpuResponse = { response: { top_queries: [{ id: '2' }, { id: '3' }] } };
+ const mockMemoryResponse = { response: { top_queries: [{ id: '1' }] } };
+ // Mock API responses for each metric
+ (mockCore.http.get as jest.Mock).mockImplementation((endpoint) => {
+ if (endpoint === '/api/top_queries/latency') return Promise.resolve(mockLatencyResponse);
+ if (endpoint === '/api/top_queries/cpu') return Promise.resolve(mockCpuResponse);
+ if (endpoint === '/api/top_queries/memory') return Promise.resolve(mockMemoryResponse);
+ return Promise.resolve({ response: { top_queries: [] } });
+ });
+ // Mock API response for all metrics enabled
+ const mockSettingsResponse = {
+ response: {
+ persistent: {
+ search: {
+ insights: {
+ top_queries: {
+ latency: { enabled: 'true', top_n_size: '10', window_size: '1h' },
+ cpu: { enabled: 'true', top_n_size: '10', window_size: '1h' },
+ memory: { enabled: 'true', top_n_size: '5', window_size: '30m' },
+ },
+ },
+ },
+ },
+ },
+ };
+ (mockCore.http.get as jest.Mock).mockResolvedValueOnce(mockSettingsResponse);
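+  // The one-time value queued above is consumed before mockImplementation, so the first
+  // GET (the /api/settings request) resolves with the settings payload while subsequent
+  // top_queries fetches fall through to the implementation above.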
+};
+
+describe('TopNQueries Component', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ it('renders and switches tabs correctly', () => {
+    render(
+      <MemoryRouter>
+        <TopNQueries core={mockCore} />
+      </MemoryRouter>
+    );
+
+ // Check for Query Insights tab content
+ expect(screen.getByText('Mocked QueryInsights')).toBeInTheDocument();
+ expect(screen.getByText('Top N queries')).toBeInTheDocument();
+ expect(screen.getByText('Configuration')).toBeInTheDocument();
+
+ // Switch to Configuration tab
+ fireEvent.click(screen.getByText('Configuration'));
+ expect(screen.getByText('Mocked Configuration')).toBeInTheDocument();
+ });
+
+ it('updates settings in retrieveConfigInfo based on API response', async () => {
+ const mockSettingsResponse = {
+ response: {
+ persistent: {
+ search: {
+ insights: {
+ top_queries: {
+ latency: { enabled: 'true', top_n_size: '10', window_size: '1h' },
+ cpu: { enabled: 'false' },
+ memory: { enabled: 'true', top_n_size: '5', window_size: '30m' },
+ },
+ },
+ },
+ },
+ },
+ };
+ (mockCore.http.get as jest.Mock).mockResolvedValueOnce(mockSettingsResponse);
+    render(
+      <MemoryRouter>
+        <TopNQueries core={mockCore} />
+      </MemoryRouter>
+    );
+ await waitFor(() => {
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
+ expect(screen.getByText('Mocked Configuration')).toBeInTheDocument();
+ });
+ });
+
+ it('fetches queries for all metrics in retrieveQueries', async () => {
+ setUpDefaultEnabledSettings();
+    render(
+      <MemoryRouter>
+        <TopNQueries core={mockCore} />
+      </MemoryRouter>
+    );
+ await waitFor(() => {
+ // Verify each endpoint is called
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
+ expect(mockCore.http.get).toHaveBeenCalledWith(
+ '/api/top_queries/latency',
+ expect.any(Object)
+ );
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/cpu', expect.any(Object));
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/memory', expect.any(Object));
+ // Check that deduplicated queries would be displayed in QueryInsights
+ expect(screen.getByText('Mocked QueryInsights')).toBeInTheDocument();
+ });
+ });
+
+ it('fetches queries for only enabled metrics in retrieveQueries', async () => {
+ const mockResponse = { response: { top_queries: [{ id: '1' }, { id: '2' }] } };
+ // Mock API responses for each metric
+ (mockCore.http.get as jest.Mock).mockImplementation((endpoint) => {
+ if (endpoint === '/api/top_queries/latency') return Promise.resolve(mockResponse);
+ if (endpoint === '/api/top_queries/cpu') return Promise.resolve(mockResponse);
+ if (endpoint === '/api/top_queries/memory') return Promise.resolve(mockResponse);
+ return Promise.resolve({ response: { top_queries: [] } });
+ });
+    // Mock API response with only one metric enabled
+ const mockSettingsResponse = {
+ response: {
+ persistent: {
+ search: {
+ insights: {
+ top_queries: {
+ latency: { enabled: 'true', top_n_size: '10', window_size: '1h' },
+ cpu: { enabled: 'false' },
+ memory: { enabled: 'false' },
+ },
+ },
+ },
+ },
+ },
+ };
+ (mockCore.http.get as jest.Mock).mockResolvedValueOnce(mockSettingsResponse);
+    render(
+      <MemoryRouter>
+        <TopNQueries core={mockCore} />
+      </MemoryRouter>
+    );
+ await waitFor(() => {
+ // Verify each endpoint is called
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
+ expect(mockCore.http.get).toHaveBeenCalledWith(
+ '/api/top_queries/latency',
+ expect.any(Object)
+ );
+ expect(mockCore.http.get).not.toHaveBeenCalledWith(
+ '/api/top_queries/cpu',
+ expect.any(Object)
+ );
+ expect(mockCore.http.get).not.toHaveBeenCalledWith(
+ '/api/top_queries/memory',
+ expect.any(Object)
+ );
+ // Check that deduplicated queries would be displayed in QueryInsights
+ expect(screen.getByText('Mocked QueryInsights')).toBeInTheDocument();
+ });
+ });
+
+ it('updates time range and fetches data when time range changes', async () => {
+ setUpDefaultEnabledSettings();
+ (mockCore.http.get as jest.Mock).mockResolvedValueOnce({ response: { top_queries: [] } });
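+    // The empty one-time response above is consumed by the first top_queries fetch that
+    // follows the settings call; later fetches use the responses from the setup helper.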
+ // Render with initial time range
+    const { rerender } = render(
+      <MemoryRouter>
+        <TopNQueries core={mockCore} initialStart="now-1d" initialEnd="now" />
+      </MemoryRouter>
+    );
+ // Mock a new response for the time range update
+ (mockCore.http.get as jest.Mock).mockResolvedValueOnce({
+ response: { top_queries: [{ id: 'newQuery' }] },
+ });
+ // Re-render with updated time range to simulate a change
+    rerender(
+      <MemoryRouter>
+        <TopNQueries core={mockCore} initialStart="now-7d" initialEnd="now" />
+      </MemoryRouter>
+    );
+ // Verify that the component re-fetches data for the new time range
+ await waitFor(() => {
+      // 1 call for settings, plus 3 metric fetches for the initial render and 3 more for the re-render
+ expect(mockCore.http.get).toHaveBeenCalledTimes(7);
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
+ expect(mockCore.http.get).toHaveBeenCalledWith(
+ '/api/top_queries/latency',
+ expect.any(Object)
+ );
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/cpu', expect.any(Object));
+ expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/memory', expect.any(Object));
+ });
+ });
+});
diff --git a/public/pages/TopNQueries/TopNQueries.tsx b/public/pages/TopNQueries/TopNQueries.tsx
index fd70c00..db631dd 100644
--- a/public/pages/TopNQueries/TopNQueries.tsx
+++ b/public/pages/TopNQueries/TopNQueries.tsx
@@ -23,12 +23,20 @@ export interface MetricSettings {
currTimeUnit: string;
}
-const TopNQueries = ({ core }: { core: CoreStart }) => {
+const TopNQueries = ({
+ core,
+ initialStart = 'now-1d',
+ initialEnd = 'now',
+}: {
+ core: CoreStart;
+ initialStart?: string;
+ initialEnd?: string;
+}) => {
const history = useHistory();
const location = useLocation();
const [loading, setLoading] = useState(false);
- const [currStart, setStart] = useState('now-1d');
- const [currEnd, setEnd] = useState('now');
+ const [currStart, setStart] = useState(initialStart);
+ const [currEnd, setEnd] = useState(initialEnd);
const [recentlyUsedRanges, setRecentlyUsedRanges] = useState([
{ start: currStart, end: currEnd },
]);
@@ -172,10 +180,8 @@ const TopNQueries = ({ core }: { core: CoreStart }) => {
if (get) {
try {
const resp = await core.http.get('/api/settings');
- const settings = resp.response.persistent.search.insights.top_queries;
- const latency = settings.latency;
- const cpu = settings.cpu;
- const memory = settings.memory;
+ const { latency, cpu, memory } =
+ resp?.response?.persistent?.search?.insights?.top_queries || {};
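+        // With optional chaining, a missing or malformed settings payload leaves
+        // latency/cpu/memory undefined, so the enablement checks below are simply skipped.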
if (latency !== undefined && latency.enabled === 'true') {
const [time, timeUnits] = latency.window_size.match(/\D+|\d+/g);
setMetricSettings('latency', {