Merge branch 'master' into ignite-19880
Якимова Анастасия Николаевна authored and committed Sep 27, 2023
2 parents ca15cd1 + 61d9d8d commit 32e0b55
Showing 93 changed files with 703 additions and 1,843 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/sonar-pr-from-fork.yml
@@ -17,7 +17,8 @@

name: Sonar Quality Pull Request Analysis

on: [pull_request_target]
# TODO IGNITE-20466 Investigate and fix the issue with running this workflow on PRs from forks.
on: pull_request

permissions:
contents: read
8 changes: 8 additions & 0 deletions docs/_docs/SQL/JDBC/jdbc-driver.adoc
@@ -144,6 +144,14 @@ Use this flag to tell Ignite to perform all intermediate row analysis and update
Defaults to `false`, meaning that the intermediate results are fetched to the query initiator first.
|`false`

|`queryTimeout`
|Sets the number of seconds the driver will wait for a Statement object to execute. Zero means there is no limit.
|`0`

|`connectionTimeout`
|Sets the number of milliseconds the JDBC client will wait for the server to respond. Zero means there is no limit.
|`0`


|=======================================================================
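As a minimal sketch (the host, port, and query below are placeholders, not part of this change), both timeouts can be passed as semicolon-separated parameters in the thin driver connection string:

[source,java]
----
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class JdbcTimeoutExample {
    public static void main(String[] args) throws Exception {
        // Register the Ignite JDBC thin driver (optional on modern JVMs).
        Class.forName("org.apache.ignite.IgniteJdbcThinDriver");

        // queryTimeout is in seconds, connectionTimeout is in milliseconds.
        String url = "jdbc:ignite:thin://127.0.0.1:10800;queryTimeout=5;connectionTimeout=10000";

        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            while (rs.next())
                System.out.println(rs.getInt(1));
        }
    }
}
----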

17 changes: 15 additions & 2 deletions docs/_docs/monitoring-metrics/new-metrics.adoc
@@ -241,7 +241,7 @@ Register name: `io.statistics.cacheGroups.{group_name}`
|===


== Sorted Indexes
== Sorted Indexes I/O statistics

Register name: `io.statistics.sortedIndexes.{cache_name}.{index_name}`

@@ -257,8 +257,21 @@ Register name: `io.statistics.sortedIndexes.{cache_name}.{index_name}`
|startTime| long| Statistics collection start time
|===

== Sorted Indexes operations

== Hash Indexes
Contains metrics about low-level operations (such as `Insert`, `Search`, etc.) on pages of sorted secondary indexes.

Register name: `index.{schema_name}.{table_name}.{index_name}`

[cols="2,1,3",opts="header"]
|===
|Name | Type | Description
|{opType}Count| long| Number of {opType} operations on the index.
|{opType}Time| long| Total duration (nanoseconds) of {opType} operations on the index.
|===
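As an illustrative sketch of reading these metrics from code (it uses the same internal APIs as the tests in this commit; the names `PUBLIC`, `PERSON`, and `PERSON_NAME_IDX` are hypothetical):

[source,java]
----
import org.apache.ignite.Ignition;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.metric.MetricRegistry;
import org.apache.ignite.spi.metric.LongMetric;

public class IndexOpMetricsExample {
    public static void main(String[] args) {
        // Assumes a running node with a table PUBLIC.PERSON and a sorted index PERSON_NAME_IDX.
        IgniteEx ignite = (IgniteEx)Ignition.ignite();

        // Register name follows the pattern index.{schema_name}.{table_name}.{index_name}.
        MetricRegistry mreg = ignite.context().metric().registry("index.PUBLIC.PERSON.PERSON_NAME_IDX");

        LongMetric insertCnt = mreg.findMetric("InsertCount");
        LongMetric insertTime = mreg.findMetric("InsertTime");

        System.out.println("Inserts: " + insertCnt.value() + ", total time (ns): " + insertTime.value());
    }
}
----

The same registry is also available through the configured metric exporters, such as JMX.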


== Hash Indexes I/O statistics

Register name: `io.statistics.hashIndexes.{cache_name}.{index_name}`

@@ -17,20 +17,22 @@

package org.apache.ignite.internal.processors.query.calcite.exec;

import java.util.Objects;
import java.util.UUID;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.processors.query.calcite.CalciteQueryProcessor;
import org.apache.ignite.internal.processors.query.calcite.util.AbstractService;
import org.apache.ignite.internal.processors.query.calcite.util.Commons;
import org.apache.ignite.internal.util.StripedExecutor;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.thread.IgniteStripedThreadPoolExecutor;

import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName;
import static org.apache.ignite.internal.processors.pool.PoolProcessor.THREAD_POOLS;

/**
* TODO use {@link StripedExecutor}, registered in core pols.
* Query task executor.
*/
public class QueryTaskExecutorImpl extends AbstractService implements QueryTaskExecutor, Thread.UncaughtExceptionHandler {
/** */
public static final String THREAD_POOL_NAME = "CalciteQueryExecutor";

/** */
private IgniteStripedThreadPoolExecutor stripedThreadPoolExecutor;

@@ -82,16 +84,18 @@ public void exceptionHandler(Thread.UncaughtExceptionHandler eHnd) {
@Override public void onStart(GridKernalContext ctx) {
exceptionHandler(ctx.uncaughtExceptionHandler());

CalciteQueryProcessor proc = Objects.requireNonNull(Commons.lookupComponent(ctx, CalciteQueryProcessor.class));

stripedThreadPoolExecutor(new IgniteStripedThreadPoolExecutor(
IgniteStripedThreadPoolExecutor executor = new IgniteStripedThreadPoolExecutor(
ctx.config().getQueryThreadPoolSize(),
ctx.igniteInstanceName(),
"calciteQry",
this,
false,
0
));
);

stripedThreadPoolExecutor(executor);

executor.registerMetrics(ctx.metric().registry(metricName(THREAD_POOLS, THREAD_POOL_NAME)));
}

/** {@inheritDoc} */
@@ -277,11 +277,6 @@ private void handle0(AlterTableDropCommand cmd) throws IgniteCheckedException {

assert cctx != null;

if (cctx.mvccEnabled()) {
throw new IgniteSQLException("Cannot drop column(s) with enabled MVCC. " +
"Operation is unsupported at the moment.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}

if (QueryUtils.isSqlType(typeDesc.valueClass())) {
throw new SchemaOperationException("Cannot drop column(s) because table was created " +
"with WRAP_VALUE=false option.");
@@ -53,10 +53,12 @@
import org.apache.ignite.internal.processors.cache.query.GridCacheQueryType;
import org.apache.ignite.internal.processors.metric.MetricRegistry;
import org.apache.ignite.internal.processors.performancestatistics.AbstractPerformanceStatisticsTest;
import org.apache.ignite.internal.processors.pool.PoolProcessor;
import org.apache.ignite.internal.processors.query.IgniteSQLException;
import org.apache.ignite.internal.processors.query.QueryUtils;
import org.apache.ignite.internal.processors.query.calcite.Query;
import org.apache.ignite.internal.processors.query.calcite.QueryRegistry;
import org.apache.ignite.internal.processors.query.calcite.exec.QueryTaskExecutorImpl;
import org.apache.ignite.internal.processors.security.SecurityContext;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.U;
@@ -73,6 +75,7 @@
import static org.apache.ignite.internal.processors.authentication.AuthenticationProcessorSelfTest.withSecurityContextOnAllNodes;
import static org.apache.ignite.internal.processors.authentication.User.DFAULT_USER_NAME;
import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL_FIELDS;
import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName;
import static org.apache.ignite.internal.processors.performancestatistics.AbstractPerformanceStatisticsTest.cleanPerformanceStatisticsDir;
import static org.apache.ignite.internal.processors.performancestatistics.AbstractPerformanceStatisticsTest.startCollectStatistics;
import static org.apache.ignite.internal.processors.performancestatistics.AbstractPerformanceStatisticsTest.stopCollectStatisticsAndRead;
@@ -302,6 +305,23 @@ public void testUserQueriesMetrics() throws Exception {
assertEquals(0, ((LongMetric)mreg1.findMetric("canceled")).value());
}

/** */
@Test
public void testThreadPoolMetrics() {
String regName = metricName(PoolProcessor.THREAD_POOLS, QueryTaskExecutorImpl.THREAD_POOL_NAME);
MetricRegistry mreg = client.context().metric().registry(regName);

LongMetric tasksCnt = mreg.findMetric("CompletedTaskCount");

tasksCnt.reset();

assertEquals(0, tasksCnt.value());

sql("SELECT 'test'");

assertTrue(tasksCnt.value() > 0);
}

/** */
@Test
public void testPerformanceStatistics() throws Exception {
@@ -74,7 +74,6 @@
import static java.sql.Statement.RETURN_GENERATED_KEYS;
import static org.apache.ignite.cache.query.SqlFieldsQuery.DFLT_LAZY;
import static org.apache.ignite.configuration.ClientConnectorConfiguration.DFLT_PORT;
import static org.apache.ignite.internal.processors.odbc.SqlStateCode.TRANSACTION_STATE_EXCEPTION;
import static org.apache.ignite.testframework.GridTestUtils.assertThrows;
import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause;
import static org.apache.ignite.testframework.GridTestUtils.getFieldValue;
@@ -1290,50 +1289,6 @@ public void testRollback() throws Exception {
}
}

/**
* @throws Exception if failed.
*/
@Test
public void testBeginFails() throws Exception {
try (Connection conn = DriverManager.getConnection(urlWithPartitionAwarenessProp)) {
conn.createStatement().execute("BEGIN");

fail("Exception is expected");
}
catch (SQLException e) {
assertEquals(TRANSACTION_STATE_EXCEPTION, e.getSQLState());
}
}

/**
* @throws Exception if failed.
*/
@Test
public void testCommitIgnored() throws Exception {
try (Connection conn = DriverManager.getConnection(urlWithPartitionAwarenessProp)) {
conn.setAutoCommit(false);
conn.createStatement().execute("COMMIT");

conn.commit();
}
// assert no exception
}

/**
* @throws Exception if failed.
*/
@Test
public void testRollbackIgnored() throws Exception {
try (Connection conn = DriverManager.getConnection(urlWithPartitionAwarenessProp)) {
conn.setAutoCommit(false);

conn.createStatement().execute("ROLLBACK");

conn.rollback();
}
// assert no exception
}

/**
* @throws Exception If failed.
*/
@@ -34,6 +34,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
@@ -52,6 +53,9 @@
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import javax.cache.expiry.CreatedExpiryPolicy;
import javax.cache.expiry.Duration;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;
@@ -2393,6 +2397,38 @@ public void testCacheIdleVerifyMovingParts() throws Exception {
assertContains(log, testOut.toString(), "MOVING partitions");
}

/**
* @throws Exception If failed.
*/
@Test
public void testCacheIdleVerifyExpiringEntries() throws Exception {
IgniteEx ignite = startGrids(3);

ignite.cluster().state(ACTIVE);

IgniteCache<Object, Object> cache = ignite.createCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
.setAffinity(new RendezvousAffinityFunction(false, 32))
.setBackups(1));

Random rnd = new Random();

// Put without expiry policy.
for (int i = 0; i < 5_000; i++)
cache.put(i, i);

// Put with expiry policy.
for (int i = 5_000; i < 10_000; i++) {
ExpiryPolicy expPol = new CreatedExpiryPolicy(new Duration(TimeUnit.MILLISECONDS, rnd.nextInt(1_000)));
cache.withExpiryPolicy(expPol).put(i, i);
}

injectTestSystemOut();

assertEquals(EXIT_CODE_OK, execute("--cache", "idle_verify"));

assertContains(log, testOut.toString(), "no conflicts have been found");
}

/** */
@Test
public void testCacheSequence() throws Exception {
@@ -1436,6 +1436,12 @@ public final class IgniteSystemProperties {
defaults = "" + IGNITE_BPLUS_TREE_LOCK_RETRIES_DEFAULT)
public static final String IGNITE_BPLUS_TREE_LOCK_RETRIES = "IGNITE_BPLUS_TREE_LOCK_RETRIES";

/**
* Disables secondary indexes B+Tree metrics.
*/
@SystemProperty(value = "Disables secondary indexes B+Tree metrics", defaults = "false")
public static final String IGNITE_BPLUS_TREE_DISABLE_METRICS = "IGNITE_BPLUS_TREE_DISABLE_METRICS";

/**
* Amount of memory reserved in the heap at node start, which can be dropped to increase the chances of success when
* handling OutOfMemoryError.
@@ -54,11 +54,15 @@

import static org.apache.ignite.cluster.ClusterState.INACTIVE;
import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR;
import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName;

/**
* Sorted index implementation.
*/
public class InlineIndexImpl extends AbstractIndex implements InlineIndex {
/** */
public static final String INDEX_METRIC_PREFIX = "index";

/** Unique ID. */
private final UUID id = UUID.randomUUID();

@@ -222,7 +226,7 @@ public InlineIndexImpl(GridCacheContext<?, ?> cctx, SortedIndexDefinition def, I

/** */
private boolean isSingleRowLookup(IndexRow lower, IndexRow upper) throws IgniteCheckedException {
return !cctx.mvccEnabled() && def.primary() && lower != null && isFullSchemaSearch(lower) && checkRowsTheSame(lower, upper);
return def.primary() && lower != null && isFullSchemaSearch(lower) && checkRowsTheSame(lower, upper);
}

/**
@@ -475,8 +479,6 @@ private InlineTreeFilterClosure filterClosure(IndexQueryContext qryCtx) {

MvccSnapshot v = qryCtx.mvccSnapshot();

assert !cctx.mvccEnabled() || v != null;

if (cacheFilter == null && v == null && qryCtx.rowFilter() == null)
return null;

@@ -555,6 +557,7 @@ private void destroy0(boolean softDel) throws IgniteCheckedException {
}

cctx.kernalContext().metric().remove(stats.metricRegistryName());
cctx.kernalContext().metric().remove(metricName(INDEX_METRIC_PREFIX, def.idxName().fullName()));

if (cctx.group().persistenceEnabled() ||
cctx.shared().kernalContext().state().clusterState().state() != INACTIVE) {