From 36abce13c84abc7b01e8caa653cc2d32838da8d4 Mon Sep 17 00:00:00 2001 From: Giuseppe Villani Date: Fri, 21 Jul 2023 17:30:27 +0200 Subject: [PATCH] [V8keNXu0] error details while using apoc.cypher.run* procedures (#3671) --- .../apoc.cypher/apoc.cypher.runFile.adoc | 3 + .../apoc.cypher/apoc.cypher.runFiles.adoc | 3 + .../apoc.cypher.runSchemaFile.adoc | 3 + .../apoc.cypher.runSchemaFiles.adoc | 3 + .../usage/config/apoc.cypher.runExtended.adoc | 12 + .../main/java/apoc/cypher/CypherExtended.java | 58 +++- .../java/apoc/cypher/CypherExtendedTest.java | 288 ++++++++++++++++-- .../src/test/resources/create_delete.cypher | 2 +- .../wrong_call_in_transactions.cypher | 6 + .../wrong_schema_statements_runtime.cypher | 1 + .../test/resources/wrong_statements.cypher | 1 + .../resources/wrong_statements_runtime.cypher | 2 + 12 files changed, 348 insertions(+), 34 deletions(-) create mode 100644 docs/asciidoc/modules/ROOT/partials/usage/config/apoc.cypher.runExtended.adoc create mode 100644 extended/src/test/resources/wrong_call_in_transactions.cypher create mode 100644 extended/src/test/resources/wrong_schema_statements_runtime.cypher create mode 100644 extended/src/test/resources/wrong_statements.cypher create mode 100644 extended/src/test/resources/wrong_statements_runtime.cypher diff --git a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFile.adoc b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFile.adoc index 829c1f88b1..f1c6a0750d 100644 --- a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFile.adoc +++ b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFile.adoc @@ -25,6 +25,9 @@ apoc.cypher.runFile(file :: STRING?, config = {} :: MAP?) :: (row :: INTEGER?, r |config|MAP?|{} |=== +== Config parameters +include::partial$usage/config/apoc.cypher.runExtended.adoc[] + == Output parameters [.procedures, opts=header] |=== diff --git a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFiles.adoc b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFiles.adoc index 050702cd79..5fe89fd18e 100644 --- a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFiles.adoc +++ b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runFiles.adoc @@ -25,6 +25,9 @@ apoc.cypher.runFiles(file :: LIST? OF STRING?, config = {} :: MAP?) :: (row :: I |config|MAP?|{} |=== +== Config parameters +include::partial$usage/config/apoc.cypher.runExtended.adoc[] + == Output parameters [.procedures, opts=header] |=== diff --git a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFile.adoc b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFile.adoc index 7c6595321e..59b4a9c5b3 100644 --- a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFile.adoc +++ b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFile.adoc @@ -25,6 +25,9 @@ apoc.cypher.runSchemaFile(file :: STRING?, config = {} :: MAP?) 
:: (row :: INTEG |config|MAP?|{} |=== +== Config parameters +include::partial$usage/config/apoc.cypher.runExtended.adoc[] + == Output parameters [.procedures, opts=header] |=== diff --git a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFiles.adoc b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFiles.adoc index 6a0e3122d2..b2f156b21a 100644 --- a/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFiles.adoc +++ b/docs/asciidoc/modules/ROOT/pages/overview/apoc.cypher/apoc.cypher.runSchemaFiles.adoc @@ -25,6 +25,9 @@ apoc.cypher.runSchemaFiles(file :: LIST? OF STRING?, config = {} :: MAP?) :: (ro |config|MAP?|{} |=== +== Config parameters +include::partial$usage/config/apoc.cypher.runExtended.adoc[] + == Output parameters [.procedures, opts=header] |=== diff --git a/docs/asciidoc/modules/ROOT/partials/usage/config/apoc.cypher.runExtended.adoc b/docs/asciidoc/modules/ROOT/partials/usage/config/apoc.cypher.runExtended.adoc new file mode 100644 index 0000000000..5d72591e4e --- /dev/null +++ b/docs/asciidoc/modules/ROOT/partials/usage/config/apoc.cypher.runExtended.adoc @@ -0,0 +1,12 @@ +The procedure supports the following config parameters: + +.Config parameters +[opts=header, cols="1,1,1,5"] +|=== +| name | type | default | description +| reportError | boolean | false | If an error occurs, returns an additional row with key `error` and the error message as its value. +| statistics | boolean | true | Returns an additional row with the query statistics, leveraging the `org.neo4j.graphdb.QueryStatistics` API. +| timeout | long | 10 | The single query timeout (in seconds). +| queueCapacity | long | 100 | The capacity of the `java.util.concurrent.BlockingQueue` used to aggregate the results. +| parameters | Map | Empty map | Optional parameter map to be used with the `apoc.schema.runFile` and `apoc.schema.runFiles` procedures.
+|=== \ No newline at end of file diff --git a/extended/src/main/java/apoc/cypher/CypherExtended.java b/extended/src/main/java/apoc/cypher/CypherExtended.java index aca0866e0f..84c5b80c65 100644 --- a/extended/src/main/java/apoc/cypher/CypherExtended.java +++ b/extended/src/main/java/apoc/cypher/CypherExtended.java @@ -6,6 +6,7 @@ import apoc.util.CompressionAlgo; import apoc.util.FileUtils; import apoc.util.QueueBasedSpliterator; +import apoc.util.QueueUtil; import apoc.util.Util; import apoc.util.collection.Iterators; import org.apache.commons.lang3.StringUtils; @@ -93,13 +94,14 @@ public Stream runFiles(@Name("file") List fileNames, @Name(va // This runs the files sequentially private Stream runFiles(List fileNames, Map config, Map parameters, boolean schemaOperation) { + boolean reportError = Util.toBoolean(config.get("reportError")); boolean addStatistics = Util.toBoolean(config.getOrDefault("statistics",true)); int timeout = Util.toInteger(config.getOrDefault("timeout",10)); int queueCapacity = Util.toInteger(config.getOrDefault("queueCapacity",100)); var result = fileNames.stream().flatMap(fileName -> { final Reader reader = readerForFile(fileName); final Scanner scanner = createScannerFor(reader); - return runManyStatements(scanner, parameters, schemaOperation, addStatistics, timeout, queueCapacity) + return runManyStatements(scanner, parameters, schemaOperation, addStatistics, timeout, queueCapacity, reportError, fileName) .onClose(() -> Util.close(scanner, (e) -> log.info("Cannot close the scanner for file " + fileName + " because the following exception", e))); }); @@ -120,12 +122,12 @@ public Stream runSchemaFiles(@Name("file") List fileNames, @N return runFiles(fileNames, config, parameters, schemaOperation); } - private Stream runManyStatements(Scanner scanner, Map params, boolean schemaOperation, boolean addStatistics, int timeout, int queueCapacity) { + private Stream runManyStatements(Scanner scanner, Map params, boolean schemaOperation, boolean addStatistics, int timeout, int queueCapacity, boolean reportError, String fileName) { BlockingQueue queue = runInSeparateThreadAndSendTombstone(queueCapacity, internalQueue -> { if (schemaOperation) { - runSchemaStatementsInTx(scanner, internalQueue, params, addStatistics, timeout); + runSchemaStatementsInTx(scanner, internalQueue, params, addStatistics, timeout, reportError, fileName); } else { - runDataStatementsInTx(scanner, internalQueue, params, addStatistics, timeout); + runDataStatementsInTx(scanner, internalQueue, params, addStatistics, timeout, reportError, fileName); } }, RowResult.TOMBSTONE); return StreamSupport.stream(new QueueBasedSpliterator<>(queue, RowResult.TOMBSTONE, terminationGuard, Integer.MAX_VALUE), false); @@ -155,18 +157,34 @@ private BlockingQueue runInSeparateThreadAndSendTombstone(int queueCapaci return queue; } - private void runDataStatementsInTx(Scanner scanner, BlockingQueue queue, Map params, boolean addStatistics, long timeout) { + private void runDataStatementsInTx(Scanner scanner, BlockingQueue queue, Map params, boolean addStatistics, long timeout, boolean reportError, String fileName) { while (scanner.hasNext()) { String stmt = removeShellControlCommands(scanner.next()); if (stmt.trim().isEmpty()) continue; - if (!isSchemaOperation(stmt)) { + boolean schemaOperation; + try { + schemaOperation = isSchemaOperation(stmt); + } catch (Exception e) { + getError(queue, reportError, e, fileName); + return; + } + + if (!schemaOperation) { if (isPeriodicOperation(stmt)) { - Util.inThread(pools , () -> 
db.executeTransactionally(stmt, params, result -> consumeResult(result, queue, addStatistics, timeout))); + Util.inThread(pools , () -> { + try { + return db.executeTransactionally(stmt, params, result -> consumeResult(result, queue, addStatistics, timeout)); + } catch (Exception e) { + return getError(queue, reportError, e, fileName); + } + }); } else { Util.inTx(db, pools, threadTx -> { try (Result result = threadTx.execute(stmt, params)) { return consumeResult(result, queue, addStatistics, timeout); + } catch (Exception e) { + return getError(queue, reportError, e, fileName); } }); } @@ -174,20 +192,42 @@ private void runDataStatementsInTx(Scanner scanner, BlockingQueue que } } + private Object getError(BlockingQueue queue, boolean reportError, Exception e, String fileName) { + if (reportError) { + String error = String.format("Error in `%s`:\n%s ", + fileName, e.getMessage() + ); + + RowResult result = new RowResult(-1, Map.of("error", error)); + QueueUtil.put(queue, result, 10); + return null; + } + throw new RuntimeException(e); + } + private Scanner createScannerFor(Reader reader) { Scanner scanner = new Scanner(reader); scanner.useDelimiter(";\r?\n"); return scanner; } - private void runSchemaStatementsInTx(Scanner scanner, BlockingQueue queue, Map params, boolean addStatistics, long timeout) { + private void runSchemaStatementsInTx(Scanner scanner, BlockingQueue queue, Map params, boolean addStatistics, long timeout, boolean reportError, String fileName) { while (scanner.hasNext()) { String stmt = removeShellControlCommands(scanner.next()); if (stmt.trim().isEmpty()) continue; - if (isSchemaOperation(stmt)) { + boolean schemaOperation; + try { + schemaOperation = isSchemaOperation(stmt); + } catch (Exception e) { + getError(queue, reportError, e, fileName); + return; + } + if (schemaOperation) { Util.inTx(db, pools, txInThread -> { try (Result result = txInThread.execute(stmt, params)) { return consumeResult(result, queue, addStatistics, timeout); + } catch (Exception e) { + return getError(queue, reportError, e, fileName); } }); } diff --git a/extended/src/test/java/apoc/cypher/CypherExtendedTest.java b/extended/src/test/java/apoc/cypher/CypherExtendedTest.java index a9c78edc08..729263cc6c 100644 --- a/extended/src/test/java/apoc/cypher/CypherExtendedTest.java +++ b/extended/src/test/java/apoc/cypher/CypherExtendedTest.java @@ -8,6 +8,8 @@ import org.junit.*; import org.junit.rules.ExpectedException; import org.neo4j.configuration.GraphDatabaseSettings; +import org.neo4j.graphdb.QueryExecutionException; +import org.neo4j.graphdb.Result; import org.neo4j.graphdb.Transaction; import org.neo4j.graphdb.schema.ConstraintDefinition; import org.neo4j.graphdb.schema.IndexDefinition; @@ -34,6 +36,7 @@ import static apoc.util.TestUtil.testCallEmpty; import static apoc.util.TestUtil.testResult; import static apoc.util.Util.map; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasEntry; import static org.junit.Assert.*; import static org.neo4j.driver.internal.util.Iterables.count; @@ -66,6 +69,10 @@ public static void tearDown() { @After public void clearDB() { db.executeTransactionally("MATCH (n) DETACH DELETE n"); + clearSchema(); + } + + private void clearSchema() { try (Transaction tx = db.beginTx()) { tx.schema().getConstraints().forEach(ConstraintDefinition::drop); tx.schema().getIndexes().forEach(IndexDefinition::drop); @@ -138,17 +145,8 @@ private long toLong(Object value) { public void testRunFile() throws Exception { testResult(db, "CALL 
apoc.cypher.runFile('create_delete.cypher')", r -> { - Map row = r.next(); - assertEquals(-1L, row.get("row")); - Map result = (Map) row.get("result"); - assertEquals(1L, toLong(result.get("nodesCreated"))); - assertEquals(1L, toLong(result.get("labelsAdded"))); - assertEquals(1L, toLong(result.get("propertiesSet"))); - row = r.next(); - result = (Map) row.get("result"); - assertEquals(-1L, row.get("row")); - assertEquals(1L, toLong(result.get("nodesDeleted"))); - assertEquals(false, r.hasNext()); + assertCreateDeleteFile(r); + assertFalse(r.hasNext()); }); } @@ -225,7 +223,7 @@ public void shouldNotFailWithTransactionErrorWithMapParallel2() { testResult(db, "MATCH (p:Page) WITH collect(p) as pages\n" + "CALL apoc.cypher.mapParallel2(\"MATCH (_)-[:Link]->(p1)<-[:Link]-(p2)\n" + "RETURN p2.title as title\", {}, pages, 1) yield value\n" + - "RETURN value.title limit 5", + "RETURN value.title limit 5", r -> assertEquals(5, Iterators.count(r))); } @@ -287,6 +285,225 @@ public void testRunFilesMultiple() throws Exception { }); } + @Test + public void testParallelWithFailingStatement() { + String call = """ + MATCH (t:Test {a: ""}) WITH collect(t) as nodes + call apoc.cypher.parallel("RETURN date(n.a)", {n: nodes}, "n") + yield value return value"""; + testFailingParallelCommon(call); + } + + @Test + public void testParallel2WithFailingStatement() { + String call = """ + MATCH (t:Test {a: ""}) WITH collect(t) as nodes + call apoc.cypher.parallel2("RETURN date(n.a)", {n: nodes}, "n") + yield value return value"""; + testFailingParallelCommon(call); + } + + @Test + public void testMapParallelWithFailingStatement() { + String call = """ + MATCH (t:Test) WITH collect(t) as nodes + CALL apoc.cypher.mapParallel("MATCH (_) RETURN date(_.a)", {}, nodes) + YIELD value RETURN value"""; + testFailingParallelCommon(call); + } + + private static void testFailingParallelCommon(String query) { + db.executeTransactionally("CREATE (t:Test {a: ''});"); + + QueryExecutionException error = assertThrows(QueryExecutionException.class, + () -> testCall(db, query, (r) -> {}) + ); + + assertThat(error.getMessage(), containsString("Text cannot be parsed to a Date")); + } + + @Test + public void testRunFileWithFailingStatement() { + String failingFile = "wrong_statements_runtime.cypher"; + String cypherError = "already exists with label `Fail` and property `foo` = 1"; + testRunFailingFileCommon(failingFile, cypherError); + } + + @Test + public void testRunFileWithFailingPeriodicStatement() { + String failingFile = "wrong_call_in_transactions.cypher"; + String cypherError = "already exists with label `Fail` and property `foo` = 1"; + testRunFailingFileCommon(failingFile, cypherError); + } + + @Test + public void testRunFileWithFailingExplain() { + // error during CypherExtended.isSchemaOperation method + String failingFile = "wrong_statements.cypher"; + String cypherError = "Invalid input ')': expected"; + testRunFailingFileCommon(failingFile, cypherError); + } + + private void testRunFailingFileCommon(String failingFile, String cypherError) { + db.executeTransactionally("CREATE CONSTRAINT FOR (n:Fail) REQUIRE n.foo IS UNIQUE"); + db.executeTransactionally("CREATE (n:Fail {foo: 1})"); + + // the failed file is skipped + testCallEmpty(db, "CALL apoc.cypher.runFile($file)", Map.of("file", failingFile)); + + // the failed file produces an "error" row + testCall(db, "CALL apoc.cypher.runFile($file, {reportError: true})", Map.of("file", failingFile), r -> { + assertErrorResult(cypherError, r); + }); + } + + @Test + public void 
testRunSchemaFileWithFailingStatement() { + db.executeTransactionally("CREATE CONSTRAINT uniqueConstraint FOR (n:Person) REQUIRE n.name IS UNIQUE"); + String failingFile = "constraints.cypher"; + + // the failed file is skipped + testCallEmpty(db, "CALL apoc.cypher.runSchemaFile($file)", + Map.of("file", failingFile)); + + // the failed file produces an "error" row + testCall(db, "CALL apoc.cypher.runSchemaFile($file, {reportError: true})", + Map.of("file", failingFile), + row -> { + String cypherError = "Error in `constraints.cypher`:\n" + + "An equivalent constraint already exists"; + assertErrorResult(cypherError, row); + }); + } + + @Test + public void testRunSchemaFileWithFailingExplain() { + // error during CypherExtended.isSchemaOperation method + String failingFile = "wrong_schema_statements_runtime.cypher"; + + // the failed file is skipped + testCallEmpty(db, "CALL apoc.cypher.runSchemaFile($file)", + Map.of("file", failingFile)); + + clearSchema(); + + // the failed file produces an "error" row + testCall(db, "CALL apoc.cypher.runSchemaFile($file, {reportError: true})", + Map.of("file", failingFile), + row -> { + String cypherError = "Error in `wrong_schema_statements_runtime.cypher`:\n" + + "Variable `bar` not defined"; + assertErrorResult(cypherError, row); + }); + } + + @Test + public void testRunSchemaFilesWithFailingStatement() { + String failingFile = "constraints.cypher"; + String cypherError = "Error in `constraints.cypher`:\n" + + "An equivalent constraint already exists"; + + testRunFailingSchemaFilesCommon(failingFile, cypherError); + } + + @Test + public void testRunSchemaFilesWithFailingExplain() { + // error during CypherExtended.isSchemaOperation method + String failingFile = "wrong_schema_statements_runtime.cypher"; + String cypherError = "Error in `wrong_schema_statements_runtime.cypher`:\n" + + "Variable `bar` not defined"; + + testRunFailingSchemaFilesCommon(failingFile, cypherError); + } + + private void testRunFailingSchemaFilesCommon(String failingFile, String cypherError) { + List files = List.of("constraints.cypher", failingFile, "schema.cypher"); + + testResult(db, "CALL apoc.cypher.runSchemaFiles($files)", + Map.of("files", files), + r -> { + assertConstraintsCypherFile(r); + assertSchemaCypherFile(r); + assertFalse(r.hasNext()); + }); + + clearSchema(); + + // the failed file produces an "error" row + testResult(db, "CALL apoc.cypher.runSchemaFiles($files, {reportError: true})", + Map.of("files", files), + r -> { + assertConstraintsCypherFile(r); + + Map row = r.next(); + assertErrorResult(cypherError, row); + + assertSchemaCypherFile(r); + assertFalse(r.hasNext()); + }); + } + + private void assertConstraintsCypherFile(Result r) { + Map row = r.next(); + Map result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("constraintsAdded"))); + row = r.next(); + result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("constraintsAdded"))); + } + + + @Test + public void testRunFilesWithFailingStatement() { + String failingFile = "wrong_statements_runtime.cypher"; + String cypherError = "already exists with label `Fail` and property `foo` = 1"; + + testRunFailingFilesCommon(failingFile, cypherError); + } + + @Test + public void testRunFilesWithFailingPeriodicStatement() { + String failingFile = "wrong_call_in_transactions.cypher"; + String cypherError = "already exists with label `Fail` and property `foo` = 1"; + testRunFailingFilesCommon(failingFile, cypherError); + } + + @Test + public void testRunFilesWithFailingExplain() { + 
// error during CypherExtended.isSchemaOperation method + String failingFile = "wrong_statements.cypher"; + String cypherError = "Invalid input ')': expected"; + testRunFailingFilesCommon(failingFile, cypherError); + } + + private void testRunFailingFilesCommon(String failingFile, String cypherError) { + db.executeTransactionally("CREATE CONSTRAINT FOR (n:Fail) REQUIRE n.foo IS UNIQUE"); + db.executeTransactionally("CREATE (n:Fail {foo: 1})"); + + List files = List.of("create_delete.cypher", failingFile, "create_delete.cypher"); + + // the failed file is skipped + testResult(db, "CALL apoc.cypher.runFiles($files)", + Map.of("files", files), + r -> { + assertCreateDeleteFile(r); + assertCreateDeleteFile(r); + assertFalse(r.hasNext()); + }); + + // the failed file produces an "error" row + testResult(db, "CALL apoc.cypher.runFiles($files, {reportError: true})", + Map.of("files", files), + r -> { + assertCreateDeleteFile(r); + + Map next = r.next(); + assertErrorResult(cypherError, next); + assertCreateDeleteFile(r); + assertFalse(r.hasNext()); + }); + } + @Test public void testSchemaRunFile() { final int expectedBefore; @@ -296,18 +513,7 @@ public void testSchemaRunFile() { testResult(db, "CALL apoc.cypher.runSchemaFile('schema.cypher')", r -> { - Map row = r.next(); - Map result = (Map) row.get("result"); - assertEquals(1L, toLong(result.get("indexesAdded"))); - row = r.next(); - result = (Map) row.get("result"); - assertEquals(1L, toLong(result.get("indexesAdded"))); - row = r.next(); - result = (Map) row.get("result"); - assertEquals(1L, toLong(result.get("indexesAdded"))); - row = r.next(); - result = (Map) row.get("result"); - assertEquals(1L, toLong(result.get("indexesAdded"))); + assertSchemaCypherFile(r); assertFalse(r.hasNext()); }); @@ -316,6 +522,21 @@ public void testSchemaRunFile() { } } + private void assertSchemaCypherFile(Result r) { + Map row = r.next(); + Map result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("indexesAdded"))); + row = r.next(); + result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("indexesAdded"))); + row = r.next(); + result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("indexesAdded"))); + row = r.next(); + result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("indexesAdded"))); + } + @Test public void testSchemaRunFiles() { schemaAssertions(Collections.emptyList(), Collections.emptyList()); @@ -376,4 +597,23 @@ public void lengthyRunManyShouldTerminate() { }); } + + private static void assertErrorResult(String cypherError, Map r) { + assertEquals(-1L, r.get("row")); + Map result = (Map) r.get("result"); + assertThat(result.get("error"), containsString(cypherError)); + } + + private void assertCreateDeleteFile(Result r) { + Map row = r.next(); + assertEquals(-1L, row.get("row")); + Map result = (Map) row.get("result"); + assertEquals(1L, toLong(result.get("nodesCreated"))); + assertEquals(1L, toLong(result.get("labelsAdded"))); + assertEquals(1L, toLong(result.get("propertiesSet"))); + row = r.next(); + result = (Map) row.get("result"); + assertEquals(-1L, row.get("row")); + assertEquals(1L, toLong(result.get("nodesDeleted"))); + } } diff --git a/extended/src/test/resources/create_delete.cypher b/extended/src/test/resources/create_delete.cypher index b81493c034..135ae5953e 100644 --- a/extended/src/test/resources/create_delete.cypher +++ b/extended/src/test/resources/create_delete.cypher @@ -1,4 +1,4 @@ CREATE (n:Node {id:1}); -MATCH (n) +MATCH (n:Node) DELETE n; diff --git 
a/extended/src/test/resources/wrong_call_in_transactions.cypher b/extended/src/test/resources/wrong_call_in_transactions.cypher new file mode 100644 index 0000000000..8808469af7 --- /dev/null +++ b/extended/src/test/resources/wrong_call_in_transactions.cypher @@ -0,0 +1,6 @@ +MATCH (n:Fail) +CALL { + WITH n + CREATE (:Fail {foo: 1}) +} IN TRANSACTIONS OF 1 ROW; + diff --git a/extended/src/test/resources/wrong_schema_statements_runtime.cypher b/extended/src/test/resources/wrong_schema_statements_runtime.cypher new file mode 100644 index 0000000000..dbdf6100b1 --- /dev/null +++ b/extended/src/test/resources/wrong_schema_statements_runtime.cypher @@ -0,0 +1 @@ +CREATE INDEX CustomerIndex1 FOR (foo:Something) ON (bar.name); diff --git a/extended/src/test/resources/wrong_statements.cypher b/extended/src/test/resources/wrong_statements.cypher new file mode 100644 index 0000000000..7cb0b92dd8 --- /dev/null +++ b/extended/src/test/resources/wrong_statements.cypher @@ -0,0 +1 @@ +CREATE (n:Person{id:1); \ No newline at end of file diff --git a/extended/src/test/resources/wrong_statements_runtime.cypher b/extended/src/test/resources/wrong_statements_runtime.cypher new file mode 100644 index 0000000000..7a8b3ddfd4 --- /dev/null +++ b/extended/src/test/resources/wrong_statements_runtime.cypher @@ -0,0 +1,2 @@ +CREATE (n:Fail {foo: 1}); +
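
A minimal usage sketch of the `reportError` flag documented in the new `apoc.cypher.runExtended.adoc` partial, assuming the `wrong_statements_runtime.cypher` fixture added by this patch together with the uniqueness constraint on `:Fail(foo)` and a conflicting `(:Fail {foo: 1})` node (as set up in `testRunFileWithFailingStatement`); the error text in the comments is illustrative:

[source,cypher]
----
// Preparation mirroring the test setup: a uniqueness constraint plus a node that
// makes the `CREATE (n:Fail {foo: 1})` statement in wrong_statements_runtime.cypher fail.
CREATE CONSTRAINT FOR (n:Fail) REQUIRE n.foo IS UNIQUE;
CREATE (n:Fail {foo: 1});

// Default behaviour (reportError: false): the failing file is skipped and yields no rows.
CALL apoc.cypher.runFile('wrong_statements_runtime.cypher');

// With reportError: true the failure is surfaced as a row with row = -1 and a result map
// whose `error` key starts with "Error in `wrong_statements_runtime.cypher`:".
CALL apoc.cypher.runFile('wrong_statements_runtime.cypher', {reportError: true})
YIELD row, result
RETURN row, result.error AS error;
----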
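
The multi-file procedures behave the same way: with `reportError` enabled the failing file contributes a single error row while the other files still run, as exercised by `testRunFilesWithFailingStatement`. A sketch using the same constraint and `:Fail` node as above:

[source,cypher]
----
// create_delete.cypher succeeds twice; wrong_statements_runtime.cypher contributes
// one row with row = -1 and result.error describing the constraint violation.
CALL apoc.cypher.runFiles(
  ['create_delete.cypher', 'wrong_statements_runtime.cypher', 'create_delete.cypher'],
  {reportError: true}
)
YIELD row, result
RETURN row, result;
----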