diff --git a/Advanced/CsvStreaming/Readme.md b/Advanced/CsvStreaming/Readme.md
deleted file mode 100644
index ef12dd31..00000000
--- a/Advanced/CsvStreaming/Readme.md
+++ /dev/null
@@ -1,7 +0,0 @@
-## Streaming of large CSV documents
-
-Streaming in Templater is done by multiple calls to process API.
-This allows to Templater to flush the content of populated stream and reuse memory in next call to process API.
-
-Streaming can be done only up to row without tags. This means that first non-streaming tags should be processed (if there are any)
-and then streaming tags can be processed which will perform flushing.
\ No newline at end of file
diff --git a/Advanced/README.md b/Advanced/README.md
index 2b392b6c..0cd69b5c 100644
--- a/Advanced/README.md
+++ b/Advanced/README.md
@@ -44,11 +44,12 @@ Consuming embedded CSV or Excel table via Power Query (Requires Excel 2010+)
[template](PowerQuery/template/PowerQuery.xlsx?raw=true) - [result](PowerQuery/result.xlsx?raw=true)
-### [CSV streaming](CsvStreaming/Readme.md)
+### [CSV streaming](Streaming/Readme.md)
-Stream CSV while processing to support huge exports
+Stream CSV/XML while processing to support huge exports
-[template](CsvStreaming/template/input.csv) - [result](CsvStreaming/result.csv)
+[csv template](Streaming/template/input.csv) - [result](Streaming/result.csv)
+[xml template](Streaming/template/input.xml) - [result](Streaming/result.xml)
### [Various JSON examples](TemplaterServer/Readme.md)
diff --git a/Advanced/Streaming/Readme.md b/Advanced/Streaming/Readme.md
new file mode 100644
index 00000000..cf1f4946
--- /dev/null
+++ b/Advanced/Streaming/Readme.md
@@ -0,0 +1,9 @@
+## Streaming of large documents
+
+Streaming in Templater is supported out of the box if streaming type is used (ResultSet/Iterator/Enumerator).
+Alternatively streaming can be simulated manually by multiple calls to process API.
+
+Both methods allow Templater to flush the content of populated stream and reuse memory in next call to process API.
+
+Streaming can be done only up to a row without tags. This means that first non-streaming tags should be processed (if there are any)
+and then streaming tags can be processed which will perform flushing.
\ No newline at end of file
diff --git a/Advanced/CsvStreaming/CsvStreaming.csproj b/Advanced/Streaming/Streaming.csproj
similarity index 91%
rename from Advanced/CsvStreaming/CsvStreaming.csproj
rename to Advanced/Streaming/Streaming.csproj
index 305735c7..1920dd92 100644
--- a/Advanced/CsvStreaming/CsvStreaming.csproj
+++ b/Advanced/Streaming/Streaming.csproj
@@ -8,8 +8,8 @@
{5BB2AABB-A28F-404F-8C37-DBE122E893F5}ExeProperties
- CsvStreaming
- CsvStreaming
+ Streaming
+ Streamingv4.0Client512
@@ -39,7 +39,7 @@
..\..\packages\DotNetZip.1.13.0\lib\net40\DotNetZip.dllTrue
-
+ ..\..\packages\Templater.7.0.0\lib\Net40\NGS.Templater.dllFalse
@@ -65,6 +65,11 @@
Always
+
+
+ Always
+
+
diff --git a/Advanced/CsvStreaming/packages.config b/Advanced/Streaming/packages.config
similarity index 100%
rename from Advanced/CsvStreaming/packages.config
rename to Advanced/Streaming/packages.config
diff --git a/Advanced/CsvStreaming/pom.xml b/Advanced/Streaming/pom.xml
similarity index 94%
rename from Advanced/CsvStreaming/pom.xml
rename to Advanced/Streaming/pom.xml
index f86e46f6..a980f2b1 100644
--- a/Advanced/CsvStreaming/pom.xml
+++ b/Advanced/Streaming/pom.xml
@@ -2,10 +2,10 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
4.0.0hr.ngs.templater.example
- csv-streaming-example
+ streaming-examplejar7.0.0
- CSV streaming
+ Streaminghttps://github.com/ngs-doo/TemplaterExamples
diff --git a/Advanced/CsvStreaming/result.csv b/Advanced/Streaming/result.csv
similarity index 100%
rename from Advanced/CsvStreaming/result.csv
rename to Advanced/Streaming/result.csv
diff --git a/Advanced/Streaming/result.xml b/Advanced/Streaming/result.xml
new file mode 100644
index 00000000..aaa9d0ef
--- /dev/null
+++ b/Advanced/Streaming/result.xml
@@ -0,0 +1,165 @@
+
+
+
+
+ 260
+ 27. 07. 2019.
+
+ reference0
+ branch0
+
+
+
+ 260
+ 27. 07. 2019.
+
+ reference1
+ branch1
+
+
+
+ 260
+ 27. 07. 2019.
+
+ reference2
+ branch2
+
+ -
+
+ 260
+ 27. 07. 2019.
+
+ reference3
+ branch3
+
+ ...
+
+ 261
+ 27. 07. 2019.
+
+ reference4
+ branch4
+
+ IMPORTANT
+
+ 261
+ 27. 07. 2019.
+
+ reference5
+ branch5
+
+ REMINDER
+
+ 261
+ 27. 07. 2019.
+
+ reference6
+ branch6
+
+ something to look "into later
+
+ 261
+ 27. 07. 2019.
+
+ reference7
+ branch7
+
+ special" char,
+
+ 261
+ 27. 07. 2019.
+
+ reference8
+ branch8
+
+
+
+ 261
+ 27. 07. 2019.
+
+ reference9
+ branch9
+
+
+
+ 261
+ 27. 07. 2019.
+
+ reference10
+ branch10
+
+ -
+
+ 261
+ 27. 07. 2019.
+
+ reference11
+ branch11
+
+ ...
+
+ 262
+ 27. 07. 2019.
+
+ reference12
+ branch12
+
+ IMPORTANT
+
+ 262
+ 27. 07. 2019.
+
+ reference13
+ branch13
+
+ REMINDER
+
+ 262
+ 27. 07. 2019.
+
+ reference14
+ branch14
+
+ something to look "into later
+
+ 262
+ 27. 07. 2019.
+
+ reference15
+ branch15
+
+ special" char,
+
+ 262
+ 27. 07. 2019.
+
+ reference16
+ branch16
+
+
+
+ 262
+ 27. 07. 2019.
+
+ reference17
+ branch17
+
+
+
+ 262
+ 27. 07. 2019.
+
+ reference18
+ branch18
+
+ -
+
+ 262
+ 27. 07. 2019.
+
+ reference19
+ branch19
+
+ ...
+
+
\ No newline at end of file
diff --git a/Advanced/CsvStreaming/src/Program.cs b/Advanced/Streaming/src/Program.cs
similarity index 50%
rename from Advanced/CsvStreaming/src/Program.cs
rename to Advanced/Streaming/src/Program.cs
index fb5bcdad..284ff399 100644
--- a/Advanced/CsvStreaming/src/Program.cs
+++ b/Advanced/Streaming/src/Program.cs
@@ -1,4 +1,5 @@
using System;
+using System.Collections;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
@@ -8,7 +9,7 @@
using Ionic.Zip;
using NGS.Templater;
-namespace CsvStreaming
+namespace Streaming
{
public class Program
{
@@ -45,7 +46,7 @@ struct StreamingRow
public string verifiedBy;
public DateTime verifiedOn;
- public StreamingRow(DataTableReader reader)
+ public StreamingRow(IDataReader reader)
{
id = reader.GetInt32(0);
amount = reader.GetDecimal(1);
@@ -59,6 +60,22 @@ public StreamingRow(DataTableReader reader)
verifiedBy = reader.IsDBNull(9) ? null : reader.GetString(9);
verifiedOn = reader.GetDateTime(10);
}
+
+ public class ReaderIterator : IEnumerator
+ {
+ private readonly IDataReader Reader;
+
+ public ReaderIterator(IDataReader reader)
+ {
+ this.Reader = reader;
+ }
+
+ public StreamingRow Current { get { return new StreamingRow(Reader); } }
+ object IEnumerator.Current { get { return Current; } }
+ public bool MoveNext() { return Reader.Read(); }
+ public void Reset() { }
+ public void Dispose() { }
+ }
}
public static void Main(string[] args)
@@ -97,42 +114,77 @@ public static void Main(string[] args)
startTimestamp.AddMinutes(i)
);
}
- var reader = table.CreateDataReader();
- var config = Configuration.Builder.Include(Quoter);
+ var reader1 = table.CreateDataReader();
+ var reader2 = table.CreateDataReader();
+ var reader3 = table.CreateDataReader();
+ var csvConfig = Configuration.Builder.Include(Quoter); //we need quoting as we are simulating CSV
+ var xmlConfig = Configuration.Builder; //we don't need quoting as XML is natively supported
//if we are using a culture which has comma as decimal separator, change the output to dot
//we could apply this always, but it adds a bit of overhead, so let's apply it conditionally
if (Thread.CurrentThread.CurrentCulture.NumberFormat.NumberDecimalSeparator.Contains(","))
- config.Include(NumberAsDot);
+ {
+ csvConfig.Include(NumberAsDot);
+ xmlConfig.Include(NumberAsDot);
+ }
+ csvConfig.Streaming(50000);//by default streaming is 16k, lets leave the default for xml
+ var csvFactory = csvConfig.Build();
+ var xmlFactory = xmlConfig.Build();
//for example purposes we will stream it a zip file
using (var zip = new ZipOutputStream("output.zip"))
{
- zip.PutNextEntry("output.csv");
- using (var doc = config.Build().Open(File.OpenRead("template/input.csv"), "csv", zip))
+ zip.PutNextEntry("manual.csv");
+ var sw = Stopwatch.StartNew();
+ ManualStreaming(reader1, csvFactory, zip);
+ Console.WriteLine("manual csv took: " + sw.ElapsedMilliseconds);
+ zip.PutNextEntry("automatic.csv");
+ sw = Stopwatch.StartNew();
+ AutomaticStreaming(reader2, csvFactory, "csv", zip);
+ Console.WriteLine("automatic csv took: " + sw.ElapsedMilliseconds);
+ zip.PutNextEntry("data.xml");
+ sw = Stopwatch.StartNew();
+ AutomaticStreaming(reader3, xmlFactory, "xml", zip);
+ Console.WriteLine("automatic xml took: " + sw.ElapsedMilliseconds);
+ }
+ Process.Start(new ProcessStartInfo("output.zip") { UseShellExecute = true });
+ }
+
+ private static void ManualStreaming(IDataReader reader, IDocumentFactory factory, ZipOutputStream zip)
+ {
+ using (var doc = factory.Open(File.OpenRead("template/input.csv"), "csv", zip))
+ {
+ //streaming processing assumes we have only a single collection, which means we first need to process all other tags
+ doc.Process(new { filter = new { date = "All", user = "All" } });
+ //to do a streaming processing we need to process collection in chunks
+ var chunk = new List(50000);
+ var hasData = reader.Read();
+ while (hasData)
{
- //streaming processing assumes we have only a single collection, which means we first need to process all other tags
- doc.Process(new { filter = new { date = "All", user = "All" } });
- //to do a streaming processing we need to process collection in chunks
- var chunk = new List(50000);
- var hasData = reader.Read();
- while (hasData)
+ //one way of doing streaming is first duplicating the template row (context)
+ doc.Templater.Resize(doc.Templater.Tags, 2);
+ //and then process that row with all known data
+ //this way we will have additional row to process (or remove) later
+ do
{
- //one way of doing streaming is first duplicating the template row (context)
- doc.Templater.Resize(doc.Templater.Tags, 2);
- //and then process that row with all known data
- //this way we will have additional row to process (or remove) later
- do
- {
- chunk.Add(new StreamingRow(reader));
- hasData = reader.Read();
- } while (chunk.Count < 50000 && hasData);
- doc.Process(new { data = chunk });
- chunk.Clear();
- }
- //remove remaining rows
- doc.Templater.Resize(doc.Templater.Tags, 0);
+ chunk.Add(new StreamingRow(reader));
+ hasData = reader.Read();
+ } while (chunk.Count < 50000 && hasData);
+ doc.Process(new { data = chunk });
+ chunk.Clear();
}
+ //remove remaining rows
+ doc.Templater.Resize(doc.Templater.Tags, 0);
+ }
+ }
+
+ private static void AutomaticStreaming(IDataReader reader, IDocumentFactory factory, string extension, ZipOutputStream zip)
+ {
+ using (var doc = factory.Open(File.OpenRead("template/input." + extension), extension, zip))
+ {
+ //we still want to make sure all non collection tags are processed first (or they are at the end of document)
+ doc.Process(new { filter = new { date = "All", user = "All" } });
+ //for streaming lets just pass enumerator for processing
+ doc.Process(new { data = new StreamingRow.ReaderIterator(reader) });
}
- Process.Start(new ProcessStartInfo("output.zip") { UseShellExecute = true });
}
}
}
diff --git a/Advanced/CsvStreaming/src/main/java/hr/ngs/templater/example/CsvStreamingExample.java b/Advanced/Streaming/src/main/java/hr/ngs/templater/example/StreamingExample.java
similarity index 51%
rename from Advanced/CsvStreaming/src/main/java/hr/ngs/templater/example/CsvStreamingExample.java
rename to Advanced/Streaming/src/main/java/hr/ngs/templater/example/StreamingExample.java
index 8743a696..b9099d11 100644
--- a/Advanced/CsvStreaming/src/main/java/hr/ngs/templater/example/CsvStreamingExample.java
+++ b/Advanced/Streaming/src/main/java/hr/ngs/templater/example/StreamingExample.java
@@ -1,6 +1,7 @@
package hr.ngs.templater.example;
import hr.ngs.templater.Configuration;
+import hr.ngs.templater.DocumentFactory;
import hr.ngs.templater.DocumentFactoryBuilder;
import hr.ngs.templater.TemplateDocument;
@@ -13,10 +14,11 @@
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.ArrayList;
+import java.util.Iterator;
import java.util.Locale;
import java.util.zip.*;
-public class CsvStreamingExample {
+public class StreamingExample {
static class Quoter implements DocumentFactoryBuilder.LowLevelReplacer {
@@ -58,7 +60,7 @@ public static class StreamingRow {
public String verifiedBy;
public Timestamp verifiedOn;
- public StreamingRow(ResultSet rs) throws SQLException {
+ public StreamingRow(ResultSet rs) throws SQLException {
id = rs.getInt(1);
amount = rs.getBigDecimal(2);
date = rs.getDate(3);
@@ -71,10 +73,35 @@ public StreamingRow(ResultSet rs) throws SQLException {
verifiedBy = rs.getString(10);
verifiedOn = rs.getTimestamp(11);
}
+
+ public static class RsIterator implements Iterator {
+ private final ResultSet rs;
+ private boolean hasNext;
+
+ public RsIterator(ResultSet rs) throws SQLException {
+ this.rs = rs;
+ this.hasNext = rs.next();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return hasNext;
+ }
+
+ @Override
+ public StreamingRow next() {
+ try {
+ StreamingRow row = new StreamingRow(rs);
+ hasNext = rs.next();
+ return row;
+ } catch (SQLException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
}
public static void main(final String[] args) throws Exception {
- InputStream templateStream = CsvStreamingExample.class.getResourceAsStream("/input.csv");
File tmp = File.createTempFile("output", ".zip");
Class.forName("org.hsqldb.jdbcDriver");
@@ -114,41 +141,89 @@ public static void main(final String[] args) throws Exception {
ins.setTimestamp(11, new java.sql.Timestamp(startTimestamp.plusMinutes(i / 1000).toInstant().toEpochMilli()));
ins.execute();
}
- ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM csv_data");
- DocumentFactoryBuilder config = Configuration.builder().include(new Quoter());
+ ResultSet rs1 = conn.createStatement().executeQuery("SELECT * FROM csv_data");
+ ResultSet rs2 = conn.createStatement().executeQuery("SELECT * FROM csv_data");
+ ResultSet rs3 = conn.createStatement().executeQuery("SELECT * FROM csv_data");
+ DocumentFactoryBuilder csvConfig = Configuration.builder().include(new Quoter());
+ DocumentFactoryBuilder xmlConfig = Configuration.builder();
DecimalFormatSymbols dfs = new DecimalFormatSymbols(Locale.getDefault());
//if we are using a culture which has comma as decimal separator, change the output to dot
//we could apply this always, but it adds a bit of overhead, so let's apply it conditionally
if (dfs.getDecimalSeparator() == ',') {
- config.include(new NumberAsComma());
+ csvConfig.include(new NumberAsComma());
+ xmlConfig.include(new NumberAsComma());
}
+ csvConfig.streaming(50000);//by default streaming is 16k, lets leave the default for xml
+ DocumentFactory csvFactory = csvConfig.build();
+ DocumentFactory xmlFactory = xmlConfig.build();
//we can stream directly into a zipped stream/file
ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(tmp));
- zos.putNextEntry(new ZipEntry("output.csv"));
- TemplateDocument doc = config.build().open(templateStream, "csv", zos);
- //streaming processing assumes we have only a single collection, which means we first need to process all other tags
- doc.process(new Object() { public Object filter = new Object() { public String date = "All"; public String user = "All"; }; });
- //to do a streaming processing we need to process collection in chunks
- ArrayList chunk = new ArrayList<>(50000);
- boolean hasData = rs.next();
- while (hasData) {
- //one way of doing streaming is first duplicating the template row (context)
- doc.templater().resize(doc.templater().tags(), 2);
- //and then process that row with all known data
- //this way we will have additional row to process (or remove) later
- do {
- chunk.add(new StreamingRow(rs));
- hasData = rs.next();
- } while (chunk.size() < 50000 && hasData);
- doc.process(new Object() { public ArrayList data = chunk; });
- chunk.clear();
- }
- //remove remaining rows
- doc.templater().resize(doc.templater().tags(), 0);
- doc.close();
+ zos.putNextEntry(new ZipEntry("manual.csv"));
+ long start = System.currentTimeMillis();
+ manualStreaming(rs1, csvFactory, zos);
+ System.out.println("manual csv took: " + (System.currentTimeMillis() - start));
+ zos.putNextEntry(new ZipEntry("automatic.csv"));
+ start = System.currentTimeMillis();
+ automaticStreaming(rs2, csvFactory, "csv", zos);
+ System.out.println("automatic csv took: " + (System.currentTimeMillis() - start));
+ zos.putNextEntry(new ZipEntry("data.xml"));
+ start = System.currentTimeMillis();
+ //by default XML will do many small operations so its much faster to wrap the stream with a buffer
+ BufferedOutputStream bos = new BufferedOutputStream(zos);
+ automaticStreaming(rs3, xmlFactory, "xml", bos);
+ bos.flush();
+ System.out.println("automatic xml took: " + (System.currentTimeMillis() - start));
conn.close();
- zos.closeEntry();
zos.close();
Desktop.getDesktop().open(tmp);
}
+
+ private static void manualStreaming(ResultSet rs, DocumentFactory factory, OutputStream os) throws SQLException {
+ InputStream templateStream = StreamingExample.class.getResourceAsStream("/input.csv");
+ try (TemplateDocument doc = factory.open(templateStream, "csv", os)) {
+ //streaming processing assumes we have only a single collection, which means we first need to process all other tags
+ doc.process(new Object() {
+ public Object filter = new Object() {
+ public String date = "All";
+ public String user = "All";
+ };
+ });
+ //to do a streaming processing we need to process collection in chunks
+ ArrayList chunk = new ArrayList<>(50000);
+ boolean hasData = rs.next();
+ while (hasData) {
+ //one way of doing streaming is first duplicating the template row (context)
+ doc.templater().resize(doc.templater().tags(), 2);
+ //and then process that row with all known data
+ //this way we will have additional row to process (or remove) later
+ do {
+ chunk.add(new StreamingRow(rs));
+ hasData = rs.next();
+ } while (chunk.size() < 50000 && hasData);
+ doc.process(new Object() {
+ public ArrayList data = chunk;
+ });
+ chunk.clear();
+ }
+ //remove remaining rows
+ doc.templater().resize(doc.templater().tags(), 0);
+ }
+ }
+
+ private static void automaticStreaming(ResultSet rs, DocumentFactory factory, String extension, OutputStream os) throws SQLException {
+ InputStream templateStream = StreamingExample.class.getResourceAsStream("/input." + extension);
+ try (TemplateDocument doc = factory.open(templateStream, extension, os)) {
+ //we still want to make sure all non collection tags are processed first (or they are at the end of document)
+ doc.process(new Object() {
+ public Object filter = new Object() {
+ public String date = "All";
+ public String user = "All";
+ };
+ });
+ //for streaming lets just pass iterator for processing
+ doc.process(new Object() {
+ public Iterator data = new StreamingRow.RsIterator(rs);
+ });
+ }
+ }
}
diff --git a/Advanced/CsvStreaming/template/input.csv b/Advanced/Streaming/template/input.csv
similarity index 100%
rename from Advanced/CsvStreaming/template/input.csv
rename to Advanced/Streaming/template/input.csv
diff --git a/Advanced/Streaming/template/input.xml b/Advanced/Streaming/template/input.xml
new file mode 100644
index 00000000..d8d477b9
--- /dev/null
+++ b/Advanced/Streaming/template/input.xml
@@ -0,0 +1,15 @@
+
+
+
+
+
+ [[data.amount]]
+ [[data.date]:format]
+
+ [[data.reference]]
+ [[data.branch]]
+
+ [[data.note]]
+
+
+
diff --git a/Advanced/TemplaterServer/Dockerfile b/Advanced/TemplaterServer/Dockerfile
index 5cabcfc9..8992961e 100644
--- a/Advanced/TemplaterServer/Dockerfile
+++ b/Advanced/TemplaterServer/Dockerfile
@@ -6,7 +6,7 @@ ENV TZ=Europe/Zagreb
RUN apt update && apt install openjdk-11-jre-headless libreoffice-common libreoffice-java-common libreoffice-writer libreoffice-calc wget -yq
-RUN wget -q https://github.com/ngs-doo/TemplaterExamples/releases/download/v6.1.0/templater-server.jar
+RUN wget -q https://github.com/ngs-doo/TemplaterExamples/releases/download/v7.0.0/templater-server.jar
COPY templater.lic .
diff --git a/Beginner/AndroidExample/app/build.gradle b/Beginner/AndroidExample/app/build.gradle
index 44483d38..4091b7cf 100644
--- a/Beginner/AndroidExample/app/build.gradle
+++ b/Beginner/AndroidExample/app/build.gradle
@@ -4,12 +4,16 @@ android {
compileSdkVersion 28
defaultConfig {
applicationId "hr.ngs.templater.example"
- minSdkVersion 19
+ minSdkVersion 26
targetSdkVersion 28
versionCode 1
versionName "1.0"
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
}
+ compileOptions {
+ sourceCompatibility JavaVersion.VERSION_1_8
+ targetCompatibility JavaVersion.VERSION_1_8
+ }
buildTypes {
release {
minifyEnabled false
@@ -39,6 +43,7 @@ dependencies {
implementation 'com.android.support.constraint:constraint-layout:1.1.3'
implementation 'com.android.support:design:28.0.0'
implementation 'hr.ngs.templater:templater:7.0.0'
+ implementation 'stax:stax:1.2.0'
testImplementation 'junit:junit:4.12'
androidTestImplementation 'com.android.support.test:runner:1.0.2'
androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
diff --git a/Beginner/AndroidExample/app/src/main/java/hr/ngs/templater/example/Templater.java b/Beginner/AndroidExample/app/src/main/java/hr/ngs/templater/example/Templater.java
index c9a9ef45..23478cdd 100644
--- a/Beginner/AndroidExample/app/src/main/java/hr/ngs/templater/example/Templater.java
+++ b/Beginner/AndroidExample/app/src/main/java/hr/ngs/templater/example/Templater.java
@@ -1,28 +1,24 @@
package hr.ngs.templater.example;
-import org.apache.xerces.jaxp.DocumentBuilderFactoryImpl;
-
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import hr.ngs.templater.Configuration;
-import hr.ngs.templater.ITemplateDocument;
+import hr.ngs.templater.TemplateDocument;
public abstract class Templater {
public static void createDocument(InputStream template, String extension, OutputStream result, Object ...data) throws IOException {
//By default Templater will include Java images in low level plugins.
//To avoid missing awt dependency disable low level plugins
- //Use custom XML library as Android one does not work for non-trivial stuff
- ITemplateDocument document = Configuration.builder()
+ TemplateDocument document = Configuration.builder()
.builtInLowLevelPlugins(false)
- .xmlBuilder(new org.apache.xerces.jaxp.DocumentBuilderFactoryImpl(), false)
.build().open(template, extension, result);
for(Object d : data) {
document.process(d);
}
- document.flush();
+ document.close();
template.close();
}
}
diff --git a/Beginner/AndroidExample/build.gradle b/Beginner/AndroidExample/build.gradle
index 160cb824..bc090d89 100644
--- a/Beginner/AndroidExample/build.gradle
+++ b/Beginner/AndroidExample/build.gradle
@@ -6,7 +6,7 @@ buildscript {
jcenter()
}
dependencies {
- classpath 'com.android.tools.build:gradle:3.5.2'
+ classpath 'com.android.tools.build:gradle:4.0.2'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
diff --git a/Beginner/AndroidExample/gradle/wrapper/gradle-wrapper.properties b/Beginner/AndroidExample/gradle/wrapper/gradle-wrapper.properties
index d3b65932..54fb3aa7 100644
--- a/Beginner/AndroidExample/gradle/wrapper/gradle-wrapper.properties
+++ b/Beginner/AndroidExample/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,6 @@
-#Sun Dec 01 09:22:26 CET 2019
+#Tue Apr 19 15:52:59 CEST 2022
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.1.1-all.zip
diff --git a/Beginner/DataSet (.NET)/Readme.md b/Beginner/DataSet (.NET)/Readme.md
index bb556072..601f6072 100644
--- a/Beginner/DataSet (.NET)/Readme.md
+++ b/Beginner/DataSet (.NET)/Readme.md
@@ -57,4 +57,12 @@ In this case to specify background color for a cell, Word uses properties such a
-As of v2.5 Templater can use merge-xml metadata as instruction to merge provided XML to the surrounding context. This way we can "append" color to the appropriate place.
\ No newline at end of file
+As of v2.5 Templater can use merge-xml metadata as instruction to merge provided XML to the surrounding context. This way we can "append" color to the appropriate place.
+
+As of v7 this merge-xml can be passed directly through XML so there is no need for it in tag metadata. In that case XML would look like:
+
+
+
+
+
+
diff --git a/Intermediate/AlternativeProperty/src/Program.cs b/Intermediate/AlternativeProperty/src/Program.cs
index 9bae44dc..e39875a9 100644
--- a/Intermediate/AlternativeProperty/src/Program.cs
+++ b/Intermediate/AlternativeProperty/src/Program.cs
@@ -21,18 +21,18 @@ class MyObject
public MyObjectA objectA = new MyObjectA();
public MyObjectB objectB = new MyObjectB();
}
- private static ThreadLocal