From 9b7fd69165ab2ac71597c2c65f90df70b66ab861 Mon Sep 17 00:00:00 2001
From: Johannes Schicktanz
Date: Tue, 21 Dec 2021 14:14:34 +0100
Subject: [PATCH 1/2] adds oci artifact filter

---
 pkg/testutils/tar.go                          |  68 +++++
 .../process/processors/oci_artifact_filter.go | 248 ++++++++++++++++
 .../processors/oci_artifact_filter_test.go    | 274 ++++++++++++++++++
 3 files changed, 590 insertions(+)
 create mode 100644 pkg/testutils/tar.go
 create mode 100644 pkg/transport/process/processors/oci_artifact_filter.go
 create mode 100644 pkg/transport/process/processors/oci_artifact_filter_test.go

diff --git a/pkg/testutils/tar.go b/pkg/testutils/tar.go
new file mode 100644
index 00000000..dbcc4a4b
--- /dev/null
+++ b/pkg/testutils/tar.go
@@ -0,0 +1,68 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+package testutils
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"time"
+
+	. "github.com/onsi/gomega"
+)
+
+// CreateTARArchive creates a TAR archive which contains a defined set of files
+func CreateTARArchive(files map[string][]byte) *bytes.Buffer {
+	buf := bytes.NewBuffer([]byte{})
+	tw := tar.NewWriter(buf)
+	defer tw.Close()
+
+	for filename, content := range files {
+		h := tar.Header{
+			Name:    filename,
+			Size:    int64(len(content)),
+			Mode:    0600,
+			ModTime: time.Now(),
+		}
+
+		Expect(tw.WriteHeader(&h)).To(Succeed())
+		_, err := tw.Write(content)
+		Expect(err).ToNot(HaveOccurred())
+	}
+
+	return buf
+}
+
+// CheckTARArchive checks that a TAR archive contains a defined set of files
+func CheckTARArchive(archiveReader io.Reader, expectedFiles map[string][]byte) {
+	tr := tar.NewReader(archiveReader)
+
+	expectedFilesCopy := map[string][]byte{}
+	for key, value := range expectedFiles {
+		expectedFilesCopy[key] = value
+	}
+
+	for {
+		header, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			Expect(err).ToNot(HaveOccurred())
+		}
+
+		actualContentBuf := bytes.NewBuffer([]byte{})
+		_, err = io.Copy(actualContentBuf, tr)
+		Expect(err).ToNot(HaveOccurred())
+
+		expectedContent, ok := expectedFilesCopy[header.Name]
+		Expect(ok).To(BeTrue(), fmt.Sprintf("file \"%s\" is not included in expected files", header.Name))
+		Expect(actualContentBuf.Bytes()).To(Equal(expectedContent))
+
+		delete(expectedFilesCopy, header.Name)
+	}
+
+	Expect(expectedFilesCopy).To(BeEmpty(), fmt.Sprintf("unable to find all expected files in TAR archive. missing files = %+v", expectedFilesCopy))
+}
diff --git a/pkg/transport/process/processors/oci_artifact_filter.go b/pkg/transport/process/processors/oci_artifact_filter.go
new file mode 100644
index 00000000..5f2da990
--- /dev/null
+++ b/pkg/transport/process/processors/oci_artifact_filter.go
@@ -0,0 +1,248 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+package processors
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/images"
+	"github.com/opencontainers/go-digest"
+	ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"github.com/gardener/component-cli/ociclient/cache"
+	"github.com/gardener/component-cli/ociclient/oci"
+	"github.com/gardener/component-cli/pkg/transport/process"
+	processutils "github.com/gardener/component-cli/pkg/transport/process/utils"
+	"github.com/gardener/component-cli/pkg/utils"
+)
+
+type ociArtifactFilter struct {
+	cache          cache.Cache
+	removePatterns []string
+}
+
+func (f *ociArtifactFilter) Process(ctx context.Context, r io.Reader, w io.Writer) error {
+	cd, res, blobreader, err := processutils.ReadProcessorMessage(r)
+	if err != nil {
+		return fmt.Errorf("unable to read archive: %w", err)
+	}
+	if blobreader == nil {
+		return errors.New("resource blob must not be nil")
+	}
+	defer blobreader.Close()
+
+	ociArtifact, err := processutils.DeserializeOCIArtifact(blobreader, f.cache)
+	if err != nil {
+		return fmt.Errorf("unable to deserialize oci artifact: %w", err)
+	}
+
+	if ociArtifact.IsIndex() {
+		filteredIndex, err := f.filterImageIndex(*ociArtifact.GetIndex())
+		if err != nil {
+			return fmt.Errorf("unable to filter image index: %w", err)
+		}
+		if err := ociArtifact.SetIndex(filteredIndex); err != nil {
+			return fmt.Errorf("unable to set index: %w", err)
+		}
+	} else {
+		filteredImg, err := f.filterImage(*ociArtifact.GetManifest())
+		if err != nil {
+			return fmt.Errorf("unable to filter image: %w", err)
+		}
+		if err := ociArtifact.SetManifest(filteredImg); err != nil {
+			return fmt.Errorf("unable to set manifest: %w", err)
+		}
+	}
+
+	blobReader, err := processutils.SerializeOCIArtifact(*ociArtifact, f.cache)
+	if err != nil {
+		return fmt.Errorf("unable to serialize oci artifact: %w", err)
+	}
+
+	if err = processutils.WriteProcessorMessage(*cd, res, blobReader, w); err != nil {
+		return fmt.Errorf("unable to write archive: %w", err)
+	}
+
+	return nil
+}
+
+func (f *ociArtifactFilter) filterImageIndex(inputIndex oci.Index) (*oci.Index, error) {
+	filteredImgs := []*oci.Manifest{}
+	for _, m := range inputIndex.Manifests {
+		filteredManifest, err := f.filterImage(*m)
+		if err != nil {
+			return nil, fmt.Errorf("unable to filter image %+v: %w", m, err)
+		}
+
+		manifestBytes, err := json.Marshal(filteredManifest.Data)
+		if err != nil {
+			return nil, fmt.Errorf("unable to marshal manifest: %w", err)
+		}
+
+		if err := f.cache.Add(filteredManifest.Descriptor, io.NopCloser(bytes.NewReader(manifestBytes))); err != nil {
+			return nil, fmt.Errorf("unable to add filtered manifest to cache: %w", err)
+		}
+
+		filteredImgs = append(filteredImgs, filteredManifest)
+	}
+
+	filteredIndex := oci.Index{
+		Manifests:   filteredImgs,
+		Annotations: inputIndex.Annotations,
+	}
+
+	return &filteredIndex, nil
+}
+
+func (f *ociArtifactFilter) filterImage(manifest oci.Manifest) (*oci.Manifest, error) {
+	// diffIDs := []digest.Digest{}
+	// unfilteredToFilteredDigestMappings := map[digest.Digest]digest.Digest{}
+	filteredLayers := []ocispecv1.Descriptor{}
+
+	for _, layer := range manifest.Data.Layers {
+		layerBlobReader, err := f.cache.Get(layer)
+		if err != nil {
+			return nil, err
+		}
+
+		tmpfile, err := ioutil.TempFile("", "")
+		if err != nil {
+			return nil, fmt.Errorf("unable to create tempfile: %w", err)
+		}
+		defer tmpfile.Close()
+		var layerBlobWriter io.WriteCloser = tmpfile
+
+		isGzipCompressedLayer := layer.MediaType == ocispecv1.MediaTypeImageLayerGzip || layer.MediaType == images.MediaTypeDockerSchema2LayerGzip
+		if isGzipCompressedLayer {
+			// TODO: detect correct compression and apply to reader and writer
+			layerBlobReader, err = gzip.NewReader(layerBlobReader)
+			if err != nil {
+				return nil, fmt.Errorf("unable to create gzip reader for layer: %w", err)
+			}
+			gzipw := gzip.NewWriter(layerBlobWriter)
+			defer gzipw.Close()
+			layerBlobWriter = gzipw
+		}
+
+		uncompressedHasher := sha256.New()
+		mw := io.MultiWriter(layerBlobWriter, uncompressedHasher)
+
+		if err = utils.FilterTARArchive(layerBlobReader, mw, f.removePatterns); err != nil {
+			return nil, fmt.Errorf("unable to filter layer blob: %w", err)
+		}
+
+		if isGzipCompressedLayer {
+			// close gzip writer (flushes any unwritten data and writes gzip footer)
+			if err := layerBlobWriter.Close(); err != nil {
+				return nil, fmt.Errorf("unable to close layer writer: %w", err)
+			}
+		}
+
+		if _, err := tmpfile.Seek(0, io.SeekStart); err != nil {
+			return nil, fmt.Errorf("unable to reset input file: %s", err)
+		}
+
+		filteredDigest, err := digest.FromReader(tmpfile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to calculate digest for layer %+v: %w", layer, err)
+		}
+
+		// unfilteredToFilteredDigestMappings[layer.Digest] = filteredDigest
+		// diffIDs = append(diffIDs, digest.NewDigestFromEncoded(digest.SHA256, hex.EncodeToString(uncompressedHasher.Sum(nil))))
+
+		fstat, err := tmpfile.Stat()
+		if err != nil {
+			return nil, fmt.Errorf("unable to get file stat: %w", err)
+		}
+
+		desc := ocispecv1.Descriptor{
+			MediaType:   layer.MediaType,
+			Digest:      filteredDigest,
+			Size:        fstat.Size(),
+			URLs:        layer.URLs,
+			Platform:    layer.Platform,
+			Annotations: layer.Annotations,
+		}
+		filteredLayers = append(filteredLayers, desc)
+
+		if _, err := tmpfile.Seek(0, io.SeekStart); err != nil {
+			return nil, fmt.Errorf("unable to reset input file: %s", err)
+		}
+		if err := f.cache.Add(desc, tmpfile); err != nil {
+			return nil, fmt.Errorf("unable to add filtered layer blob to cache: %w", err)
+		}
+	}
+
+	manifest.Data.Layers = filteredLayers
+
+	cfgBlob, err := f.cache.Get(manifest.Data.Config)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get config blob from cache: %w", err)
+	}
+
+	cfgData, err := io.ReadAll(cfgBlob)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read config blob: %w", err)
+	}
+
+	// TODO: check which modifications on config should be performed
+	// var config map[string]*json.RawMessage
+	// if err := json.Unmarshal(data, &config); err != nil {
+	// 	return nil, fmt.Errorf("unable to unmarshal config: %w", err)
+	// }
+	// rootfs := ocispecv1.RootFS{
+	// 	Type:    "layers",
+	// 	DiffIDs: diffIDs,
+	// }
+	// rootfsRaw, err := utils.RawJSON(rootfs)
+	// if err != nil {
+	// 	return nil, fmt.Errorf("unable to convert rootfs to JSON: %w", err)
+	// }
+	// config["rootfs"] = rootfsRaw
+	// marshaledConfig, err := json.Marshal(cfgData)
+	// if err != nil {
+	// 	return nil, fmt.Errorf("unable to marshal config: %w", err)
+	// }
+	// configDesc := ocispecv1.Descriptor{
+	// 	MediaType: ocispecv1.MediaTypeImageConfig,
+	// 	Digest:    digest.FromBytes(marshaledConfig),
+	// 	Size:      int64(len(marshaledConfig)),
+	// }
+	// manifest.Data.Config = configDesc
+
+	if err := f.cache.Add(manifest.Data.Config, io.NopCloser(bytes.NewReader(cfgData))); err != nil {
+		return nil, fmt.Errorf("unable to add config blob to cache: %w", err)
+	}
+
+	manifestBytes, err := json.Marshal(manifest.Data)
+	if err != nil {
+		return nil, fmt.Errorf("unable to marshal manifest: %w", err)
+	}
+
+	manifest.Descriptor.Size = int64(len(manifestBytes))
+	manifest.Descriptor.Digest = digest.FromBytes(manifestBytes)
+
+	return &manifest, nil
+}
+
+// NewOCIArtifactFilter returns a processor that filters files from oci artifact layers
+func NewOCIArtifactFilter(cache cache.Cache, removePatterns []string) (process.ResourceStreamProcessor, error) {
+	if cache == nil {
+		return nil, errors.New("cache must not be nil")
+	}
+
+	obj := ociArtifactFilter{
+		cache:          cache,
+		removePatterns: removePatterns,
+	}
+	return &obj, nil
+}
diff --git a/pkg/transport/process/processors/oci_artifact_filter_test.go b/pkg/transport/process/processors/oci_artifact_filter_test.go
new file mode 100644
index 00000000..5abb5fea
--- /dev/null
+++ b/pkg/transport/process/processors/oci_artifact_filter_test.go
@@ -0,0 +1,274 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+package processors_test
+
+import (
+	"bytes"
+	"context"
+
+	cdv2 "github.com/gardener/component-spec/bindings-go/apis/v2"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"github.com/gardener/component-cli/ociclient/cache"
+	"github.com/gardener/component-cli/ociclient/oci"
+	"github.com/gardener/component-cli/pkg/testutils"
+	"github.com/gardener/component-cli/pkg/transport/process/processors"
+	processutils "github.com/gardener/component-cli/pkg/transport/process/utils"
+)
+
+var _ = Describe("ociArtifactFilter", func() {
+
+	Context("Process", func() {
+
+		It("should filter files from oci image", func() {
+			expectedRes := cdv2.Resource{
+				IdentityObjectMeta: cdv2.IdentityObjectMeta{
+					Name:    "my-res",
+					Version: "v0.1.0",
+					Type:    "ociImage",
+				},
+			}
+			expectedCd := cdv2.ComponentDescriptor{
+				ComponentSpec: cdv2.ComponentSpec{
+					Resources: []cdv2.Resource{
+						expectedRes,
+					},
+				},
+			}
+
+			removePatterns := []string{
+				"filter-this/*",
+			}
+
+			l1Files := map[string][]byte{
+				"test":              []byte("test-content"),
+				"filter-this/file1": []byte("file1-content"),
+				"filter-this/file2": []byte("file2-content"),
+			}
+
+			// TODO: add gzipped layer
+			layers := [][]byte{
+				testutils.CreateTARArchive(l1Files).Bytes(),
+			}
+
+			expectedL1Files := map[string][]byte{
+				"test": []byte("test-content"),
+			}
+
+			expectedLayers := [][]byte{
+				testutils.CreateTARArchive(expectedL1Files).Bytes(),
+			}
+
+			configData := []byte("{}")
+
+			expectedManifestData, expectedManifestDesc := testutils.CreateManifest(configData, expectedLayers, nil)
+			em := oci.Manifest{
+				Descriptor: expectedManifestDesc,
+				Data:       expectedManifestData,
+			}
+			expectedOciArtifact, err := oci.NewManifestArtifact(&em)
+			Expect(err).ToNot(HaveOccurred())
+
+			ociCache := cache.NewInMemoryCache()
+
+			manifestData, manifestDesc := testutils.CreateManifest(configData, layers, ociCache)
+			m := oci.Manifest{
+				Descriptor: manifestDesc,
+				Data:       manifestData,
+			}
+
+			ociArtifact, err := oci.NewManifestArtifact(&m)
+			Expect(err).ToNot(HaveOccurred())
+
+			r1, err := processutils.SerializeOCIArtifact(*ociArtifact, ociCache)
+			Expect(err).ToNot(HaveOccurred())
+			defer r1.Close()
+
+			inBuf := bytes.NewBuffer([]byte{})
+			Expect(processutils.WriteProcessorMessage(expectedCd, expectedRes, r1, inBuf)).To(Succeed())
+
+			outbuf := bytes.NewBuffer([]byte{})
+			proc, err := processors.NewOCIArtifactFilter(ociCache, removePatterns)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(proc.Process(context.TODO(), inBuf, outbuf)).To(Succeed())
+
+			actualCD, actualRes, actualResBlobReader, err := processutils.ReadProcessorMessage(outbuf)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(*actualCD).To(Equal(expectedCd))
+			Expect(actualRes).To(Equal(expectedRes))
+
+			deserializeCache := cache.NewInMemoryCache()
+			actualOciArtifact, err := processutils.DeserializeOCIArtifact(actualResBlobReader, deserializeCache)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(actualOciArtifact).To(Equal(expectedOciArtifact))
+
+			r, err := deserializeCache.Get(actualOciArtifact.GetManifest().Data.Layers[0])
+			Expect(err).ToNot(HaveOccurred())
+			testutils.CheckTARArchive(r, expectedL1Files)
+		})
+
+		It("should filter files from all images of an oci image index", func() {
+			expectedRes := cdv2.Resource{
+				IdentityObjectMeta: cdv2.IdentityObjectMeta{
+					Name:    "my-res",
+					Version: "v0.1.0",
+					Type:    "ociImage",
+				},
+			}
+			expectedCd := cdv2.ComponentDescriptor{
+				ComponentSpec: cdv2.ComponentSpec{
+					Resources: []cdv2.Resource{
+						expectedRes,
+					},
+				},
+			}
+
+			removePatterns := []string{
+				"filter-this/*",
+			}
+
+			l1Files := map[string][]byte{
+				"test":              []byte("test-content"),
+				"filter-this/file1": []byte("file1-content"),
+				"filter-this/file2": []byte("file2-content"),
+			}
+
+			// TODO: add gzipped layer
+			layers := [][]byte{
+				testutils.CreateTARArchive(l1Files).Bytes(),
+			}
+
+			expectedL1Files := map[string][]byte{
+				"test": []byte("test-content"),
+			}
+
+			expectedLayers := [][]byte{
+				testutils.CreateTARArchive(expectedL1Files).Bytes(),
+			}
+
+			configData := []byte("{}")
+
+			expectedManifestData, expectedManifestDesc := testutils.CreateManifest(configData, expectedLayers, nil)
+			ei := oci.Index{
+				Manifests: []*oci.Manifest{
+					{
+						Descriptor: ocispecv1.Descriptor{
+							MediaType: expectedManifestDesc.MediaType,
+							Digest:    expectedManifestDesc.Digest,
+							Size:      expectedManifestDesc.Size,
+							Platform: &ocispecv1.Platform{
+								Architecture: "amd64",
+								OS:           "linux",
+							},
+						},
+						Data: expectedManifestData,
+					},
+					{
+						Descriptor: ocispecv1.Descriptor{
+							MediaType: expectedManifestDesc.MediaType,
+							Digest:    expectedManifestDesc.Digest,
+							Size:      expectedManifestDesc.Size,
+							Platform: &ocispecv1.Platform{
+								Architecture: "amd64",
+								OS:           "windows",
+							},
+						},
+						Data: expectedManifestData,
+					},
+				},
+				Annotations: map[string]string{
+					"test": "test",
+				},
+			}
+			expectedOciArtifact, err := oci.NewIndexArtifact(&ei)
+			Expect(err).ToNot(HaveOccurred())
+
+			ociCache := cache.NewInMemoryCache()
+
+			manifestData, manifestDesc := testutils.CreateManifest(configData, layers, ociCache)
+
+			index := oci.Index{
+				Manifests: []*oci.Manifest{
+					{
+						Descriptor: ocispecv1.Descriptor{
+							MediaType: manifestDesc.MediaType,
+							Digest:    manifestDesc.Digest,
+							Size:      manifestDesc.Size,
+							Platform: &ocispecv1.Platform{
+								Architecture: "amd64",
+								OS:           "linux",
+							},
+						},
+						Data: manifestData,
+					},
+					{
+						Descriptor: ocispecv1.Descriptor{
+							MediaType: manifestDesc.MediaType,
+							Digest:    manifestDesc.Digest,
+							Size:      manifestDesc.Size,
+							Platform: &ocispecv1.Platform{
+								Architecture: "amd64",
+								OS:           "windows",
+							},
+						},
+						Data: manifestData,
+					},
+				},
+				Annotations: map[string]string{
+					"test": "test",
+				},
+			}
+
+			ociArtifact, err := oci.NewIndexArtifact(&index)
+			Expect(err).ToNot(HaveOccurred())
+
+			r1, err := processutils.SerializeOCIArtifact(*ociArtifact, ociCache)
+			Expect(err).ToNot(HaveOccurred())
+			defer r1.Close()
+
+			inBuf := bytes.NewBuffer([]byte{})
+			Expect(processutils.WriteProcessorMessage(expectedCd, expectedRes, r1, inBuf)).To(Succeed())
+
+			outbuf := bytes.NewBuffer([]byte{})
+			proc, err := processors.NewOCIArtifactFilter(ociCache, removePatterns)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(proc.Process(context.TODO(), inBuf, outbuf)).To(Succeed())
+
+			actualCD, actualRes, actualResBlobReader, err := processutils.ReadProcessorMessage(outbuf)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(*actualCD).To(Equal(expectedCd))
+			Expect(actualRes).To(Equal(expectedRes))
+
+			deserializeCache := cache.NewInMemoryCache()
+			actualOciArtifact, err := processutils.DeserializeOCIArtifact(actualResBlobReader, deserializeCache)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(actualOciArtifact).To(Equal(expectedOciArtifact))
+
+			firstMan := actualOciArtifact.GetIndex().Manifests[0]
+			fr, err := deserializeCache.Get(firstMan.Data.Layers[0])
+			Expect(err).ToNot(HaveOccurred())
+			testutils.CheckTARArchive(fr, expectedL1Files)
+
+			secondMan := actualOciArtifact.GetIndex().Manifests[1]
+			sr, err := deserializeCache.Get(secondMan.Data.Layers[0])
+			Expect(err).ToNot(HaveOccurred())
+			testutils.CheckTARArchive(sr, expectedL1Files)
+		})
+
+		It("should return error if cache is nil", func() {
+			_, err := processors.NewOCIArtifactFilter(nil, []string{})
+			Expect(err).To(MatchError("cache must not be nil"))
+		})
+
+		It("should return error if resource blob reader is nil", func() {
+			_, err := processors.NewOCIArtifactFilter(nil, []string{})
+			Expect(err).To(MatchError("cache must not be nil"))
+		})
+
+	})
+})

From 7a92ec174fc7edf975237956a30f783e52cf012b Mon Sep 17 00:00:00 2001
From: Johannes Schicktanz
Date: Tue, 21 Dec 2021 14:18:24 +0100
Subject: [PATCH 2/2] adds missing util method

---
 pkg/utils/utils.go      | 47 ++++++++++++++++++++++++++++++++++++++++++
 pkg/utils/utils_test.go | 38 ++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+)

diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index cf08e168..be5488c5 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -178,6 +178,53 @@ func BytesString(bytes uint64, accuracy int) string {
 	return fmt.Sprintf("%s %s", stringValue, unit)
 }
 
+// FilterTARArchive copies the TAR archive from inputReader to outputWriter and drops all entries whose names match one of the removePatterns
+func FilterTARArchive(inputReader io.Reader, outputWriter io.Writer, removePatterns []string) error {
+	if inputReader == nil {
+		return errors.New("inputReader must not be nil")
+	}
+
+	if outputWriter == nil {
+		return errors.New("outputWriter must not be nil")
+	}
+
+	tr := tar.NewReader(inputReader)
+	tw := tar.NewWriter(outputWriter)
+	defer tw.Close()
+
+NEXT_FILE:
+	for {
+		header, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return fmt.Errorf("unable to read header: %w", err)
+		}
+
+		for _, removePattern := range removePatterns {
+			removeFile, err := filepath.Match(removePattern, header.Name)
+			if err != nil {
+				return fmt.Errorf("unable to match filename against pattern: %w", err)
+			}
+
+			if removeFile {
+				continue NEXT_FILE
+			}
+		}
+
+		if err := tw.WriteHeader(header); err != nil {
+			return fmt.Errorf("unable to write header: %w", err)
+		}
+
+		if _, err = io.Copy(tw, tr); err != nil {
+			return fmt.Errorf("unable to write file: %w", err)
+		}
+	}
+
+	return nil
+}
+
 // WriteFileToTARArchive writes a new file with name=filename and content=inputReader to outputWriter
 func WriteFileToTARArchive(filename string, inputReader io.Reader, outputWriter *tar.Writer) error {
 	if filename == "" {
diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go
index ef0dd993..eb8d0129 100644
--- a/pkg/utils/utils_test.go
+++ b/pkg/utils/utils_test.go
@@ -11,6 +11,7 @@ import (
"github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/gardener/component-cli/pkg/testutils" "github.com/gardener/component-cli/pkg/utils" ) @@ -83,4 +84,41 @@ var _ = Describe("utils", func() { }) + Context("FilterTARArchive", func() { + + It("should filter archive", func() { + removePatterns := []string{ + "second/*", + } + + inputFiles := map[string][]byte{ + "first/testfile": []byte("some-content"), + "second/testfile": []byte("more-content"), + "second/testfile-2": []byte("other-content"), + } + + expectedFiles := map[string][]byte{ + "first/testfile": []byte("some-content"), + } + + archive := testutils.CreateTARArchive(inputFiles) + + outBuf := bytes.NewBuffer([]byte{}) + Expect(utils.FilterTARArchive(archive, outBuf, removePatterns)).To(Succeed()) + + testutils.CheckTARArchive(outBuf, expectedFiles) + }) + + It("should return error if inputReader is nil", func() { + outWriter := bytes.NewBuffer([]byte{}) + Expect(utils.FilterTARArchive(nil, outWriter, []string{})).To(MatchError("inputReader must not be nil")) + }) + + It("should return error if outputWriter is nil", func() { + inputReader := bytes.NewReader([]byte{}) + Expect(utils.FilterTARArchive(inputReader, nil, []string{})).To(MatchError("outputWriter must not be nil")) + }) + + }) + })