From 89f1c78cd1bf9f86f45326b6c54d141e44602256 Mon Sep 17 00:00:00 2001 From: siyul-park Date: Mon, 20 Nov 2023 05:16:03 -0500 Subject: [PATCH] chore: fork for open source --- .github/codecov.yml | 11 + .github/renovate.json | 13 + .github/workflows/check.yml | 99 ++++ .gitignore | 38 ++ LICENSE | 21 + Makefile | 75 +++ README.md | 99 ++++ cmd/flag/convert.go | 14 + cmd/resource/scheme.go | 69 +++ cmd/resource/scheme_test.go | 32 ++ cmd/resource/yamljson.go | 15 + cmd/uniflow/apply/cmd.go | 145 +++++ cmd/uniflow/apply/cmd_test.go | 69 +++ cmd/uniflow/apply/flag.go | 6 + cmd/uniflow/cmd.go | 47 ++ cmd/uniflow/flag.go | 6 + cmd/uniflow/main.go | 111 ++++ cmd/uniflow/start/cmd.go | 136 +++++ cmd/uniflow/start/cmd_test.go | 90 +++ cmd/uniflow/start/flag.go | 6 + examples/boot.yaml | 145 +++++ examples/echo.yaml | 18 + examples/ping.yaml | 24 + go.mod | 60 ++ go.sum | 595 +++++++++++++++++++ internal/encoding/decoder.go | 16 + internal/encoding/encoder.go | 16 + internal/encoding/error.go | 9 + internal/encoding/group.go | 89 +++ internal/encoding/group_test.go | 80 +++ internal/pool/map.go | 21 + internal/pool/map_test.go | 28 + internal/util/compare.go | 109 ++++ internal/util/compare_test.go | 494 ++++++++++++++++ internal/util/copy.go | 27 + internal/util/copy_test.go | 40 ++ internal/util/hash.go | 7 + internal/util/kind.go | 81 +++ internal/util/ptr.go | 29 + internal/util/ptr_test.go | 41 ++ pkg/database/collection.go | 99 ++++ pkg/database/collection_test.go | 45 ++ pkg/database/database.go | 12 + pkg/database/databasetest/collection.go | 732 ++++++++++++++++++++++++ pkg/database/databasetest/database.go | 32 ++ pkg/database/databasetest/index.go | 62 ++ pkg/database/errors.go | 13 + pkg/database/filter.go | 176 ++++++ pkg/database/filter_test.go | 310 ++++++++++ pkg/database/index.go | 20 + pkg/database/memdb/collection.go | 542 ++++++++++++++++++ pkg/database/memdb/collection_test.go | 143 +++++ pkg/database/memdb/database.go | 62 ++ 
pkg/database/memdb/database_test.go | 25 + pkg/database/memdb/filter.go | 235 ++++++++ pkg/database/memdb/index.go | 380 ++++++++++++ pkg/database/memdb/index_test.go | 25 + pkg/database/memdb/sort.go | 35 ++ pkg/database/memdb/stream.go | 103 ++++ pkg/database/mongodb/collection.go | 262 +++++++++ pkg/database/mongodb/collection_test.go | 251 ++++++++ pkg/database/mongodb/connection.go | 56 ++ pkg/database/mongodb/connection_test.go | 53 ++ pkg/database/mongodb/database.go | 59 ++ pkg/database/mongodb/database_test.go | 56 ++ pkg/database/mongodb/encoding.go | 428 ++++++++++++++ pkg/database/mongodb/index.go | 89 +++ pkg/database/mongodb/index_test.go | 59 ++ pkg/database/mongodb/server.go | 57 ++ pkg/database/mongodb/server_test.go | 14 + pkg/database/mongodb/stream.go | 98 ++++ pkg/database/order.go | 10 + pkg/database/sort.go | 8 + pkg/hook/builder.go | 35 ++ pkg/hook/builder_test.go | 23 + pkg/hook/hook.go | 113 ++++ pkg/hook/hook_test.go | 69 +++ pkg/loader/loader.go | 522 +++++++++++++++++ pkg/loader/loader_test.go | 439 ++++++++++++++ pkg/loader/reconciler.go | 129 +++++ pkg/loader/reconciler_test.go | 80 +++ pkg/node/error.go | 8 + pkg/node/node.go | 15 + pkg/node/onetomany.go | 205 +++++++ pkg/node/onetomany_test.go | 132 +++++ pkg/node/onetoone.go | 204 +++++++ pkg/node/onetoone_test.go | 212 +++++++ pkg/node/ports.go | 8 + pkg/node/wrap.go | 19 + pkg/packet/packet.go | 42 ++ pkg/packet/packet_test.go | 31 + pkg/plugin/controllx/builder.go | 35 ++ pkg/plugin/controllx/builder_test.go | 18 + pkg/plugin/controllx/snippet.go | 187 ++++++ pkg/plugin/controllx/snippet_test.go | 310 ++++++++++ pkg/plugin/controllx/switch.go | 105 ++++ pkg/plugin/controllx/switch_test.go | 103 ++++ pkg/plugin/networkx/builder.go | 45 ++ pkg/plugin/networkx/builder_test.go | 48 ++ pkg/plugin/networkx/http.go | 563 ++++++++++++++++++ pkg/plugin/networkx/http_test.go | 143 +++++ pkg/plugin/networkx/mime.go | 294 ++++++++++ pkg/plugin/networkx/mime_test.go | 138 +++++ 
pkg/plugin/networkx/router.go | 522 +++++++++++++++++ pkg/plugin/networkx/router_test.go | 108 ++++ pkg/plugin/systemx/builder.go | 22 + pkg/plugin/systemx/builder_test.go | 26 + pkg/plugin/systemx/reflect.go | 264 +++++++++ pkg/plugin/systemx/reflect_test.go | 225 ++++++++ pkg/port/array.go | 29 + pkg/port/array_test.go | 17 + pkg/port/inithook.go | 85 +++ pkg/port/pipe.go | 191 +++++++ pkg/port/pipe_test.go | 155 +++++ pkg/port/port.go | 201 +++++++ pkg/port/port_test.go | 149 +++++ pkg/port/stream.go | 121 ++++ pkg/port/stream_test.go | 49 ++ pkg/primitive/binary.go | 101 ++++ pkg/primitive/binary_test.go | 44 ++ pkg/primitive/bool.go | 80 +++ pkg/primitive/bool_test.go | 37 ++ pkg/primitive/encoding.go | 67 +++ pkg/primitive/encoding_test.go | 259 +++++++++ pkg/primitive/float.go | 131 +++++ pkg/primitive/float_test.go | 68 +++ pkg/primitive/getter.go | 53 ++ pkg/primitive/int.go | 227 ++++++++ pkg/primitive/int_test.go | 133 +++++ pkg/primitive/map.go | 370 ++++++++++++ pkg/primitive/map_test.go | 116 ++++ pkg/primitive/object.go | 45 ++ pkg/primitive/pointer.go | 39 ++ pkg/primitive/pointer_test.go | 30 + pkg/primitive/shortcut.go | 27 + pkg/primitive/slice.go | 182 ++++++ pkg/primitive/slice_test.go | 95 +++ pkg/primitive/string.go | 90 +++ pkg/primitive/string_test.go | 46 ++ pkg/primitive/uint.go | 221 +++++++ pkg/primitive/uint_test.go | 133 +++++ pkg/process/process.go | 64 +++ pkg/process/process_test.go | 47 ++ pkg/process/stack.go | 307 ++++++++++ pkg/process/stack_test.go | 153 +++++ pkg/runtime/runtime.go | 144 +++++ pkg/runtime/runtime_test.go | 127 ++++ pkg/scheme/builder.go | 35 ++ pkg/scheme/builder_test.go | 33 ++ pkg/scheme/codec.go | 30 + pkg/scheme/scheme.go | 125 ++++ pkg/scheme/scheme_test.go | 71 +++ pkg/scheme/spec.go | 93 +++ pkg/scheme/unstructured.go | 189 ++++++ pkg/scheme/unstructured_test.go | 80 +++ pkg/storage/event.go | 18 + pkg/storage/filter.go | 167 ++++++ pkg/storage/storage.go | 331 +++++++++++ pkg/storage/storage_test.go | 
349 +++++++++++ pkg/storage/stream.go | 83 +++ pkg/storage/stream_test.go | 34 ++ pkg/symbol/postloadhook.go | 18 + pkg/symbol/postunloadhook.go | 18 + pkg/symbol/preloadhook.go | 18 + pkg/symbol/preunloadhook.go | 18 + pkg/symbol/table.go | 155 +++++ pkg/symbol/table_test.go | 96 ++++ 167 files changed, 19418 insertions(+) create mode 100644 .github/codecov.yml create mode 100644 .github/renovate.json create mode 100644 .github/workflows/check.yml create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 cmd/flag/convert.go create mode 100644 cmd/resource/scheme.go create mode 100644 cmd/resource/scheme_test.go create mode 100644 cmd/resource/yamljson.go create mode 100644 cmd/uniflow/apply/cmd.go create mode 100644 cmd/uniflow/apply/cmd_test.go create mode 100644 cmd/uniflow/apply/flag.go create mode 100644 cmd/uniflow/cmd.go create mode 100644 cmd/uniflow/flag.go create mode 100644 cmd/uniflow/main.go create mode 100644 cmd/uniflow/start/cmd.go create mode 100644 cmd/uniflow/start/cmd_test.go create mode 100644 cmd/uniflow/start/flag.go create mode 100644 examples/boot.yaml create mode 100644 examples/echo.yaml create mode 100644 examples/ping.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/encoding/decoder.go create mode 100644 internal/encoding/encoder.go create mode 100644 internal/encoding/error.go create mode 100644 internal/encoding/group.go create mode 100644 internal/encoding/group_test.go create mode 100644 internal/pool/map.go create mode 100644 internal/pool/map_test.go create mode 100644 internal/util/compare.go create mode 100644 internal/util/compare_test.go create mode 100644 internal/util/copy.go create mode 100644 internal/util/copy_test.go create mode 100644 internal/util/hash.go create mode 100644 internal/util/kind.go create mode 100644 internal/util/ptr.go create mode 100644 internal/util/ptr_test.go create mode 100644 
pkg/database/collection.go create mode 100644 pkg/database/collection_test.go create mode 100644 pkg/database/database.go create mode 100644 pkg/database/databasetest/collection.go create mode 100644 pkg/database/databasetest/database.go create mode 100644 pkg/database/databasetest/index.go create mode 100644 pkg/database/errors.go create mode 100644 pkg/database/filter.go create mode 100644 pkg/database/filter_test.go create mode 100644 pkg/database/index.go create mode 100644 pkg/database/memdb/collection.go create mode 100644 pkg/database/memdb/collection_test.go create mode 100644 pkg/database/memdb/database.go create mode 100644 pkg/database/memdb/database_test.go create mode 100644 pkg/database/memdb/filter.go create mode 100644 pkg/database/memdb/index.go create mode 100644 pkg/database/memdb/index_test.go create mode 100644 pkg/database/memdb/sort.go create mode 100644 pkg/database/memdb/stream.go create mode 100644 pkg/database/mongodb/collection.go create mode 100644 pkg/database/mongodb/collection_test.go create mode 100644 pkg/database/mongodb/connection.go create mode 100644 pkg/database/mongodb/connection_test.go create mode 100644 pkg/database/mongodb/database.go create mode 100644 pkg/database/mongodb/database_test.go create mode 100644 pkg/database/mongodb/encoding.go create mode 100644 pkg/database/mongodb/index.go create mode 100644 pkg/database/mongodb/index_test.go create mode 100644 pkg/database/mongodb/server.go create mode 100644 pkg/database/mongodb/server_test.go create mode 100644 pkg/database/mongodb/stream.go create mode 100644 pkg/database/order.go create mode 100644 pkg/database/sort.go create mode 100644 pkg/hook/builder.go create mode 100644 pkg/hook/builder_test.go create mode 100644 pkg/hook/hook.go create mode 100644 pkg/hook/hook_test.go create mode 100644 pkg/loader/loader.go create mode 100644 pkg/loader/loader_test.go create mode 100644 pkg/loader/reconciler.go create mode 100644 pkg/loader/reconciler_test.go create mode 
100644 pkg/node/error.go create mode 100644 pkg/node/node.go create mode 100644 pkg/node/onetomany.go create mode 100644 pkg/node/onetomany_test.go create mode 100644 pkg/node/onetoone.go create mode 100644 pkg/node/onetoone_test.go create mode 100644 pkg/node/ports.go create mode 100644 pkg/node/wrap.go create mode 100644 pkg/packet/packet.go create mode 100644 pkg/packet/packet_test.go create mode 100644 pkg/plugin/controllx/builder.go create mode 100644 pkg/plugin/controllx/builder_test.go create mode 100644 pkg/plugin/controllx/snippet.go create mode 100644 pkg/plugin/controllx/snippet_test.go create mode 100644 pkg/plugin/controllx/switch.go create mode 100644 pkg/plugin/controllx/switch_test.go create mode 100644 pkg/plugin/networkx/builder.go create mode 100644 pkg/plugin/networkx/builder_test.go create mode 100644 pkg/plugin/networkx/http.go create mode 100644 pkg/plugin/networkx/http_test.go create mode 100644 pkg/plugin/networkx/mime.go create mode 100644 pkg/plugin/networkx/mime_test.go create mode 100644 pkg/plugin/networkx/router.go create mode 100644 pkg/plugin/networkx/router_test.go create mode 100644 pkg/plugin/systemx/builder.go create mode 100644 pkg/plugin/systemx/builder_test.go create mode 100644 pkg/plugin/systemx/reflect.go create mode 100644 pkg/plugin/systemx/reflect_test.go create mode 100644 pkg/port/array.go create mode 100644 pkg/port/array_test.go create mode 100644 pkg/port/inithook.go create mode 100644 pkg/port/pipe.go create mode 100644 pkg/port/pipe_test.go create mode 100644 pkg/port/port.go create mode 100644 pkg/port/port_test.go create mode 100644 pkg/port/stream.go create mode 100644 pkg/port/stream_test.go create mode 100644 pkg/primitive/binary.go create mode 100644 pkg/primitive/binary_test.go create mode 100644 pkg/primitive/bool.go create mode 100644 pkg/primitive/bool_test.go create mode 100644 pkg/primitive/encoding.go create mode 100644 pkg/primitive/encoding_test.go create mode 100644 pkg/primitive/float.go create 
mode 100644 pkg/primitive/float_test.go create mode 100644 pkg/primitive/getter.go create mode 100644 pkg/primitive/int.go create mode 100644 pkg/primitive/int_test.go create mode 100644 pkg/primitive/map.go create mode 100644 pkg/primitive/map_test.go create mode 100644 pkg/primitive/object.go create mode 100644 pkg/primitive/pointer.go create mode 100644 pkg/primitive/pointer_test.go create mode 100644 pkg/primitive/shortcut.go create mode 100644 pkg/primitive/slice.go create mode 100644 pkg/primitive/slice_test.go create mode 100644 pkg/primitive/string.go create mode 100644 pkg/primitive/string_test.go create mode 100644 pkg/primitive/uint.go create mode 100644 pkg/primitive/uint_test.go create mode 100644 pkg/process/process.go create mode 100644 pkg/process/process_test.go create mode 100644 pkg/process/stack.go create mode 100644 pkg/process/stack_test.go create mode 100644 pkg/runtime/runtime.go create mode 100644 pkg/runtime/runtime_test.go create mode 100644 pkg/scheme/builder.go create mode 100644 pkg/scheme/builder_test.go create mode 100644 pkg/scheme/codec.go create mode 100644 pkg/scheme/scheme.go create mode 100644 pkg/scheme/scheme_test.go create mode 100644 pkg/scheme/spec.go create mode 100644 pkg/scheme/unstructured.go create mode 100644 pkg/scheme/unstructured_test.go create mode 100644 pkg/storage/event.go create mode 100644 pkg/storage/filter.go create mode 100644 pkg/storage/storage.go create mode 100644 pkg/storage/storage_test.go create mode 100644 pkg/storage/stream.go create mode 100644 pkg/storage/stream_test.go create mode 100644 pkg/symbol/postloadhook.go create mode 100644 pkg/symbol/postunloadhook.go create mode 100644 pkg/symbol/preloadhook.go create mode 100644 pkg/symbol/preunloadhook.go create mode 100644 pkg/symbol/table.go create mode 100644 pkg/symbol/table_test.go diff --git a/.github/codecov.yml b/.github/codecov.yml new file mode 100644 index 00000000..2b44f647 --- /dev/null +++ b/.github/codecov.yml @@ -0,0 +1,11 @@ 
+coverage: + status: + project: + default: + threshold: 1% + patch: + default: + threshold: 1% + +comment: + require_changes: true diff --git a/.github/renovate.json b/.github/renovate.json new file mode 100644 index 00000000..d62a1e1c --- /dev/null +++ b/.github/renovate.json @@ -0,0 +1,13 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": ["config:base"], + "baseBranches": ["main"], + "branchPrefix": "dependency/", + "rangeStrategy": "bump", + "lockFileMaintenance": { "enabled": true }, + "packageRules": [ + { + "matchUpdateTypes": ["minor", "patch", "digest"] + } + ] +} diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 00000000..6f6e7883 --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,99 @@ +name: check + +concurrency: + group: ${{ github.uniflow }}-${{ github.ref }} + cancel-in-progress: true + +on: ["push"] + +jobs: + check: + strategy: + matrix: + os: [ ubuntu-20.04, macos-latest ] + go: [ '1.21' ] + name: Check ${{ matrix.os }} @ Go ${{ matrix.go }} + runs-on: ${{ matrix.os }} + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go }} + - uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - run: make init + - run: make check test-options="-race --coverprofile=coverage.coverprofile --covermode=atomic" + + - name: Upload coverage to Codecov + if: success() && matrix.go == '1.21' && matrix.os == 'ubuntu-20.04' + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: ./coverage.coverprofile + fail_ci_if_error: false + + benchmark: + needs: check + strategy: + matrix: + os: [ ubuntu-20.04 ] + go: [ '1.21' ] + name: Benchmark comparison ${{ matrix.os }} @ Go ${{ matrix.go }} + runs-on: ${{ matrix.os }} + continue-on-error: true + 
steps: + - name: Maximize build space + uses: easimon/maximize-build-space@master + with: + root-reserve-mb: 512 + swap-size-mb: 1024 + remove-dotnet: 'true' + + - name: Checkout Code (Previous) + uses: actions/checkout@v4 + with: + ref: ${{ github.base_ref }} + path: previous + + - name: Checkout Code (New) + uses: actions/checkout@v4 + with: + path: new + + - name: Set up Go ${{ matrix.go }} + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go }} + + - name: Install Dependencies + run: go install golang.org/x/perf/cmd/benchstat@latest + + - name: Init (Previous) + run: | + cd previous + make init + - name: Run Benchmark (Previous) + run: | + cd previous + go test -run="-" -bench=".*" -count=7 ./... | tee benchmark.txt + + - name: Init (New) + run: | + cd new + make init + - name: Run Benchmark (New) + run: | + cd new + go test -run="-" -bench=".*" -count=7 ./... | tee benchmark.txt + + - name: Run Benchstat + run: benchstat previous/benchmark.txt new/benchmark.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..c02adbac --- /dev/null +++ b/.gitignore @@ -0,0 +1,38 @@ +# Created by https://www.toptal.com/developers/gitignore/api/go,intellij +# Edit at https://www.toptal.com/developers/gitignore?templates=go,intellij + +### Go ### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# Intellij +.idea + +# VS Code +.vscode + +# +dist + +.*.toml + +.boot.yaml \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..f886ddce --- /dev/null +++ b/LICENSE 
@@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Ara Park + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..c737d042 --- /dev/null +++ b/Makefile @@ -0,0 +1,75 @@ +-include .env + +PACKAGE := "github.com/siyul-park/uniflow" + +GO_PACKAGE := $(shell go list ${PACKAGE}/...) + +.PHONY: init +init: + @go install -v ${GO_PACKAGE} + +.PHONY: init-staticcheck +init-staticcheck: + @go install honnef.co/go/tools/cmd/staticcheck@latest + +.PHONY: init-godoc +init-godoc: + @go install golang.org/x/tools/cmd/godoc@latest + +.PHONY: generate +generate: + @go generate ${GO_PACKAGE} + +.PHONY: build +build: + @go clean -cache + @mkdir -p dist + @go build -o dist ./... 
+ +.PHONY: clean +clean: + @go clean -cache + @rm -rf dist + +.PHONY: tidy +tidy: + @go mod tidy + +.PHONY: check +check: lint test + +.PHONY: test +test: + @go test $(test-options) ${GO_PACKAGE} + +.PHONY: race +race: + @go test -race $(test-options) ${GO_PACKAGE} + +.PHONY: coverage +coverage: + @go test -coverprofile coverage.out -covermode count ${GO_PACKAGE} + @go tool cover -func=coverage.out | grep total + +.PHONY: benchmark +benchmark: + @go test -run="-" -bench=".*" -benchmem ${GO_PACKAGE} + +.PHONY: lint +lint: fmt vet staticcheck + +.PHONY: vet +vet: + @go vet ${GO_PACKAGE} + +.PHONY: fmt +fmt: + @go fmt ${GO_PACKAGE} + +.PHONY: staticcheck +staticcheck: init-staticcheck + @staticcheck ${GO_PACKAGE} + +.PHONY: doc +doc: init-godoc + @godoc -http=:6060 diff --git a/README.md b/README.md new file mode 100644 index 00000000..2d459419 --- /dev/null +++ b/README.md @@ -0,0 +1,99 @@ +# uniflow + +[![go report][go_report_img]][go_report_url] +[![code coverage][go_code_coverage_img]][go_code_coverage_url] +[![check][repo_check_img]][repo_check_url] +[![release][repo_releases_img]][repo_releases_url] + +> Create your uniflow and integrate it anywhere! + +Uniflow is a low-code engine for the backend. You can connect the nodes to create a flow and run it. + +## Getting Started +### Installation +First, [download][go_download_url] and install **Go**. Version `1.21` or higher is required. + +Clone the repository by using the `git clone` command: +```shell +git clone https://github.com/siyul-park/uniflow +``` + +And then init the project: +```shell +cd uniflow +make init +``` + +### Build + +Build the project using the following command: +```shell +make build +``` + +The build result is created in the `/dist`. +```shell +ls /dist +uniflow +``` + +If you want to test the project. then run the following command: +```shell +make test +``` + +### Configuration +Before use any command. You can configure environment variables. 
+ +You can set environment variables to use `.uniflow.toml` or system environment variables. + +| TOML Key | Env Key | Default | +|---|---|---| +| database.url | DATABASE.URL | memdb:// | +| database.name | DATABASE.NAME | | + +### Start + +Uniflow is now ready to be used. Let's start the [ping](/examples/ping.yaml). + +To start uniflow, using the following command: +```shell +./dist/uniflow start --boot example/ping.yaml +``` +`--boot` is install initially if the node does not exist in namespace. + +Let's check if the started uniflow is providing a http endpoint normally. +```shell +curl localhost:8000/ping +pong# +``` + +If you wish to apply nodes to a running server, use the `apply`. + +Run the following command for more information. +```shell +./dist/uniflow start --help +``` + + + +[go_download_url]: https://golang.org/dl/ +[go_version_img]: https://img.shields.io/badge/Go-1.21+-00ADD8?style=for-the-badge&logo=go +[go_code_coverage_img]: https://codecov.io/gh/siyul-park/uniflow/graph/badge.svg?token=quEl9AbBcW +[go_code_coverage_url]: https://codecov.io/gh/siyul-park/uniflow +[go_report_img]: https://goreportcard.com/badge/github.com/siyul-park/uniflow +[go_report_url]: https://goreportcard.com/report/github.com/siyul-park/uniflow + + + +[repo_url]: https://github.com/siyul-park/uniflow +[repo_issues_url]: https://github.com/siyul-park/uniflow/issues +[repo_pull_request_url]: https://github.com/siyul-park/uniflow/pulls +[repo_discussions_url]: https://github.com/siyul-park/uniflow/discussions +[repo_releases_img]: https://img.shields.io/github/release/siyul-park/uniflow.svg +[repo_releases_url]: https://github.com/siyul-park/uniflow/releases +[repo_wiki_url]: https://github.com/siyul-park/uniflow/wiki +[repo_wiki_img]: https://img.shields.io/badge/docs-wiki_page-blue?style=for-the-badge&logo=none +[repo_wiki_faq_url]: https://github.com/siyul-park/uniflow/wiki/FAQ +[repo_check_img]: https://github.com/siyual-park/uniflow/actions/uniflows/check.yml/badge.svg 
+[repo_check_url]: https://github.com/siyual-park/uniflow/actions/uniflows/check.yml diff --git a/cmd/flag/convert.go b/cmd/flag/convert.go new file mode 100644 index 00000000..21c55a0a --- /dev/null +++ b/cmd/flag/convert.go @@ -0,0 +1,14 @@ +package flag + +import "github.com/iancoleman/strcase" + +func ToKey(flag string) string { + return strcase.ToSnake(flag) +} + +func ToShorthand(flag string) string { + if flag == "" { + return "" + } + return flag[0:1] +} diff --git a/cmd/resource/scheme.go b/cmd/resource/scheme.go new file mode 100644 index 00000000..3e1d9cda --- /dev/null +++ b/cmd/resource/scheme.go @@ -0,0 +1,69 @@ +package resource + +import ( + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/scheme" +) + +type ( + SpecCodecOptions struct { + Scheme *scheme.Scheme + Namespace string + } + + SpecCodec struct { + scheme *scheme.Scheme + namespace string + } +) + +func NewSpecCodec(opts ...SpecCodecOptions) *SpecCodec { + var scheme *scheme.Scheme + var namespace string + + for _, opt := range opts { + if opt.Scheme != nil { + scheme = opt.Scheme + } + if opt.Namespace != "" { + namespace = opt.Namespace + } + } + + return &SpecCodec{ + scheme: scheme, + namespace: namespace, + } +} + +func (c *SpecCodec) Decode(data any) (scheme.Spec, error) { + doc, err := primitive.MarshalBinary(data) + if err != nil { + return nil, err + } + + unstructured := scheme.NewUnstructured(doc.(*primitive.Map)) + + if unstructured.GetNamespace() == "" { + if c.namespace != "" { + unstructured.SetNamespace(c.namespace) + } else { + unstructured.SetNamespace(scheme.NamespaceDefault) + } + } + + if c.scheme == nil { + return unstructured, nil + } + + spec, ok := c.scheme.New(unstructured.GetKind()) + if !ok { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } + if err := unstructured.Unmarshal(spec); err != nil { + return nil, err + } + return spec, nil 
+} diff --git a/cmd/resource/scheme_test.go b/cmd/resource/scheme_test.go new file mode 100644 index 00000000..7ca49350 --- /dev/null +++ b/cmd/resource/scheme_test.go @@ -0,0 +1,32 @@ +package resource + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/stretchr/testify/assert" +) + +func TestSpecCodec_Decode(t *testing.T) { + s := scheme.New() + kind := faker.Word() + + s.AddKnownType(kind, &scheme.SpecMeta{}) + + codec := NewSpecCodec(SpecCodecOptions{ + Scheme: s, + }) + + data := map[string]any{ + scheme.KeyID: ulid.Make().String(), + scheme.KeyKind: kind, + } + + spec, err := codec.Decode(data) + assert.NoError(t, err) + assert.IsType(t, spec, &scheme.SpecMeta{}) + assert.Equal(t, data[scheme.KeyID], spec.GetID().String()) + assert.Equal(t, data[scheme.KeyKind], spec.GetKind()) +} diff --git a/cmd/resource/yamljson.go b/cmd/resource/yamljson.go new file mode 100644 index 00000000..612dca30 --- /dev/null +++ b/cmd/resource/yamljson.go @@ -0,0 +1,15 @@ +package resource + +import ( + "encoding/json" + "net/http" + + "gopkg.in/yaml.v3" +) + +func UnmarshalYAMLOrJSON(data []byte, v any) error { + if http.DetectContentType(data) == "application/json" { + return json.Unmarshal(data, v) + } + return yaml.Unmarshal(data, v) +} diff --git a/cmd/uniflow/apply/cmd.go b/cmd/uniflow/apply/cmd.go new file mode 100644 index 00000000..c1b28e17 --- /dev/null +++ b/cmd/uniflow/apply/cmd.go @@ -0,0 +1,145 @@ +package apply + +import ( + "io" + "io/fs" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/cmd/flag" + "github.com/siyul-park/uniflow/cmd/resource" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/spf13/cobra" +) + +type ( + Config struct { + Scheme *scheme.Scheme + Database database.Database + FS fs.FS + } +) + 
+func NewCmd(config Config) *cobra.Command { + sc := config.Scheme + db := config.Database + fsys := config.FS + + cmd := &cobra.Command{ + Use: "apply", + Short: "Apply a configuration to a resource by file name", + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + ns, err := cmd.Flags().GetString(FlagNamespace) + if err != nil { + return err + } + fl, err := cmd.Flags().GetString(FlagFile) + if err != nil { + return err + } + + st, err := storage.New(ctx, storage.Config{ + Scheme: sc, + Database: db, + }) + if err != nil { + return err + } + + file, err := fsys.Open(fl) + if err != nil { + return err + } + defer func() { _ = file.Close() }() + + data, err := io.ReadAll(file) + if err != nil { + return err + } + + var raws []map[string]any + if err := resource.UnmarshalYAMLOrJSON(data, &raws); err != nil { + var e map[string]any + if err := resource.UnmarshalYAMLOrJSON(data, &e); err != nil { + return err + } else { + raws = []map[string]any{e} + } + } + + codec := resource.NewSpecCodec(resource.SpecCodecOptions{ + Scheme: sc, + Namespace: ns, + }) + + var specs []scheme.Spec + for _, raw := range raws { + if spec, err := codec.Decode(raw); err != nil { + return err + } else { + specs = append(specs, spec) + } + } + + for _, spec := range specs { + if util.IsZero(spec.GetID()) { + if spec.GetName() != "" { + exist, err := st.FindOne(ctx, storage.Where[string](scheme.KeyName).EQ(spec.GetName()).And(storage.Where[string](scheme.KeyNamespace).EQ(spec.GetNamespace()))) + if err != nil { + return err + } + if exist != nil { + spec.SetID(exist.GetID()) + } + } else { + spec.SetID(ulid.Make()) + } + } + } + + var ids []ulid.ULID + for _, spec := range specs { + ids = append(ids, spec.GetID()) + } + + exists, err := st.FindMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(ids...), &database.FindOptions{ + Limit: util.Ptr[int](len(ids)), + }) + if err != nil { + return err + } + existsIds := make(map[ulid.ULID]struct{}, len(exists)) + for _, 
spec := range exists { + existsIds[spec.GetID()] = struct{}{} + } + + var inserted []scheme.Spec + var updated []scheme.Spec + for _, spec := range specs { + if _, ok := existsIds[spec.GetID()]; ok { + updated = append(updated, spec) + } else { + inserted = append(inserted, spec) + } + } + + if _, err := st.InsertMany(ctx, inserted); err != nil { + return err + } + if _, err := st.UpdateMany(ctx, updated); err != nil { + return err + } + + return nil + }, + } + + cmd.PersistentFlags().StringP(FlagNamespace, flag.ToShorthand(FlagNamespace), "", "uniflow namespace") + cmd.PersistentFlags().StringP(FlagFile, flag.ToShorthand(FlagFile), "", "configuration file name") + + return cmd +} diff --git a/cmd/uniflow/apply/cmd_test.go b/cmd/uniflow/apply/cmd_test.go new file mode 100644 index 00000000..e3241ed3 --- /dev/null +++ b/cmd/uniflow/apply/cmd_test.go @@ -0,0 +1,69 @@ +package apply + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "testing/fstest" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/stretchr/testify/assert" +) + +func TestExecute(t *testing.T) { + s := scheme.New() + db := memdb.New("") + fsys := make(fstest.MapFS) + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: db, + }) + + patchFilepath := "patch.json" + kind := faker.Word() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + data, _ := json.Marshal(spec) + + fsys[patchFilepath] = &fstest.MapFile{ + Data: data, + } + + output := new(bytes.Buffer) + + cmd 
:= NewCmd(Config{ + Scheme: s, + FS: fsys, + Database: db, + }) + cmd.SetOut(output) + cmd.SetErr(output) + + cmd.SetArgs([]string{"--file", patchFilepath}) + + err := cmd.Execute() + assert.NoError(t, err) + + r, err := st.FindOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.Equal(t, spec, r) +} diff --git a/cmd/uniflow/apply/flag.go b/cmd/uniflow/apply/flag.go new file mode 100644 index 00000000..9d6a7a69 --- /dev/null +++ b/cmd/uniflow/apply/flag.go @@ -0,0 +1,6 @@ +package apply + +const ( + FlagNamespace = "namespace" + FlagFile = "file" +) diff --git a/cmd/uniflow/cmd.go b/cmd/uniflow/cmd.go new file mode 100644 index 00000000..3be48908 --- /dev/null +++ b/cmd/uniflow/cmd.go @@ -0,0 +1,47 @@ +package main + +import ( + "io/fs" + + "github.com/siyul-park/uniflow/cmd/uniflow/apply" + "github.com/siyul-park/uniflow/cmd/uniflow/start" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/spf13/cobra" +) + +type ( + Config struct { + Scheme *scheme.Scheme + Hook *hook.Hook + Database database.Database + FS fs.FS + } +) + +func NewCmd(config Config) *cobra.Command { + sc := config.Scheme + hk := config.Hook + db := config.Database + fsys := config.FS + + cmd := &cobra.Command{ + Use: "uniflow", + Long: "Create your uniflow and integrate it anywhere!", + } + + cmd.AddCommand(start.NewCmd(start.Config{ + Scheme: sc, + Hook: hk, + Database: db, + FS: fsys, + })) + cmd.AddCommand(apply.NewCmd(apply.Config{ + Scheme: sc, + Database: db, + FS: fsys, + })) + + return cmd +} diff --git a/cmd/uniflow/flag.go b/cmd/uniflow/flag.go new file mode 100644 index 00000000..0bb16e63 --- /dev/null +++ b/cmd/uniflow/flag.go @@ -0,0 +1,6 @@ +package main + +const ( + FlagDatabaseURL = "database.url" + FlagDatabaseName = "database.name" +) diff --git a/cmd/uniflow/main.go b/cmd/uniflow/main.go new file mode 100644 index 
00000000..f5b5948c --- /dev/null +++ b/cmd/uniflow/main.go @@ -0,0 +1,111 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/database/mongodb" + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/plugin/controllx" + "github.com/siyul-park/uniflow/pkg/plugin/networkx" + "github.com/siyul-park/uniflow/pkg/plugin/systemx" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/spf13/viper" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + configFile = ".uniflow.toml" +) + +func init() { + viper.SetConfigFile(configFile) + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} + +func main() { + if err := execute(); err != nil { + fmt.Printf("%v", err) + os.Exit(1) + } +} + +func execute() error { + ctx := context.Background() + + sb := scheme.NewBuilder( + controllx.AddToScheme(), + networkx.AddToScheme(), + ) + hb := hook.NewBuilder( + networkx.AddToHooks(), + ) + + sc, err := sb.Build() + if err != nil { + return err + } + hk, err := hb.Build() + if err != nil { + return err + } + + db, err := loadDB(ctx) + if err != nil { + return err + } + + curDir, err := os.Getwd() + if err != nil { + return err + } + fsys := os.DirFS(curDir) + + st, err := storage.New(ctx, storage.Config{ + Scheme: sc, + Database: db, + }) + if err != nil { + return err + } + systemx.AddToScheme(st)(sc) + + cmd := NewCmd(Config{ + Scheme: sc, + Hook: hk, + Database: db, + FS: fsys, + }) + if err := cmd.Execute(); err != nil { + return err + } + return nil +} + +func loadDB(ctx context.Context) (database.Database, error) { + dbURL := viper.GetString(FlagDatabaseURL) + dbName := viper.GetString(FlagDatabaseName) + + if dbURL == "" || 
strings.HasPrefix(dbURL, "memdb://") { + return memdb.New(dbName), nil + } else if strings.HasPrefix(dbURL, "mongodb://") { + serverAPI := options.ServerAPI(options.ServerAPIVersion1) + opts := options.Client().ApplyURI(dbURL).SetServerAPIOptions(serverAPI) + client, err := mongo.Connect(ctx, opts) + if err != nil { + return nil, err + } + return mongodb.NewDatabase(client.Database(dbName)), nil + } + return nil, fmt.Errorf("%s is invalid", FlagDatabaseURL) +} diff --git a/cmd/uniflow/start/cmd.go b/cmd/uniflow/start/cmd.go new file mode 100644 index 00000000..c3ba645d --- /dev/null +++ b/cmd/uniflow/start/cmd.go @@ -0,0 +1,136 @@ +package start + +import ( + "io" + "io/fs" + "os" + "os/signal" + "syscall" + + "github.com/siyul-park/uniflow/cmd/flag" + "github.com/siyul-park/uniflow/cmd/resource" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/runtime" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/spf13/cobra" +) + +type ( + Config struct { + Scheme *scheme.Scheme + Hook *hook.Hook + Database database.Database + FS fs.FS + } +) + +func NewCmd(config Config) *cobra.Command { + sc := config.Scheme + hk := config.Hook + db := config.Database + fsys := config.FS + + cmd := &cobra.Command{ + Use: "start", + Short: "Start a uniflow worker", + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + ns, err := cmd.Flags().GetString(FlagNamespace) + if err != nil { + return err + } + boot, err := cmd.Flags().GetString(FlagBoot) + if err != nil { + return err + } + + if boot != "" { + st, err := storage.New(ctx, storage.Config{ + Scheme: sc, + Database: db, + }) + if err != nil { + return err + } + + var filter *storage.Filter + if ns != "" { + filter = storage.Where[string](scheme.KeyNamespace).EQ(ns) + } + + if specs, err := st.FindMany(ctx, filter, 
&database.FindOptions{ + Limit: util.Ptr[int](1), + }); err != nil { + return err + } else if len(specs) == 0 { + file, err := fsys.Open(boot) + if err != nil { + return err + } + defer func() { _ = file.Close() }() + + data, err := io.ReadAll(file) + if err != nil { + return err + } + + var raws []map[string]any + if err := resource.UnmarshalYAMLOrJSON(data, &raws); err != nil { + var e map[string]any + if err := resource.UnmarshalYAMLOrJSON(data, &e); err != nil { + return err + } else { + raws = []map[string]any{e} + } + } + + codec := resource.NewSpecCodec(resource.SpecCodecOptions{ + Scheme: sc, + Namespace: ns, + }) + + var specs []scheme.Spec + for _, raw := range raws { + if spec, err := codec.Decode(raw); err != nil { + return err + } else { + specs = append(specs, spec) + } + } + + if _, err := st.InsertMany(ctx, specs); err != nil { + return err + } + } + } + + r, err := runtime.New(ctx, runtime.Config{ + Namespace: ns, + Scheme: sc, + Hooks: hk, + Database: db, + }) + if err != nil { + return err + } + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + _ = r.Close(ctx) + }() + + return r.Start(ctx) + }, + } + + cmd.PersistentFlags().StringP(FlagNamespace, flag.ToShorthand(FlagNamespace), "", "Set the namespace. If not set it up, runs all namespaces. 
In this case, if namespace is sharing resources exclusively, some nodes may not run normally.") + cmd.PersistentFlags().StringP(FlagBoot, flag.ToShorthand(FlagBoot), "", "Set the boot file path that must be installed initially if the node does not exist in namespace.") + + return cmd +} diff --git a/cmd/uniflow/start/cmd_test.go b/cmd/uniflow/start/cmd_test.go new file mode 100644 index 00000000..11b6f21f --- /dev/null +++ b/cmd/uniflow/start/cmd_test.go @@ -0,0 +1,90 @@ +package start + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "testing/fstest" + "time" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/stretchr/testify/assert" +) + +func TestExecute(t *testing.T) { + s := scheme.New() + h := hook.New() + db := memdb.New("") + fsys := make(fstest.MapFS) + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: db, + }) + + bootFilepath := "boot.json" + kind := faker.Word() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + data, _ := json.Marshal(spec) + + fsys[bootFilepath] = &fstest.MapFile{ + Data: data, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + output := new(bytes.Buffer) + + cmd := NewCmd(Config{ + Scheme: s, + Hook: h, + FS: fsys, + Database: db, + }) + cmd.SetOut(output) + cmd.SetErr(output) + cmd.SetContext(ctx) + + cmd.SetArgs([]string{"--boot", bootFilepath}) + + go func() { + _ = cmd.Execute() + }() + + for { + 
select { + case <-ctx.Done(): + assert.Fail(t, "timeout") + return + default: + r, err := st.FindOne(ctx, storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + if r != nil { + return + } + + // TODO: assert symbol is loaded. + } + } +} diff --git a/cmd/uniflow/start/flag.go b/cmd/uniflow/start/flag.go new file mode 100644 index 00000000..2f0596b0 --- /dev/null +++ b/cmd/uniflow/start/flag.go @@ -0,0 +1,6 @@ +package start + +const ( + FlagNamespace = "namespace" + FlagBoot = "boot" +) diff --git a/examples/boot.yaml b/examples/boot.yaml new file mode 100644 index 00000000..1a360f8f --- /dev/null +++ b/examples/boot.yaml @@ -0,0 +1,145 @@ +- kind: http + name: http + namespace: system + address: :8000 + links: + io: + - name: router + port: in + +- kind: router + name: router + namespace: system + routes: + - method: POST + path: /v1/nodes + port: out[0] + - method: GET + path: /v1/nodes + port: out[1] + - method: GET + path: /v1/nodes/:node_id + port: out[2] + - method: PATCH + path: /v1/nodes/:node_id + port: out[3] + - method: DELETE + path: /v1/nodes/:node_id + port: out[4] + links: + out[0]: + - name: post_nodes + port: in + out[1]: + - name: get_nodes + port: in + out[2]: + - name: get_node + port: in + out[3]: + - name: patch_node + port: in + out[4]: + - name: delete_node + port: in + +- kind: snippet + name: post_nodes + namespace: system + lang: jsonata + code: | + $.body + links: + out: + - name: insert + port: io + +- kind: snippet + name: get_nodes + namespace: system + lang: json + code: | + null + links: + out: + - name: select + port: io + +- kind: snippet + name: get_node + namespace: system + lang: jsonata + code: | + { "id": $.params.node_id } + links: + out: + - name: select + port: io + +- kind: snippet + name: patch_node + namespace: system + lang: jsonata + code: | + $merge([{ "id": $.params.node_id }, $.body]) + links: + out: + - name: update + port: io + +- kind: snippet + name: delete_node + namespace: system + 
lang: jsonata + code: | + { "id": $.params.node_id } + links: + out: + - name: delete + port: io + +- kind: reflect + name: insert + namespace: system + op: insert + links: + error: + - name: not_found + port: io + +- kind: reflect + name: select + namespace: system + op: select + links: + error: + - name: not_found + port: io + +- kind: reflect + name: update + namespace: system + op: update + links: + error: + - name: not_found + port: io + +- kind: reflect + name: delete + namespace: system + op: delete + links: + error: + - name: not_found + port: io + +- kind: snippet + name: not_found + namespace: system + lang: json + code: | + { + "body": "Not Found", + "status": 404 + } diff --git a/examples/echo.yaml b/examples/echo.yaml new file mode 100644 index 00000000..ef4349f8 --- /dev/null +++ b/examples/echo.yaml @@ -0,0 +1,18 @@ +- kind: http + name: http + address: :8000 + links: + io: + - name: router + port: in + +- kind: router + name: router + routes: + - method: GET + path: /* + port: out[0] + links: + out[0]: + - name: http + port: io diff --git a/examples/ping.yaml b/examples/ping.yaml new file mode 100644 index 00000000..820e5da9 --- /dev/null +++ b/examples/ping.yaml @@ -0,0 +1,24 @@ +- kind: http + name: http + address: :8000 + links: + io: + - name: router + port: in + +- kind: router + name: router + routes: + - method: GET + path: /ping + port: out[0] + links: + out[0]: + - name: pong + port: io + +- kind: snippet + name: pong + lang: json + code: | + "pong" diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..bf6609d7 --- /dev/null +++ b/go.mod @@ -0,0 +1,60 @@ +module github.com/siyul-park/uniflow + +go 1.21.0 + +require ( + github.com/benbjohnson/immutable v0.4.3 + github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d + github.com/evanw/esbuild v0.19.5 + github.com/go-faker/faker/v4 v4.2.0 + github.com/iancoleman/strcase v0.3.0 + github.com/lithammer/dedent v1.1.0 + github.com/mitchellh/hashstructure/v2 v2.0.2 + 
github.com/oklog/ulid/v2 v2.1.0 + github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 + github.com/pkg/errors v0.9.1 + github.com/spf13/cobra v1.8.0 + github.com/spf13/viper v1.17.0 + github.com/stretchr/testify v1.8.4 + github.com/tryvium-travels/memongo v0.10.0 + github.com/xiatechs/jsonata-go v1.7.0 + go.mongodb.org/mongo-driver v1.12.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/acobaugh/osrelease v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.10.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/exp 
v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..d35b19b8 --- /dev/null +++ b/go.sum @@ -0,0 +1,595 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/acobaugh/osrelease v0.1.0 
h1:Yb59HQDGGNhCj4suHaFQQfBps5wyoKLSSX/J/+UifRE= +github.com/acobaugh/osrelease v0.1.0/go.mod h1:4bFEs0MtgHNHBrmHCt67gNisnabCRAlzdVasCEGHTWY= +github.com/benbjohnson/immutable v0.4.3 h1:GYHcksoJ9K6HyAUpGxwZURrbTkXA0Dh4otXGqbhdrjA= +github.com/benbjohnson/immutable v0.4.3/go.mod h1:qJIKKSmdqz1tVzNtst1DZzvaqOU1onk1rc03IeM3Owk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d h1:wi6jN5LVt/ljaBG4ue79Ekzb12QfJ52L9Q98tl8SWhw= +github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanw/esbuild v0.19.5 h1:9ildZqajUJzDAwNf9MyQsLh2RdDRKTq3kcyyzhE39us= +github.com/evanw/esbuild v0.19.5/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= 
+github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-faker/faker/v4 v4.2.0 h1:dGebOupKwssrODV51E0zbMrv5e2gO9VWSLNC1WDCpWg= +github.com/go-faker/faker/v4 v4.2.0/go.mod h1:F/bBy8GH9NxOxMInug5Gx4WYeG6fHJZ8Ol/dhcpRub4= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= +github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lithammer/dedent v1.1.0 
h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= +github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= +github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.8.0 
h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tryvium-travels/memongo v0.10.0 h1:qDhyts06xFtGJDCd9NznSDFMiKa9UrzZ8N2+2+lqb6w= +github.com/tryvium-travels/memongo v0.10.0/go.mod h1:riRUHKRQ5JbeX2ryzFfmr7P2EYXIkNwgloSQJPpBikA= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram 
v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xiatechs/jsonata-go v1.6.9 h1:1H6J40/zQpDhcYuQwUq6JrYEJnwpzTnLvnBfDdZLD6I= +github.com/xiatechs/jsonata-go v1.6.9/go.mod h1:qc/5uRtTKE5mil6PncK/ogxFQyhqlI6YnxvdyAz57Xw= +github.com/xiatechs/jsonata-go v1.7.0 h1:eum70CuOqGEf+KtB9D9A5Ri189DLJpOe5DAHaazHa4w= +github.com/xiatechs/jsonata-go v1.7.0/go.mod h1:qc/5uRtTKE5mil6PncK/ogxFQyhqlI6YnxvdyAz57Xw= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= +go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io 
v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc 
v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/encoding/decoder.go b/internal/encoding/decoder.go new file mode 100644 index 00000000..238ef7be --- /dev/null +++ b/internal/encoding/decoder.go @@ -0,0 +1,16 @@ +package encoding + +type ( + // Decoder is the interface for decoding data. + Decoder[S, T any] interface { + Decode(source S, target T) error + } + + DecoderFunc[S, T any] func(source S, target T) error +) + +var _ Decoder[any, any] = DecoderFunc[any, any](func(source, target any) error { return nil }) + +func (dec DecoderFunc[S, T]) Decode(source S, target T) error { + return dec(source, target) +} diff --git a/internal/encoding/encoder.go b/internal/encoding/encoder.go new file mode 100644 index 00000000..22882f52 --- /dev/null +++ b/internal/encoding/encoder.go @@ -0,0 +1,16 @@ +package encoding + +type ( + // Encoder is an interface for encoding data. + Encoder[S, T any] interface { + Encode(source S) (T, error) + } + + EncoderFunc[S, T any] func(source S) (T, error) +) + +var _ Encoder[any, any] = EncoderFunc[any, any](func(_ any) (any, error) { return nil, nil }) + +func (enc EncoderFunc[S, T]) Encode(source S) (T, error) { + return enc(source) +} diff --git a/internal/encoding/error.go b/internal/encoding/error.go new file mode 100644 index 00000000..a96c8e9d --- /dev/null +++ b/internal/encoding/error.go @@ -0,0 +1,9 @@ +package encoding + +import ( + "github.com/pkg/errors" +) + +var ( + ErrUnsupportedValue = errors.New("unsupported value") +) diff --git a/internal/encoding/group.go b/internal/encoding/group.go new file mode 100644 index 00000000..40aeb728 --- /dev/null +++ b/internal/encoding/group.go @@ -0,0 +1,89 @@ +package encoding + +import ( + "sync" + + "github.com/pkg/errors" +) + +type ( + // EncoderGroup is a group of Encoder. 
+ EncoderGroup[S, T any] struct { + encoders []Encoder[S, T] + lock sync.RWMutex + } + + // DecoderGroup is a group of Decoder. + DecoderGroup[S, T any] struct { + decoders []Decoder[S, T] + lock sync.RWMutex + } +) + +var _ Encoder[any, any] = (*EncoderGroup[any, any])(nil) +var _ Decoder[any, any] = (*DecoderGroup[any, any])(nil) + +func NewEncoderGroup[S, T any]() *EncoderGroup[S, T] { + return &EncoderGroup[S, T]{} +} + +func (e *EncoderGroup[S, T]) Add(encoder Encoder[S, T]) { + e.lock.Lock() + defer e.lock.Unlock() + + e.encoders = append(e.encoders, encoder) +} + +func (e *EncoderGroup[S, T]) Len() int { + e.lock.Lock() + defer e.lock.Unlock() + + return len(e.encoders) +} + +func (e *EncoderGroup[S, T]) Encode(source S) (T, error) { + e.lock.RLock() + defer e.lock.RUnlock() + + var zero T + for _, encoder := range e.encoders { + if target, err := encoder.Encode(source); err == nil { + return target, nil + } else if !errors.Is(err, ErrUnsupportedValue) { + return zero, err + } + } + return zero, errors.WithStack(ErrUnsupportedValue) +} + +func NewDecoderGroup[S, T any]() *DecoderGroup[S, T] { + return &DecoderGroup[S, T]{} +} + +func (d *DecoderGroup[S, T]) Add(decoder Decoder[S, T]) { + d.lock.Lock() + defer d.lock.Unlock() + + d.decoders = append(d.decoders, decoder) +} + +func (d *DecoderGroup[S, T]) Len() int { + d.lock.Lock() + defer d.lock.Unlock() + + return len(d.decoders) +} + +func (d *DecoderGroup[S, T]) Decode(source S, target T) error { + d.lock.RLock() + defer d.lock.RUnlock() + + for _, decoder := range d.decoders { + if err := decoder.Decode(source, target); err == nil { + return nil + } else if !errors.Is(err, ErrUnsupportedValue) { + return err + } + } + return errors.WithStack(ErrUnsupportedValue) +} diff --git a/internal/encoding/group_test.go b/internal/encoding/group_test.go new file mode 100644 index 00000000..11e06a55 --- /dev/null +++ b/internal/encoding/group_test.go @@ -0,0 +1,80 @@ +package encoding + +import ( + "strings" + 
"testing" + + "github.com/go-faker/faker/v4" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestNewEncoderGroup(t *testing.T) { + e := NewEncoderGroup[any, any]() + assert.NotNil(t, e) +} + +func TestEncoderGroup_Add(t *testing.T) { + e := NewEncoderGroup[any, any]() + e.Add(EncoderFunc[any, any](func(source any) (any, error) { + return source, nil + })) + + assert.Equal(t, 1, e.Len()) +} + +func TestEncoderGroup_Encode(t *testing.T) { + e := NewEncoderGroup[any, any]() + + v := faker.UUIDHyphenated() + + suffix := faker.UUIDHyphenated() + e.Add(EncoderFunc[any, any](func(source any) (any, error) { + if s, ok := source.(string); ok { + return s + suffix, nil + } + return nil, errors.WithStack(ErrUnsupportedValue) + })) + + res, err := e.Encode(v) + assert.NoError(t, err) + assert.Equal(t, v+suffix, res) +} + +func TestNewDecoderGroup(t *testing.T) { + d := NewDecoderGroup[any, any]() + assert.NotNil(t, d) +} + +func TestDecoderGroup_Add(t *testing.T) { + d := NewDecoderGroup[any, any]() + d.Add(DecoderFunc[any, any](func(_ any, _ any) error { + return errors.WithStack(ErrUnsupportedValue) + })) + + assert.Equal(t, 1, d.Len()) +} + +func TestEncoderGroup_Decode(t *testing.T) { + d := NewDecoderGroup[any, any]() + + v := faker.UUIDHyphenated() + var res string + + suffix := faker.UUIDHyphenated() + d.Add(DecoderFunc[any, any](func(source any, target any) error { + if s, ok := source.(string); ok { + if t, ok := target.(*string); ok { + if strings.HasSuffix(s, suffix) { + *t = strings.TrimSuffix(s, suffix) + return nil + } + } + } + return errors.WithStack(ErrUnsupportedValue) + })) + + err := d.Decode(v+suffix, &res) + assert.NoError(t, err) + assert.Equal(t, v, res) +} diff --git a/internal/pool/map.go b/internal/pool/map.go new file mode 100644 index 00000000..10c00349 --- /dev/null +++ b/internal/pool/map.go @@ -0,0 +1,21 @@ +package pool + +import "sync" + +var ( + mapPool = sync.Pool{New: func() any { + return &sync.Map{} + }} +) + 
+func GetMap() *sync.Map { + return mapPool.Get().(*sync.Map) +} + +func PutMap(v *sync.Map) { + v.Range(func(key, _ any) bool { + v.Delete(key) + return true + }) + mapPool.Put(v) +} diff --git a/internal/pool/map_test.go b/internal/pool/map_test.go new file mode 100644 index 00000000..a4f368bf --- /dev/null +++ b/internal/pool/map_test.go @@ -0,0 +1,28 @@ +package pool + +import ( + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestGetMap(t *testing.T) { + m := GetMap() + assert.NotNil(t, m) +} + +func TestPutMap(t *testing.T) { + m := GetMap() + + m.Store(faker.UUIDHyphenated(), faker.UUIDHyphenated()) + + PutMap(m) + + count := 0 + m.Range(func(_, _ any) bool { + count += 1 + return true + }) + + assert.Equal(t, 0, count) +} diff --git a/internal/util/compare.go b/internal/util/compare.go new file mode 100644 index 00000000..e4c10905 --- /dev/null +++ b/internal/util/compare.go @@ -0,0 +1,109 @@ +package util + +import ( + "math" + "reflect" +) + +func IsZero(v any) bool { + if IsNil(v) { + return true + } + return reflect.ValueOf(v).IsZero() +} + +func Equal(x any, y any) bool { + if IsNil(x) != IsNil(y) { + return false + } + c, ok := compare(reflect.ValueOf(x), reflect.ValueOf(y)) + if !ok { + if hash1, err := Hash(x); err == nil { + if hash2, err := Hash(y); err == nil { + return hash1 == hash2 + } + } + return reflect.DeepEqual(x, y) // Is unsafe compare + } + return c == 0 +} + +func Compare(x any, y any) int { + c, ok := compare(reflect.ValueOf(x), reflect.ValueOf(y)) + if !ok { + return 0 + } + return c +} + +func compare(x, y reflect.Value) (int, bool) { + x = rawValue(x) + y = rawValue(y) + + k1 := basicKind(x) + k2 := basicKind(y) + + if k1 == invalidKind || k2 == invalidKind { + return 0, false + } + if k1 == pointerKind { + return compare(x.Elem(), y) + } + if k2 == pointerKind { + return compare(x, y.Elem()) + } + + if k1 != k2 { + switch { + case k1 == intKind && k2 == uintKind: + if x.Int() < 0 { + 
return -1, true + } + return compareStrict(uint64(x.Int()), y.Uint()), true + case k1 == uintKind && k2 == intKind: + if y.Int() < 0 { + return 1, true + } + return compareStrict(x.Uint(), uint64(y.Int())), true + default: + return compareStrict(k1, k2), true + } + } else { + switch k1 { + case nullKind: + return 0, true + case floatKind: + return compareStrict(x.Float(), y.Float()), true + case intKind: + return compareStrict(x.Int(), y.Int()), true + case uintKind: + return compareStrict(x.Uint(), y.Uint()), true + case stringKind: + return compareStrict(x.String(), y.String()), true + case iterableKind: + for i := 0; i < int(math.Min(float64(x.Len()), float64(y.Len()))); i++ { + if c, ok := compare(x.Index(i), y.Index(i)); ok && c != 0 { + return c, true + } else if !ok { + return 0, false + } + } + return compareStrict(x.Len(), y.Len()), true + default: + return 0, false + } + } +} + +func compareStrict[T Ordered](x T, y T) int { + if x == y { + return 0 + } + if x > y { + return 1 + } + if x < y { + return -1 + } + return 0 +} diff --git a/internal/util/compare_test.go b/internal/util/compare_test.go new file mode 100644 index 00000000..4427af82 --- /dev/null +++ b/internal/util/compare_test.go @@ -0,0 +1,494 @@ +package util + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsZero(t *testing.T) { + var testCase = []struct { + when any + expect bool + }{ + { + when: nil, + expect: true, + }, + { + when: "", + expect: true, + }, + { + when: 0, + expect: true, + }, + { + when: false, + expect: true, + }, + { + when: struct{}{}, + expect: true, + }, + } + + for _, tc := range testCase { + assert.Equal(t, tc.expect, IsZero(tc.when)) + } +} + +func TestEqual(t *testing.T) { + var testCase = []struct { + when []any + expect bool + }{ + { + when: []any{uint8(0), uint8(0)}, + expect: true, + }, + { + when: []any{uint16(0), uint16(0)}, + expect: true, + }, + { + when: []any{uint32(0), uint32(0)}, + expect: true, + }, + { + when: 
[]any{uint64(0), uint64(0)}, + expect: true, + }, + { + when: []any{int8(0), int8(0)}, + expect: true, + }, + { + when: []any{int16(0), int16(0)}, + expect: true, + }, + { + when: []any{int32(0), int32(0)}, + expect: true, + }, + { + when: []any{int64(0), int64(0)}, + expect: true, + }, + { + when: []any{int8(0), uint8(0)}, + expect: true, + }, + { + when: []any{int16(0), uint16(0)}, + expect: true, + }, + { + when: []any{int32(0), uint32(0)}, + expect: true, + }, + { + when: []any{int64(0), uint64(0)}, + expect: true, + }, + + { + when: []any{0, 1}, + expect: false, + }, + { + when: []any{false, true}, + expect: false, + }, + { + when: []any{"0", "1"}, + expect: false, + }, + } + + for _, tc := range testCase { + r := Equal(tc.when[0], tc.when[1]) + assert.Equal(t, tc.expect, r) + } +} + +func TestCompare(t *testing.T) { + var testCase1 = []struct { + when []any + expect int + }{ + { + when: []any{uint8(0), uint8(0)}, + expect: 0, + }, + { + when: []any{uint8(1), uint8(0)}, + expect: 1, + }, + { + when: []any{uint8(0), uint8(1)}, + expect: -1, + }, + { + when: []any{uint16(0), uint16(0)}, + expect: 0, + }, + { + when: []any{uint16(1), uint16(0)}, + expect: 1, + }, + { + when: []any{uint16(0), uint16(1)}, + expect: -1, + }, + { + when: []any{uint32(0), uint32(0)}, + expect: 0, + }, + { + when: []any{uint32(1), uint32(0)}, + expect: 1, + }, + { + when: []any{uint32(0), uint32(1)}, + expect: -1, + }, + { + when: []any{uint64(0), uint64(0)}, + expect: 0, + }, + { + when: []any{uint64(1), uint64(0)}, + expect: 1, + }, + { + when: []any{uint64(0), uint64(1)}, + expect: -1, + }, + { + when: []any{int8(0), int8(0)}, + expect: 0, + }, + { + when: []any{int8(1), int8(0)}, + expect: 1, + }, + { + when: []any{int8(0), int8(1)}, + expect: -1, + }, + { + when: []any{int16(0), int16(0)}, + expect: 0, + }, + { + when: []any{int16(1), int16(0)}, + expect: 1, + }, + { + when: []any{int16(0), int16(1)}, + expect: -1, + }, + { + when: []any{int32(0), int32(0)}, + expect: 0, + }, + { 
+ when: []any{int32(1), int32(0)}, + expect: 1, + }, + { + when: []any{int32(0), int32(1)}, + expect: -1, + }, + { + when: []any{int64(0), int64(0)}, + expect: 0, + }, + { + when: []any{int64(1), int64(0)}, + expect: 1, + }, + { + when: []any{int64(0), int64(1)}, + expect: -1, + }, + { + when: []any{float32(0), float32(0)}, + expect: 0, + }, + { + when: []any{float32(1), float32(0)}, + expect: 1, + }, + { + when: []any{float32(0), float32(1)}, + expect: -1, + }, + { + when: []any{float64(0), float64(0)}, + expect: 0, + }, + { + when: []any{float64(1), float64(0)}, + expect: 1, + }, + { + when: []any{float64(0), float64(1)}, + expect: -1, + }, + { + when: []any{"0", "0"}, + expect: 0, + }, + { + when: []any{"1", "0"}, + expect: 1, + }, + { + when: []any{"0", "1"}, + expect: -1, + }, + { + when: []any{0, 0}, + expect: 0, + }, + { + when: []any{1, 0}, + expect: 1, + }, + { + when: []any{0, 1}, + expect: -1, + }, + { + when: []any{uint(0), uint(0)}, + expect: 0, + }, + { + when: []any{uint(1), uint(0)}, + expect: 1, + }, + { + when: []any{uint(0), uint(1)}, + expect: -1, + }, + { + when: []any{uintptr(0), uintptr(0)}, + expect: 0, + }, + { + when: []any{uintptr(1), uintptr(0)}, + expect: 1, + }, + { + when: []any{uintptr(0), uintptr(1)}, + expect: -1, + }, + { + when: []any{nil, 0}, + expect: -1, + }, + { + when: []any{0, nil}, + expect: 1, + }, + { + when: []any{nil, nil}, + expect: 0, + }, + } + + for _, tc := range testCase1 { + r := Compare(tc.when[0], tc.when[1]) + assert.Equal(t, tc.expect, r) + } + + var testCase2 = []struct { + whenX any + whenY any + expect int + ok bool + }{ + { + whenX: []uint8{uint8(0), uint8(0)}, + whenY: []uint8{uint8(0), uint8(0)}, + expect: 0, + }, + { + whenX: []uint8{uint8(0), uint8(1)}, + whenY: []uint8{uint8(0), uint8(0)}, + expect: 1, + }, + { + whenX: []uint8{uint8(0), uint8(1)}, + whenY: []uint8{uint8(1), uint8(0)}, + expect: -1, + }, + + { + whenX: []uint16{uint16(0), uint16(0)}, + whenY: []uint16{uint16(0), uint16(0)}, + 
expect: 0, + }, + { + whenX: []uint16{uint16(0), uint16(1)}, + whenY: []uint16{uint16(0), uint16(0)}, + expect: 1, + }, + { + whenX: []uint16{uint16(0), uint16(1)}, + whenY: []uint16{uint16(1), uint16(0)}, + expect: -1, + }, + + { + whenX: []uint32{uint32(0), uint32(0)}, + whenY: []uint32{uint32(0), uint32(0)}, + expect: 0, + }, + { + whenX: []uint32{uint32(0), uint32(1)}, + whenY: []uint32{uint32(0), uint32(0)}, + expect: 1, + }, + { + whenX: []uint32{uint32(0), uint32(1)}, + whenY: []uint32{uint32(1), uint32(0)}, + expect: -1, + }, + + { + whenX: []uint64{uint64(0), uint64(0)}, + whenY: []uint64{uint64(0), uint64(0)}, + expect: 0, + }, + { + whenX: []uint64{uint64(0), uint64(1)}, + whenY: []uint64{uint64(0), uint64(0)}, + expect: 1, + }, + { + whenX: []uint64{uint64(0), uint64(1)}, + whenY: []uint64{uint64(1), uint64(0)}, + expect: -1, + }, + + { + whenX: []int8{int8(0), int8(0)}, + whenY: []int8{int8(0), int8(0)}, + expect: 0, + }, + { + whenX: []int8{int8(0), int8(1)}, + whenY: []int8{int8(0), int8(0)}, + expect: 1, + }, + { + whenX: []int8{int8(0), int8(1)}, + whenY: []int8{int8(1), int8(0)}, + expect: -1, + }, + + { + whenX: []int16{int16(0), int16(0)}, + whenY: []int16{int16(0), int16(0)}, + expect: 0, + }, + { + whenX: []int16{int16(0), int16(1)}, + whenY: []int16{int16(0), int16(0)}, + expect: 1, + }, + { + whenX: []int16{int16(0), int16(1)}, + whenY: []int16{int16(1), int16(0)}, + expect: -1, + }, + + { + whenX: []int32{int32(0), int32(0)}, + whenY: []int32{int32(0), int32(0)}, + expect: 0, + }, + { + whenX: []int32{int32(0), int32(1)}, + whenY: []int32{int32(0), int32(0)}, + expect: 1, + }, + { + whenX: []int32{int32(0), int32(1)}, + whenY: []int32{int32(1), int32(0)}, + expect: -1, + }, + + { + whenX: []int64{int64(0), int64(0)}, + whenY: []int64{int64(0), int64(0)}, + expect: 0, + }, + { + whenX: []int64{int64(0), int64(1)}, + whenY: []int64{int64(0), int64(0)}, + expect: 1, + }, + { + whenX: []int64{int64(0), int64(1)}, + whenY: []int64{int64(1), 
int64(0)}, + expect: -1, + }, + + { + whenX: []float32{float32(0), float32(0)}, + whenY: []float32{float32(0), float32(0)}, + expect: 0, + }, + { + whenX: []float32{float32(0), float32(1)}, + whenY: []float32{float32(0), float32(0)}, + expect: 1, + }, + { + whenX: []float32{float32(0), float32(1)}, + whenY: []float32{float32(1), float32(0)}, + expect: -1, + }, + + { + whenX: []float64{float64(0), float64(0)}, + whenY: []float64{float64(0), float64(0)}, + expect: 0, + }, + { + whenX: []float64{float64(0), float64(1)}, + whenY: []float64{float64(0), float64(0)}, + expect: 1, + }, + { + whenX: []float64{float64(0), float64(1)}, + whenY: []float64{float64(1), float64(0)}, + expect: -1, + }, + + { + whenX: []string{"0", "0"}, + whenY: []string{"0", "0"}, + expect: 0, + }, + { + whenX: []string{"0", "1"}, + whenY: []string{"0", "0"}, + expect: 1, + }, + { + whenX: []string{"0", "1"}, + whenY: []string{"1", "0"}, + expect: -1, + }, + } + + for _, tc := range testCase2 { + r := Compare(tc.whenX, tc.whenY) + assert.Equal(t, tc.expect, r) + } +} diff --git a/internal/util/copy.go b/internal/util/copy.go new file mode 100644 index 00000000..50058965 --- /dev/null +++ b/internal/util/copy.go @@ -0,0 +1,27 @@ +package util + +import ( + "bytes" + "encoding/gob" +) + +func Copy[V any](source V) V { + if IsNil(source) { + return source + } + + var target V + + var buffer bytes.Buffer + encoder := gob.NewEncoder(&buffer) + decoder := gob.NewDecoder(&buffer) + + if err := encoder.Encode(source); err != nil { + return source + } + if err := decoder.Decode(&target); err != nil { + return source + } + + return target +} diff --git a/internal/util/copy_test.go b/internal/util/copy_test.go new file mode 100644 index 00000000..f9f58997 --- /dev/null +++ b/internal/util/copy_test.go @@ -0,0 +1,40 @@ +package util + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCopy(t *testing.T) { + testCases := []struct { + when any + }{ + { + when: "string", + }, + 
{ + when: 1, + }, + { + when: true, + }, + { + when: []any{"string", 1, true}, + }, + { + when: map[string]any{ + "string": "string", + "int": 1, + "bool": true, + "arr": []any{"string", 1, true}, + }, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%v", tc.when), func(t *testing.T) { + assert.Equal(t, tc.when, Copy(tc.when)) + }) + } +} diff --git a/internal/util/hash.go b/internal/util/hash.go new file mode 100644 index 00000000..546ffb63 --- /dev/null +++ b/internal/util/hash.go @@ -0,0 +1,7 @@ +package util + +import "github.com/mitchellh/hashstructure/v2" + +func Hash(val any) (uint64, error) { + return hashstructure.Hash(val, hashstructure.FormatV2, nil) +} diff --git a/internal/util/kind.go b/internal/util/kind.go new file mode 100644 index 00000000..662849aa --- /dev/null +++ b/internal/util/kind.go @@ -0,0 +1,81 @@ +package util + +import ( + "reflect" +) + +type ( + Ordered interface { + Integer | Float | ~string + } + Complex interface { + ~complex64 | ~complex128 + } + Float interface { + ~float32 | ~float64 + } + Integer interface { + Signed | Unsigned + } + Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 + } + Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr + } +) + +type basisKind int + +const ( + invalidKind basisKind = iota + nullKind + intKind + uintKind + floatKind + complexKind + stringKind + mapKind + structKind + iterableKind + boolKind + pointerKind +) + +func basicKind(v reflect.Value) basisKind { + if !v.IsValid() || IsNil(v.Interface()) { + return nullKind + } + + switch v.Kind() { + case reflect.Bool: + return boolKind + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind + case reflect.Float32, reflect.Float64: + return floatKind + case reflect.Complex64, reflect.Complex128: + return complexKind + case reflect.String: + 
return stringKind + case reflect.Map: + return mapKind + case reflect.Struct: + return structKind + case reflect.Slice, reflect.Array: + return iterableKind + case reflect.Pointer: + return pointerKind + } + return invalidKind +} + +func rawValue(x reflect.Value) reflect.Value { + if !x.IsValid() { + return x + } + + return reflect.ValueOf(x.Interface()) +} diff --git a/internal/util/ptr.go b/internal/util/ptr.go new file mode 100644 index 00000000..fea833af --- /dev/null +++ b/internal/util/ptr.go @@ -0,0 +1,29 @@ +package util + +import ( + "reflect" +) + +func IsNil(i any) bool { + defer func() { _ = recover() }() + return i == nil || reflect.ValueOf(i).IsNil() +} + +func Ptr[T any](value T) *T { + return &value +} + +func UnPtr[T any](value *T) T { + if !IsNil(value) { + return *value + } + var zero T + return zero +} + +func PtrTo[S any, T any](value *S, convert func(S) T) *T { + if IsNil(value) { + return nil + } + return Ptr(convert(UnPtr(value))) +} diff --git a/internal/util/ptr_test.go b/internal/util/ptr_test.go new file mode 100644 index 00000000..b0e4ef50 --- /dev/null +++ b/internal/util/ptr_test.go @@ -0,0 +1,41 @@ +package util + +import ( + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestIsNil(t *testing.T) { + assert.True(t, IsNil(nil)) + assert.False(t, IsNil(1)) + + type animal interface{} + type dog struct{} + + assert.False(t, IsNil(dog{})) + + var d *dog = nil + var a animal = d + assert.True(t, IsNil(a)) + assert.Nil(t, d) +} + +func TestPtr(t *testing.T) { + value := faker.UUIDHyphenated() + assert.Equal(t, value, *Ptr(value)) +} + +func TestUnPtr(t *testing.T) { + var nilPtr *string + assert.Equal(t, "", UnPtr(nilPtr)) + + value := faker.UUIDHyphenated() + ptr := &value + assert.Equal(t, value, UnPtr(ptr)) +} + +func TestPtrTo(t *testing.T) { + assert.Nil(t, PtrTo[int, int](nil, func(s int) int { return s + 1 })) + assert.Equal(t, UnPtr(PtrTo[int, int](Ptr(1), func(s int) int { return s + 1 
})), 2) +} diff --git a/pkg/database/collection.go b/pkg/database/collection.go new file mode 100644 index 00000000..ce0bd5bc --- /dev/null +++ b/pkg/database/collection.go @@ -0,0 +1,99 @@ +package database + +import ( + "context" + + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + // Collection is an abstracted interface for managing collection. + Collection interface { + Name() string + + Indexes() IndexView + + Watch(ctx context.Context, filter *Filter) (Stream, error) + + InsertOne(ctx context.Context, doc *primitive.Map) (primitive.Object, error) + InsertMany(ctx context.Context, docs []*primitive.Map) ([]primitive.Object, error) + + UpdateOne(ctx context.Context, filter *Filter, patch *primitive.Map, options ...*UpdateOptions) (bool, error) + UpdateMany(ctx context.Context, filter *Filter, patch *primitive.Map, options ...*UpdateOptions) (int, error) + + DeleteOne(ctx context.Context, filter *Filter) (bool, error) + DeleteMany(ctx context.Context, filter *Filter) (int, error) + + FindOne(ctx context.Context, filter *Filter, options ...*FindOptions) (*primitive.Map, error) + FindMany(ctx context.Context, filter *Filter, options ...*FindOptions) ([]*primitive.Map, error) + + Drop(ctx context.Context) error + } + + UpdateOptions struct { + Upsert *bool + } + + FindOptions struct { + Limit *int + Skip *int + Sorts []Sort + } + + Stream interface { + Next() <-chan Event + Done() <-chan struct{} + Close() error + } + + Event struct { + OP eventOP + DocumentID primitive.Object + } + + eventOP int +) + +const ( + EventInsert eventOP = iota + EventUpdate + EventDelete +) + +func MergeUpdateOptions(options []*UpdateOptions) *UpdateOptions { + if len(options) == 0 { + return nil + } + opt := &UpdateOptions{} + for _, curr := range options { + if curr == nil { + continue + } + if curr.Upsert != nil { + opt.Upsert = curr.Upsert + } + } + return opt +} + +func MergeFindOptions(options []*FindOptions) *FindOptions { + if len(options) == 0 { + return nil + } + 
opt := &FindOptions{} + for _, curr := range options { + if curr == nil { + continue + } + if curr.Limit != nil { + opt.Limit = curr.Limit + } + if curr.Skip != nil { + opt.Skip = curr.Skip + } + if curr.Sorts != nil { + opt.Sorts = curr.Sorts + } + } + return opt +} diff --git a/pkg/database/collection_test.go b/pkg/database/collection_test.go new file mode 100644 index 00000000..315a9b2a --- /dev/null +++ b/pkg/database/collection_test.go @@ -0,0 +1,45 @@ +package database + +import ( + "testing" + + "github.com/siyul-park/uniflow/internal/util" + "github.com/stretchr/testify/assert" +) + +func TestMergeUpdateOptions(t *testing.T) { + opt := MergeUpdateOptions([]*UpdateOptions{ + nil, + util.Ptr(UpdateOptions{ + Upsert: nil, + }), + util.Ptr(UpdateOptions{ + Upsert: util.Ptr(true), + }), + }) + + assert.Equal(t, util.Ptr(UpdateOptions{ + Upsert: util.Ptr(true), + }), opt) +} + +func TestMergeFindOptions(t *testing.T) { + opt := MergeFindOptions([]*FindOptions{ + nil, + util.Ptr(FindOptions{ + Limit: util.Ptr(1), + }), + util.Ptr(FindOptions{ + Skip: util.Ptr(1), + }), + util.Ptr(FindOptions{ + Sorts: []Sort{{Key: "", Order: OrderASC}}, + }), + }) + + assert.Equal(t, util.Ptr(FindOptions{ + Limit: util.Ptr(1), + Skip: util.Ptr(1), + Sorts: []Sort{{Key: "", Order: OrderASC}}, + }), opt) +} diff --git a/pkg/database/database.go b/pkg/database/database.go new file mode 100644 index 00000000..c3ad0fec --- /dev/null +++ b/pkg/database/database.go @@ -0,0 +1,12 @@ +package database + +import "context" + +type ( + // Database is an abstracted interface for managing database. 
+ Database interface { + Name() string + Collection(ctx context.Context, name string) (Collection, error) + Drop(ctx context.Context) error + } +) diff --git a/pkg/database/databasetest/collection.go b/pkg/database/databasetest/collection.go new file mode 100644 index 00000000..61932a0a --- /dev/null +++ b/pkg/database/databasetest/collection.go @@ -0,0 +1,732 @@ +package databasetest + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/stretchr/testify/assert" +) + +const ( + benchmarkSetSize = 1000 +) + +func AssertCollectionName(t *testing.T, collection database.Collection) { + t.Helper() + + name := collection.Name() + assert.NotEmpty(t, name) +} + +func AssertCollectionIndexes(t *testing.T, collection database.Collection) { + t.Helper() + + indexes := collection.Indexes() + assert.NotNil(t, indexes) +} + +func AssertCollectionWatch(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + stream, err := collection.Watch(ctx, nil) + assert.NoError(t, err) + defer func() { _ = stream.Close() }() + + go func() { + for { + event, ok := <-stream.Next() + if ok { + assert.NotNil(t, event.DocumentID) + } else { + return + } + } + }() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("version"), primitive.NewInt(0), + ) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + _, err = collection.UpdateOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1))) + assert.NoError(t, err) + + _, err = collection.DeleteOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), 
nil))) + assert.NoError(t, err) +} + +func AssertCollectionInsertOne(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + primitive.NewString("version"), primitive.NewInt(0), + primitive.NewString("deleted"), primitive.FALSE, + ) + + id, err := collection.InsertOne(ctx, doc) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), id) +} + +func AssertCollectionInsertMany(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + primitive.NewString("version"), primitive.NewInt(0), + primitive.NewString("deleted"), primitive.FALSE, + ) + + ids, err := collection.InsertMany(ctx, []*primitive.Map{doc}) + assert.NoError(t, err) + assert.Len(t, ids, 1) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), ids[0]) +} + +func AssertCollectionUpdateOne(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + t.Run("options.Upsert = true", func(t *testing.T) { + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("version"), primitive.NewInt(0), + ) + + ok, err := collection.UpdateOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1)), util.Ptr(database.UpdateOptions{ + Upsert: util.Ptr(true), + })) + assert.NoError(t, err) + assert.True(t, ok) + }) + + 
t.Run("options.Upsert = false", func(t *testing.T) { + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("version"), primitive.NewInt(0), + ) + + ok, err := collection.UpdateOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1)), util.Ptr(database.UpdateOptions{ + Upsert: util.Ptr(false), + })) + assert.NoError(t, err) + assert.False(t, ok) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + ok, err = collection.UpdateOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1)), util.Ptr(database.UpdateOptions{ + Upsert: util.Ptr(false), + })) + assert.NoError(t, err) + assert.True(t, ok) + }) +} + +func AssertCollectionUpdateMany(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + t.Run("options.Upsert = true", func(t *testing.T) { + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("version"), primitive.NewInt(0), + ) + + count, err := collection.UpdateMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1)), util.Ptr(database.UpdateOptions{ + Upsert: util.Ptr(true), + })) + assert.NoError(t, err) + assert.Equal(t, 1, count) + }) + + t.Run("options.Upsert = false", func(t *testing.T) { + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("version"), primitive.NewInt(0), + ) + + count, err := collection.UpdateMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1)), 
util.Ptr(database.UpdateOptions{ + Upsert: util.Ptr(false), + })) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + count, err = collection.UpdateMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)), primitive.NewMap(primitive.NewString("version"), primitive.NewInt(1)), util.Ptr(database.UpdateOptions{ + Upsert: util.Ptr(false), + })) + assert.NoError(t, err) + assert.Equal(t, 1, count) + }) +} + +func AssertCollectionDeleteOne(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + ) + + ok, err := collection.DeleteOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.False(t, ok) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + ok, err = collection.DeleteOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.True(t, ok) + + ok, err = collection.DeleteOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.False(t, ok) +} + +func AssertCollectionDeleteMany(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + ) + + count, err := collection.DeleteMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + count, err = collection.DeleteMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + 
assert.Equal(t, 1, count) + + count, err = collection.DeleteMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Equal(t, 0, count) +} + +func AssertCollectionFindOne(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + primitive.NewString("version"), primitive.NewInt(0), + primitive.NewString("deleted"), primitive.FALSE, + ) + + res, err := collection.FindOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Nil(t, res) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + t.Run(string(database.EQ), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) + + t.Run(string(database.NE), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("id").NE(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Nil(t, res) + }) + + t.Run(string(database.GT), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").GT(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Nil(t, res) + }) + + t.Run(string(database.GTE), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").GTE(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) + + t.Run(string(database.LT), func(t *testing.T) { + res, err = collection.FindOne(ctx, 
database.Where("version").LT(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Nil(t, res) + }) + + t.Run(string(database.LTE), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").LTE(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) + + t.Run(string(database.IN), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").IN(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) + + t.Run(string(database.NIN), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").NotIN(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Nil(t, res) + }) + + t.Run(string(database.NULL), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").IsNull()) + assert.NoError(t, err) + assert.Nil(t, res) + }) + + t.Run(string(database.NNULL), func(t *testing.T) { + res, err = collection.FindOne(ctx, database.Where("version").IsNotNull()) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) + + t.Run(string(database.AND), func(t *testing.T) { + res, err = collection.FindOne(ctx, + database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)). + And(database.Where("name").EQ(doc.GetOr(primitive.NewString("name"), nil))), + ) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) + + t.Run(string(database.OR), func(t *testing.T) { + res, err = collection.FindOne(ctx, + database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)). 
+ Or(database.Where("name").EQ(doc.GetOr(primitive.NewString("name"), nil))), + ) + assert.NoError(t, err) + assert.Equal(t, doc.GetOr(primitive.NewString("id"), nil), res.GetOr(primitive.NewString("id"), nil)) + }) +} + +func AssertCollectionFindMany(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + doc := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + primitive.NewString("version"), primitive.NewInt(0), + primitive.NewString("deleted"), primitive.FALSE, + ) + + res, err := collection.FindMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 0) + + _, err = collection.InsertOne(ctx, doc) + assert.NoError(t, err) + + t.Run(string(database.EQ), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) + + t.Run(string(database.NE), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("id").NE(doc.GetOr(primitive.NewString("id"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 0) + }) + + t.Run(string(database.GT), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").GT(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 0) + }) + + t.Run(string(database.GTE), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").GTE(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) + + t.Run(string(database.LT), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").LT(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 0) + 
}) + + t.Run(string(database.LTE), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").LTE(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) + + t.Run(string(database.IN), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").IN(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) + + t.Run(string(database.NIN), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").NotIN(doc.GetOr(primitive.NewString("version"), nil))) + assert.NoError(t, err) + assert.Len(t, res, 0) + }) + + t.Run(string(database.NULL), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").IsNull()) + assert.NoError(t, err) + assert.Len(t, res, 0) + }) + + t.Run(string(database.NNULL), func(t *testing.T) { + res, err = collection.FindMany(ctx, database.Where("version").IsNotNull()) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) + + t.Run(string(database.AND), func(t *testing.T) { + res, err = collection.FindMany(ctx, + database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)). + And(database.Where("name").EQ(doc.GetOr(primitive.NewString("name"), nil))), + ) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) + + t.Run(string(database.OR), func(t *testing.T) { + res, err = collection.FindMany(ctx, + database.Where("id").EQ(doc.GetOr(primitive.NewString("id"), nil)). 
+ Or(database.Where("name").EQ(doc.GetOr(primitive.NewString("name"), nil))), + ) + assert.NoError(t, err) + assert.Len(t, res, 1) + }) +} + +func AssertCollectionDrop(t *testing.T, collection database.Collection) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + _, err := collection.InsertOne(ctx, primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + )) + assert.NoError(t, err) + + err = collection.Drop(ctx) + assert.NoError(t, err) +} + +func BenchmarkCollectionInsertOne(b *testing.B, coll database.Collection) { + b.Helper() + + for i := 0; i < b.N; i++ { + _, err := coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionInsertMany(b *testing.B, coll database.Collection) { + b.Helper() + + for i := 0; i < b.N; i++ { + var docs []*primitive.Map + for j := 0; j < 10; j++ { + docs = append(docs, primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + _, err := coll.InsertMany(context.Background(), docs) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionUpdateOne(b *testing.B, coll database.Collection) { + b.Helper() + b.StopTimer() + + for i := 0; i < benchmarkSetSize; i++ { + _, _ = coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + _, err := coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + for i 
:= 0; i < b.N; i++ { + _, err := coll.UpdateOne(context.Background(), database.Where("id").EQ(v.GetOr(primitive.NewString("id"), nil)), primitive.NewMap( + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionUpdateMany(b *testing.B, coll database.Collection) { + b.Helper() + b.StopTimer() + + for i := 0; i < benchmarkSetSize; i++ { + _, _ = coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + var docs []*primitive.Map + for j := 0; j < 10; j++ { + docs = append(docs, primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), v.GetOr(primitive.NewString("name"), nil), + )) + } + _, err := coll.InsertMany(context.Background(), docs) + assert.NoError(b, err) + + _, err = coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := coll.UpdateMany(context.Background(), database.Where("name").EQ(v.GetOr(primitive.NewString("name"), nil)), primitive.NewMap( + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionDeleteOne(b *testing.B, coll database.Collection) { + b.Helper() + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + for i := 0; i < b.N; i++ { + b.StopTimer() + _, err := coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + b.StartTimer() + + _, err = coll.DeleteOne(context.Background(), database.Where("id").EQ(v.GetOr(primitive.NewString("id"), 
nil))) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionDeleteMany(b *testing.B, coll database.Collection) { + b.Helper() + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + var docs []*primitive.Map + for j := 0; j < 10; j++ { + docs = append(docs, primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), v.GetOr(primitive.NewString("name"), nil), + )) + } + + for i := 0; i < b.N; i++ { + b.StopTimer() + + _, err := coll.InsertMany(context.Background(), docs) + assert.NoError(b, err) + + _, err = coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + _, err = coll.DeleteMany(context.Background(), database.Where("name").EQ(v.GetOr(primitive.NewString("name"), nil))) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionFindOneWithIndex(b *testing.B, coll database.Collection) { + b.Helper() + b.StopTimer() + + for i := 0; i < benchmarkSetSize; i++ { + _, _ = coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + _, err := coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := coll.FindOne(context.Background(), database.Where("id").EQ(v.GetOr(primitive.NewString("id"), nil))) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionFindOneWithoutIndex(b *testing.B, coll database.Collection) { + b.Helper() + b.StopTimer() + + for i := 0; i < benchmarkSetSize; i++ { + _, _ = coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), 
primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + _, err := coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := coll.FindOne(context.Background(), database.Where("name").EQ(v.GetOr(primitive.NewString("name"), nil))) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionFindManyWithIndex(b *testing.B, coll database.Collection) { + b.Helper() + b.StopTimer() + + for i := 0; i < benchmarkSetSize; i++ { + _, _ = coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + var docs []*primitive.Map + for j := 0; j < 10; j++ { + docs = append(docs, primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), v.GetOr(primitive.NewString("name"), nil), + )) + } + + _, err := coll.InsertMany(context.Background(), docs) + assert.NoError(b, err) + + _, err = coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := coll.FindMany(context.Background(), database.Where("id").EQ(v.GetOr(primitive.NewString("id"), nil))) + assert.NoError(b, err) + } +} + +func BenchmarkCollectionFindManyWithoutIndex(b *testing.B, coll database.Collection) { + b.Helper() + b.StopTimer() + + for i := 0; i < benchmarkSetSize; i++ { + _, _ = coll.InsertOne(context.Background(), primitive.NewMap( + primitive.NewString("id"), 
primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + )) + } + + v := primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), primitive.NewString(faker.Word()), + ) + + var docs []*primitive.Map + for j := 0; j < 10; j++ { + docs = append(docs, primitive.NewMap( + primitive.NewString("id"), primitive.NewBinary(ulid.Make().Bytes()), + primitive.NewString("name"), v.GetOr(primitive.NewString("name"), nil), + )) + } + + _, err := coll.InsertMany(context.Background(), docs) + assert.NoError(b, err) + + _, err = coll.InsertOne(context.Background(), v) + assert.NoError(b, err) + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := coll.FindMany(context.Background(), database.Where("name").EQ(v.GetOr(primitive.NewString("name"), nil))) + assert.NoError(b, err) + } +} diff --git a/pkg/database/databasetest/database.go b/pkg/database/databasetest/database.go new file mode 100644 index 00000000..e8223239 --- /dev/null +++ b/pkg/database/databasetest/database.go @@ -0,0 +1,32 @@ +package databasetest + +import ( + "context" + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +func AssertDatabaseName(t *testing.T, database database.Database) { + name := database.Name() + assert.NotEmpty(t, name) +} + +func AssertDatabaseCollection(t *testing.T, database database.Database) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + coll, err := database.Collection(ctx, faker.UUIDHyphenated()) + assert.NoError(t, err) + assert.NotNil(t, coll) +} + +func AssertDatabaseDrop(t *testing.T, database database.Database) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + err := database.Drop(ctx) + assert.NoError(t, err) +} diff --git a/pkg/database/databasetest/index.go 
b/pkg/database/databasetest/index.go new file mode 100644 index 00000000..7c00963f --- /dev/null +++ b/pkg/database/databasetest/index.go @@ -0,0 +1,62 @@ +package databasetest + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/stretchr/testify/assert" +) + +func AssertIndexViewList(t *testing.T, indexView database.IndexView) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + model := database.IndexModel{ + Keys: []string{"sub_key"}, + Name: faker.UUIDHyphenated(), + Unique: false, + Partial: database.Where("type").EQ(primitive.NewString("any")), + } + + err := indexView.Create(ctx, model) + assert.NoError(t, err) + + models, err := indexView.List(ctx) + assert.NoError(t, err) + assert.Greater(t, len(models), 0) + + assert.Equal(t, model, models[len(models)-1]) +} + +func AssertIndexViewCreate(t *testing.T, indexView database.IndexView) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + model := database.IndexModel{ + Keys: []string{"sub_key"}, + Name: faker.UUIDHyphenated(), + } + + err := indexView.Create(ctx, model) + assert.NoError(t, err) +} + +func AssertIndexViewDrop(t *testing.T, indexView database.IndexView) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + model := database.IndexModel{ + Keys: []string{"sub_key"}, + Name: faker.UUIDHyphenated(), + } + + err := indexView.Create(ctx, model) + assert.NoError(t, err) + + err = indexView.Drop(ctx, model.Name) + assert.NoError(t, err) +} diff --git a/pkg/database/errors.go b/pkg/database/errors.go new file mode 100644 index 00000000..02b4b16f --- /dev/null +++ b/pkg/database/errors.go @@ -0,0 +1,13 @@ +package database + +import "github.com/pkg/errors" + +var ( + ErrCodeWrite = "failed to write" + ErrCodeRead = "failed to read" + 
ErrCodeDelete = "failed to delete" + + ErrWrite = errors.New(ErrCodeWrite) + ErrRead = errors.New(ErrCodeRead) + ErrDelete = errors.New(ErrCodeDelete) +) diff --git a/pkg/database/filter.go b/pkg/database/filter.go new file mode 100644 index 00000000..5fb2c18f --- /dev/null +++ b/pkg/database/filter.go @@ -0,0 +1,176 @@ +package database + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + // Filter is a filter for find matched primitive. + Filter struct { + OP OP + Key string + Value any + } + + filterHelper struct { + key string + } + + OP string +) + +const ( + EQ OP = "=" + NE OP = "!=" + LT OP = "<" + LTE OP = "<=" + GT OP = ">" + GTE OP = ">=" + IN OP = "IN" + NIN OP = "NOT IN" + NULL OP = "IS NULL" + NNULL OP = "IS NOT NULL" + AND OP = "AND" + OR OP = "OR" +) + +func Where(key string) *filterHelper { + return &filterHelper{ + key: key, + } +} + +func (fh *filterHelper) EQ(value primitive.Object) *Filter { + return &Filter{ + OP: EQ, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper) NE(value primitive.Object) *Filter { + return &Filter{ + OP: NE, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper) LT(value primitive.Object) *Filter { + return &Filter{ + Key: fh.key, + OP: LT, + Value: value, + } +} + +func (fh *filterHelper) LTE(value primitive.Object) *Filter { + return &Filter{ + OP: LTE, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper) GT(value primitive.Object) *Filter { + return &Filter{ + OP: GT, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper) GTE(value primitive.Object) *Filter { + return &Filter{ + OP: GTE, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper) IN(slice ...primitive.Object) *Filter { + return &Filter{ + OP: IN, + Key: fh.key, + Value: primitive.NewSlice(slice...), + } +} + +func (fh *filterHelper) NotIN(slice ...primitive.Object) *Filter { + return &Filter{ + OP: NIN, + Key: fh.key, + Value: 
primitive.NewSlice(slice...), + } +} + +func (fh *filterHelper) IsNull() *Filter { + return &Filter{ + OP: NULL, + Key: fh.key, + } +} + +func (fh *filterHelper) IsNotNull() *Filter { + return &Filter{ + OP: NNULL, + Key: fh.key, + } +} + +func (ft *Filter) And(x ...*Filter) *Filter { + var v []*Filter + for _, e := range append([]*Filter{ft}, x...) { + if e != nil { + v = append(v, e) + } + } + + return &Filter{ + OP: AND, + Value: v, + } +} + +func (ft *Filter) Or(x ...*Filter) *Filter { + var v []*Filter + for _, e := range append([]*Filter{ft}, x...) { + if e != nil { + v = append(v, e) + } + } + + return &Filter{ + OP: OR, + Value: v, + } +} + +func (ft *Filter) String() (string, error) { + if ft.OP == AND || ft.OP == OR { + var parsed []string + if value, ok := ft.Value.([]*Filter); ok { + for _, v := range value { + c, e := v.String() + if e != nil { + return "", e + } + parsed = append(parsed, "("+c+")") + } + } + return strings.Join(parsed, " "+string(ft.OP)+" "), nil + } + if ft.OP == NULL || ft.OP == NNULL { + return ft.Key + " " + string(ft.OP), nil + } + + b, err := json.Marshal(primitive.Interface(ft.Value)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s %s %s", ft.Key, string(ft.OP), string(b)), nil +} diff --git a/pkg/database/filter_test.go b/pkg/database/filter_test.go new file mode 100644 index 00000000..d5b3deee --- /dev/null +++ b/pkg/database/filter_test.go @@ -0,0 +1,310 @@ +package database + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/stretchr/testify/assert" +) + +func TestWhere(t *testing.T) { + f := faker.UUIDHyphenated() + wh := Where(f) + assert.Equal(t, &filterHelper{key: f}, wh) +} + +func TestFilterHelper_EQ(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: EQ, + Value: v, + }, wh.EQ(v)) +} + +func TestFilterHelper_NE(t *testing.T) { 
+ f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: NE, + Value: v, + }, wh.NE(v)) +} + +func TestFilterHelper_LT(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: LT, + Value: v, + }, wh.LT(v)) +} + +func TestFilterHelper_LTE(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: LTE, + Value: v, + }, wh.LTE(v)) +} + +func TestFilterHelper_GT(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: GT, + Value: v, + }, wh.GT(v)) +} + +func TestFilterHelper_GTE(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: GTE, + Value: v, + }, wh.GTE(v)) +} + +func TestFilterHelper_IN(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: IN, + Value: primitive.NewSlice(v), + }, wh.IN(v)) +} + +func TestFilterHelper_NotIN(t *testing.T) { + f := faker.UUIDHyphenated() + v := primitive.NewString(faker.UUIDHyphenated()) + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: NIN, + Value: primitive.NewSlice(v), + }, wh.NotIN(v)) +} + +func TestFilterHelper_IsNull(t *testing.T) { + f := faker.UUIDHyphenated() + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: NULL, + }, wh.IsNull()) +} + +func TestFilterHelper_IsNotNull(t *testing.T) { + f := faker.UUIDHyphenated() + + wh := Where(f) + + assert.Equal(t, &Filter{ + Key: f, + OP: NNULL, + }, wh.IsNotNull()) +} + +func TestFilter_And(t *testing.T) { + f1 := faker.UUIDHyphenated() + f2 := 
faker.UUIDHyphenated() + v1 := faker.UUIDHyphenated() + v2 := faker.UUIDHyphenated() + + q1 := Where(f1).EQ(primitive.NewString(v1)) + q2 := Where(f2).EQ(primitive.NewString(v2)) + + q := q1.And(q2) + + assert.Equal(t, &Filter{ + OP: AND, + Value: []*Filter{q1, q2}, + }, q) +} + +func TestFilter_Or(t *testing.T) { + f1 := faker.UUIDHyphenated() + f2 := faker.UUIDHyphenated() + v1 := faker.UUIDHyphenated() + v2 := faker.UUIDHyphenated() + + q1 := Where(f1).EQ(primitive.NewString(v1)) + q2 := Where(f2).EQ(primitive.NewString(v2)) + + q := q1.Or(q2) + + assert.Equal(t, &Filter{ + OP: OR, + Value: []*Filter{q1, q2}, + }, q) +} + +func TestFilter_String(t *testing.T) { + testCases := []struct { + when *Filter + expect string + }{ + { + when: Where("1").EQ(primitive.NewString("1")), + expect: "1 = \"1\"", + }, + { + when: Where("1").EQ(primitive.NewInt(1)), + expect: "1 = 1", + }, + { + when: Where("1").EQ(primitive.TRUE), + expect: "1 = true", + }, + { + when: Where("1").EQ(nil), + expect: "1 = null", + }, + + { + when: Where("1").NE(primitive.NewString("1")), + expect: "1 != \"1\"", + }, + { + when: Where("1").NE(primitive.NewInt(1)), + expect: "1 != 1", + }, + { + when: Where("1").NE(primitive.TRUE), + expect: "1 != true", + }, + { + when: Where("1").NE(nil), + expect: "1 != null", + }, + + { + when: Where("1").LT(primitive.NewString("1")), + expect: "1 < \"1\"", + }, + { + when: Where("1").LT(primitive.NewInt(1)), + expect: "1 < 1", + }, + + { + when: Where("1").LTE(primitive.NewString("1")), + expect: "1 <= \"1\"", + }, + { + when: Where("1").LTE(primitive.NewInt(1)), + expect: "1 <= 1", + }, + + { + when: Where("1").GT(primitive.NewString("1")), + expect: "1 > \"1\"", + }, + { + when: Where("1").GT(primitive.NewInt(1)), + expect: "1 > 1", + }, + + { + when: Where("1").GTE(primitive.NewString("1")), + expect: "1 >= \"1\"", + }, + { + when: Where("1").GTE(primitive.NewInt(1)), + expect: "1 >= 1", + }, + + { + when: Where("1").IN(primitive.NewString("1")), + expect: 
"1 IN [\"1\"]", + }, + { + when: Where("1").IN(primitive.NewInt(1)), + expect: "1 IN [1]", + }, + + { + when: Where("1").NotIN(primitive.NewString("1")), + expect: "1 NOT IN [\"1\"]", + }, + { + when: Where("1").NotIN(primitive.NewInt(1)), + expect: "1 NOT IN [1]", + }, + + { + when: Where("1").IsNull(), + expect: "1 IS NULL", + }, + { + when: Where("1").IsNotNull(), + expect: "1 IS NOT NULL", + }, + + { + when: Where("1").EQ(primitive.NewInt(1)).And(Where("2").EQ(primitive.NewInt(2))), + expect: "(1 = 1) AND (2 = 2)", + }, + { + when: Where("1").EQ(primitive.NewInt(1)).And(Where("2").EQ(primitive.NewInt(2))).And(Where("3").EQ(primitive.NewInt(3))), + expect: "((1 = 1) AND (2 = 2)) AND (3 = 3)", + }, + + { + when: Where("1").EQ(primitive.NewInt(1)).Or(Where("2").EQ(primitive.NewInt(2))), + expect: "(1 = 1) OR (2 = 2)", + }, + { + when: Where("1").EQ(primitive.NewInt(1)).Or(Where("2").EQ(primitive.NewInt(2))).Or(Where("3").EQ(primitive.NewInt(3))), + expect: "((1 = 1) OR (2 = 2)) OR (3 = 3)", + }, + + { + when: Where("1").EQ(primitive.NewInt(1)).And(Where("2").EQ(primitive.NewInt(2))).Or(Where("3").EQ(primitive.NewInt(3))), + expect: "((1 = 1) AND (2 = 2)) OR (3 = 3)", + }, + } + + for _, tc := range testCases { + t.Run(tc.expect, func(t *testing.T) { + c, err := tc.when.String() + assert.NoError(t, err) + assert.Equal(t, tc.expect, c) + }) + } +} diff --git a/pkg/database/index.go b/pkg/database/index.go new file mode 100644 index 00000000..9f6e6d74 --- /dev/null +++ b/pkg/database/index.go @@ -0,0 +1,20 @@ +package database + +import "context" + +type ( + // IndexView is an abstracted interface for be used to create, drop, and list indexes. + IndexView interface { + List(ctx context.Context) ([]IndexModel, error) + Create(ctx context.Context, index IndexModel) error + Drop(ctx context.Context, name string) error + } + + // IndexModel is a model for an index. 
+ IndexModel struct { + Name string + Keys []string + Unique bool + Partial *Filter + } +) diff --git a/pkg/database/memdb/collection.go b/pkg/database/memdb/collection.go new file mode 100644 index 00000000..ce9d6f98 --- /dev/null +++ b/pkg/database/memdb/collection.go @@ -0,0 +1,542 @@ +package memdb + +import ( + "context" + "sort" + "sync" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/pool" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + Collection struct { + name string + data *sync.Map + indexView *IndexView + streams []*Stream + streamMatches []func(*primitive.Map) bool + dataLock sync.RWMutex + streamLock sync.RWMutex + } + + fullEvent struct { + database.Event + Document *primitive.Map + } +) + +var _ database.Collection = &Collection{} + +var ( + ErrCodePKNotFound = "primary key is not found" + ErrCodePKDuplicated = "primary key is duplicated" + + ErrPKNotFound = errors.New(ErrCodePKNotFound) + ErrPKDuplicated = errors.New(ErrCodePKDuplicated) +) + +func NewCollection(name string) *Collection { + return &Collection{ + name: name, + data: pool.GetMap(), + indexView: NewIndexView(), + dataLock: sync.RWMutex{}, + streamLock: sync.RWMutex{}, + } +} + +func (coll *Collection) Name() string { + coll.dataLock.RLock() + defer coll.dataLock.RUnlock() + + return coll.name +} + +func (coll *Collection) Indexes() database.IndexView { + coll.dataLock.RLock() + defer coll.dataLock.RUnlock() + + return coll.indexView +} + +func (coll *Collection) Watch(ctx context.Context, filter *database.Filter) (database.Stream, error) { + coll.streamLock.Lock() + defer coll.streamLock.Unlock() + + stream := NewStream() + coll.streams = append(coll.streams, stream) + coll.streamMatches = append(coll.streamMatches, ParseFilter(filter)) + + go func() { + select { + case <-stream.Done(): + coll.unwatch(stream) + case <-ctx.Done(): + _ = stream.Close() + 
coll.unwatch(stream) + } + }() + + return stream, nil +} + +func (coll *Collection) InsertOne(ctx context.Context, doc *primitive.Map) (primitive.Object, error) { + if id, err := coll.insertOne(ctx, doc); err != nil { + return nil, err + } else { + coll.emit(fullEvent{ + Event: database.Event{ + OP: database.EventInsert, + DocumentID: id, + }, + Document: doc, + }) + return id, nil + } +} + +func (coll *Collection) InsertMany(ctx context.Context, docs []*primitive.Map) ([]primitive.Object, error) { + if ids, err := coll.insertMany(ctx, docs); err != nil { + return nil, err + } else { + for i, doc := range docs { + coll.emit(fullEvent{ + Event: database.Event{ + OP: database.EventInsert, + DocumentID: ids[i], + }, + Document: doc, + }) + } + return ids, nil + } +} + +func (coll *Collection) UpdateOne(ctx context.Context, filter *database.Filter, patch *primitive.Map, opts ...*database.UpdateOptions) (bool, error) { + opt := database.MergeUpdateOptions(opts) + upsert := false + if opt != nil && opt.Upsert != nil { + upsert = util.UnPtr(opt.Upsert) + } + + old, err := coll.findOne(ctx, filter) + if err != nil { + return false, err + } + if old == nil && !upsert { + return false, nil + } + + var id primitive.Object + if old != nil { + id = old.GetOr(keyID, nil) + } + if id == nil { + id = patch.GetOr(keyID, nil) + } + if id == nil { + if examples, ok := FilterToExample(filter); ok { + for _, example := range examples { + if v, ok := example.Get(keyID); ok { + if id == nil { + id = v + } else { + return false, errors.Wrap(errors.WithStack(ErrPKDuplicated), database.ErrCodeWrite) + } + } + } + } + } + if id == nil { + return false, errors.Wrap(errors.WithStack(ErrPKNotFound), database.ErrCodeWrite) + } + + if old != nil { + if _, err := coll.deleteOne(ctx, old); err != nil { + return false, err + } + } + + doc := patch + if _, ok := doc.Get(keyID); !ok { + doc = doc.Set(keyID, id) + } + + if _, err := coll.insertOne(ctx, doc); err != nil { + _, _ = coll.InsertOne(ctx, 
old) + return false, err + } + + coll.emit(fullEvent{ + Event: database.Event{ + OP: database.EventUpdate, + DocumentID: id, + }, + Document: doc, + }) + + return true, nil +} + +func (coll *Collection) UpdateMany(ctx context.Context, filter *database.Filter, patch *primitive.Map, opts ...*database.UpdateOptions) (int, error) { + opt := database.MergeUpdateOptions(opts) + upsert := false + if opt != nil && opt.Upsert != nil { + upsert = util.UnPtr(opt.Upsert) + } + + old, err := coll.findMany(ctx, filter) + if err != nil { + return 0, err + } + if len(old) == 0 { + if !upsert { + return 0, nil + } + + id := patch.GetOr(keyID, nil) + if id == nil { + if examples, ok := FilterToExample(filter); ok { + for _, example := range examples { + if v, ok := example.Get(keyID); ok { + if id == nil { + id = v + } else { + return 0, errors.Wrap(errors.WithStack(ErrPKDuplicated), database.ErrCodeWrite) + } + } + } + } + } + + doc := patch + if _, ok := doc.Get(keyID); !ok { + doc = doc.Set(keyID, id) + } + if _, err := coll.insertOne(ctx, doc); err != nil { + return 0, err + } + return 1, nil + } + + if _, err := coll.deleteMany(ctx, old); err != nil { + return 0, err + } + + docs := make([]*primitive.Map, len(old)) + for i, doc := range old { + doc = patch.Set(keyID, doc.GetOr(keyID, nil)) + docs[i] = doc + } + if ids, err := coll.insertMany(ctx, docs); err != nil { + _, _ = coll.insertMany(ctx, old) + return 0, err + } else { + for i, doc := range docs { + coll.emit(fullEvent{ + Event: database.Event{ + OP: database.EventUpdate, + DocumentID: ids[i], + }, + Document: doc, + }) + } + } + + return len(docs), nil +} + +func (coll *Collection) DeleteOne(ctx context.Context, filter *database.Filter) (bool, error) { + if doc, err := coll.findOne(ctx, filter); err != nil { + return false, err + } else if doc, err := coll.deleteOne(ctx, doc); err != nil { + return false, err + } else { + if doc != nil { + if id, ok := doc.Get(keyID); ok { + coll.emit(fullEvent{ + Event: 
database.Event{ + OP: database.EventDelete, + DocumentID: id, + }, + Document: doc, + }) + } + } + return doc != nil, nil + } +} + +func (coll *Collection) DeleteMany(ctx context.Context, filter *database.Filter) (int, error) { + if docs, err := coll.findMany(ctx, filter); err != nil { + return 0, err + } else if docs, err := coll.deleteMany(ctx, docs); err != nil { + return 0, err + } else { + for _, doc := range docs { + if id, ok := doc.Get(keyID); ok { + coll.emit(fullEvent{ + Event: database.Event{ + OP: database.EventDelete, + DocumentID: id, + }, + Document: doc, + }) + } + } + return len(docs), nil + } +} + +func (coll *Collection) FindOne(ctx context.Context, filter *database.Filter, opts ...*database.FindOptions) (*primitive.Map, error) { + return coll.findOne(ctx, filter, opts...) +} + +func (coll *Collection) FindMany(ctx context.Context, filter *database.Filter, opts ...*database.FindOptions) ([]*primitive.Map, error) { + return coll.findMany(ctx, filter, opts...) +} + +func (coll *Collection) Drop(ctx context.Context) error { + data, err := func() (*sync.Map, error) { + coll.dataLock.Lock() + defer coll.dataLock.Unlock() + + data := coll.data + coll.data = pool.GetMap() + + if err := coll.indexView.deleteAll(ctx); err != nil { + return nil, err + } + + return data, nil + }() + if err != nil { + return err + } + + data.Range(func(_, val any) bool { + doc := val.(*primitive.Map) + if id, ok := doc.Get(keyID); ok { + coll.emit(fullEvent{ + Event: database.Event{ + OP: database.EventDelete, + DocumentID: id, + }, + Document: doc, + }) + } + return true + }) + + coll.streamLock.Lock() + defer coll.streamLock.Unlock() + + for _, s := range coll.streams { + if err := s.Close(); err != nil { + return err + } + } + coll.streams = nil + + return nil +} + +func (coll *Collection) insertOne(ctx context.Context, doc *primitive.Map) (primitive.Object, error) { + if ids, err := coll.insertMany(ctx, []*primitive.Map{doc}); err != nil { + return nil, err + } else { + 
return ids[0], nil + } +} + +func (coll *Collection) insertMany(ctx context.Context, docs []*primitive.Map) ([]primitive.Object, error) { + coll.dataLock.Lock() + defer coll.dataLock.Unlock() + + ids := make([]primitive.Object, len(docs)) + for i, doc := range docs { + if id, ok := doc.Get(keyID); !ok { + return nil, errors.Wrap(errors.WithStack(ErrPKNotFound), database.ErrCodeWrite) + } else if hash, err := util.Hash(id); err != nil { + return nil, errors.Wrap(err, database.ErrCodeWrite) + } else if _, ok := coll.data.Load(hash); ok { + return nil, errors.Wrap(errors.WithStack(ErrPKDuplicated), database.ErrCodeWrite) + } else { + ids[i] = id + } + } + + if err := coll.indexView.insertMany(ctx, docs); err != nil { + return nil, errors.Wrap(err, database.ErrCodeWrite) + } + for i, doc := range docs { + if hash, err := util.Hash(ids[i].Interface()); err != nil { + return nil, errors.Wrap(err, database.ErrCodeWrite) + } else { + coll.data.Store(hash, doc) + } + } + + return ids, nil +} + +func (coll *Collection) findOne(ctx context.Context, filter *database.Filter, opts ...*database.FindOptions) (*primitive.Map, error) { + opt := database.MergeFindOptions(append(opts, util.Ptr(database.FindOptions{Limit: util.Ptr(1)}))) + + if docs, err := coll.findMany(ctx, filter, opt); err != nil { + return nil, err + } else if len(docs) > 0 { + return docs[0], nil + } else { + return nil, nil + } +} + +func (coll *Collection) findMany(ctx context.Context, filter *database.Filter, opts ...*database.FindOptions) ([]*primitive.Map, error) { + coll.dataLock.RLock() + defer coll.dataLock.RUnlock() + + opt := database.MergeFindOptions(opts) + + limit := -1 + if opt != nil && opt.Limit != nil { + limit = util.UnPtr(opt.Limit) + } + skip := 0 + if opt != nil && opt.Skip != nil { + skip = util.UnPtr(opt.Skip) + } + var sorts []database.Sort + if opt != nil && opt.Sorts != nil { + sorts = opt.Sorts + } + + match := ParseFilter(filter) + + scanSize := limit + if skip > 0 || len(sorts) > 0 { 
+ scanSize = -1 + } + + scan := map[uint64]*primitive.Map{} + if examples, ok := FilterToExample(filter); ok { + if ids, err := coll.indexView.findMany(ctx, examples); err == nil { + for _, id := range ids { + if scanSize == len(scan) { + break + } else if hash, err := util.Hash(id.Interface()); err != nil { + return nil, errors.Wrap(err, database.ErrCodeWrite) + } else if doc, ok := coll.data.Load(hash); ok && match(doc.(*primitive.Map)) { + scan[hash] = doc.(*primitive.Map) + } + } + } + } + if scanSize != len(scan) { + coll.data.Range(func(key, value any) bool { + if scanSize == len(scan) { + return false + } + + if match(value.(*primitive.Map)) { + scan[key.(uint64)] = value.(*primitive.Map) + } + return true + }) + } + + if skip >= len(scan) { + return nil, nil + } + + var docs []*primitive.Map + for _, doc := range scan { + docs = append(docs, doc) + } + + if len(sorts) > 0 { + compare := ParseSorts(sorts) + sort.Slice(docs, func(i, j int) bool { + return compare(docs[i], docs[j]) + }) + } + if limit >= 0 { + if len(docs) > limit+skip { + docs = docs[skip : limit+skip] + } else { + docs = docs[skip:] + } + } + return docs, nil +} + +func (coll *Collection) deleteOne(ctx context.Context, doc *primitive.Map) (*primitive.Map, error) { + if docs, err := coll.deleteMany(ctx, []*primitive.Map{doc}); err != nil { + return nil, err + } else if len(docs) > 0 { + return docs[0], nil + } else { + return nil, nil + } +} + +func (coll *Collection) deleteMany(ctx context.Context, docs []*primitive.Map) ([]*primitive.Map, error) { + coll.dataLock.Lock() + defer coll.dataLock.Unlock() + + ids := make([]primitive.Object, 0, len(docs)) + deletes := make([]*primitive.Map, 0, len(docs)) + for _, doc := range docs { + if doc == nil { + continue + } + if id, ok := doc.Get(keyID); !ok { + continue + } else { + ids = append(ids, id) + deletes = append(deletes, doc) + } + } + + if err := coll.indexView.deleteMany(ctx, deletes); err != nil { + return nil, errors.Wrap(err, 
database.ErrCodeDelete) + } + + for _, id := range ids { + if hash, err := util.Hash(id.Interface()); err != nil { + return nil, errors.Wrap(err, database.ErrCodeWrite) + } else { + coll.data.Delete(hash) + } + } + + return deletes, nil +} + +func (coll *Collection) unwatch(stream database.Stream) { + coll.streamLock.Lock() + defer coll.streamLock.Unlock() + + for i, s := range coll.streams { + if s == stream { + coll.streams = append(coll.streams[:i], coll.streams[i+1:]...) + coll.streamMatches = append(coll.streamMatches[:i], coll.streamMatches[i+1:]...) + return + } + } +} + +func (coll *Collection) emit(event fullEvent) { + coll.streamLock.RLock() + defer coll.streamLock.RUnlock() + + for i, s := range coll.streams { + if coll.streamMatches[i](event.Document) { + s.Emit(event.Event) + } + } +} diff --git a/pkg/database/memdb/collection_test.go b/pkg/database/memdb/collection_test.go new file mode 100644 index 00000000..0416ad3d --- /dev/null +++ b/pkg/database/memdb/collection_test.go @@ -0,0 +1,143 @@ +package memdb + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database/databasetest" +) + +func TestCollection_Name(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionName(t, coll) +} + +func TestCollection_Indexes(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionIndexes(t, coll) +} + +func TestCollection_Watch(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionWatch(t, coll) +} + +func TestCollection_InsertOne(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionInsertOne(t, coll) +} + +func TestCollection_InsertMany(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionInsertMany(t, coll) +} + +func TestCollection_UpdateOne(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionUpdateOne(t, coll) +} + +func 
TestCollection_UpdateMany(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionUpdateMany(t, coll) +} + +func TestCollection_DeleteOne(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionDeleteOne(t, coll) +} + +func TestCollection_DeleteMany(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionDeleteMany(t, coll) +} + +func TestCollection_FindOne(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionFindOne(t, coll) +} + +func TestCollection_FindMany(t *testing.T) { + coll := NewCollection(faker.Name()) + databasetest.AssertCollectionFindMany(t, coll) +} + +func TestCollection_Drop(t *testing.T) { + coll := NewCollection(faker.Name()) + + databasetest.AssertCollectionDrop(t, coll) +} + +func BenchmarkCollection_InsertOne(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionInsertOne(b, coll) +} + +func BenchmarkCollection_InsertMany(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionInsertMany(b, coll) +} + +func BenchmarkCollection_UpdateOne(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionUpdateOne(b, coll) +} + +func BenchmarkCollection_UpdateMany(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionUpdateMany(b, coll) +} + +func BenchmarkCollection_DeleteOne(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionDeleteOne(b, coll) +} + +func BenchmarkCollection_DeleteMany(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionDeleteMany(b, coll) +} + +func BenchmarkCollection_FindOne(b *testing.B) { + b.Run("with index", func(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionFindOneWithIndex(b, coll) + }) + + b.Run("without index", func(b *testing.B) { + coll := 
NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionFindOneWithoutIndex(b, coll) + }) +} + +func BenchmarkCollection_FindMany(b *testing.B) { + b.Run("with index", func(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionFindManyWithIndex(b, coll) + }) + + b.Run("without index", func(b *testing.B) { + coll := NewCollection(faker.Name()) + + databasetest.BenchmarkCollectionFindManyWithoutIndex(b, coll) + }) +} diff --git a/pkg/database/memdb/database.go b/pkg/database/memdb/database.go new file mode 100644 index 00000000..b6ad3f9a --- /dev/null +++ b/pkg/database/memdb/database.go @@ -0,0 +1,62 @@ +package memdb + +import ( + "context" + "sync" + + "github.com/siyul-park/uniflow/pkg/database" +) + +type ( + Database struct { + name string + collections map[string]*Collection + lock sync.RWMutex + } +) + +var _ database.Database = &Database{} + +func New(name string) *Database { + return &Database{ + name: name, + collections: map[string]*Collection{}, + lock: sync.RWMutex{}, + } +} + +func (db *Database) Name() string { + db.lock.RLock() + defer db.lock.RUnlock() + + return db.name +} + +func (db *Database) Collection(_ context.Context, name string) (database.Collection, error) { + db.lock.Lock() + defer db.lock.Unlock() + + if coll, ok := db.collections[name]; ok { + return coll, nil + } + + coll := NewCollection(name) + db.collections[name] = coll + + return coll, nil +} + +func (db *Database) Drop(ctx context.Context) error { + db.lock.Lock() + defer db.lock.Unlock() + + for _, coll := range db.collections { + if err := coll.Drop(ctx); err != nil { + return err + } + } + + db.collections = map[string]*Collection{} + + return nil +} diff --git a/pkg/database/memdb/database_test.go b/pkg/database/memdb/database_test.go new file mode 100644 index 00000000..3f4ef1a0 --- /dev/null +++ b/pkg/database/memdb/database_test.go @@ -0,0 +1,25 @@ +package memdb + +import ( + "github.com/go-faker/faker/v4" + 
"github.com/siyul-park/uniflow/pkg/database/databasetest" + "testing" +) + +func TestDatabase_Name(t *testing.T) { + db := New(faker.Word()) + + databasetest.AssertDatabaseName(t, db) +} + +func TestDatabase_Collection(t *testing.T) { + db := New(faker.Word()) + + databasetest.AssertDatabaseCollection(t, db) +} + +func TestDatabase_Drop(t *testing.T) { + db := New(faker.Word()) + + databasetest.AssertDatabaseDrop(t, db) +} diff --git a/pkg/database/memdb/filter.go b/pkg/database/memdb/filter.go new file mode 100644 index 00000000..e0d995fa --- /dev/null +++ b/pkg/database/memdb/filter.go @@ -0,0 +1,235 @@ +package memdb + +import ( + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +func ParseFilter(filter *database.Filter) func(*primitive.Map) bool { + if filter == nil { + return func(_ *primitive.Map) bool { + return true + } + } + + switch filter.OP { + case database.EQ: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return util.Equal(primitive.Interface(v), primitive.Interface(filter.Value)) + } + } + case database.NE: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return !util.Equal(primitive.Interface(v), primitive.Interface(filter.Value)) + } + } + case database.LT: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return util.Compare(primitive.Interface(v), primitive.Interface(filter.Value)) < 0 + } + } + case database.LTE: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return util.Compare(primitive.Interface(v), primitive.Interface(filter.Value)) <= 0 + } + } + case database.GT: + return func(m *primitive.Map) bool { + 
if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return util.Compare(primitive.Interface(v), primitive.Interface(filter.Value)) > 0 + } + } + case database.GTE: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return util.Compare(primitive.Interface(v), primitive.Interface(filter.Value)) >= 0 + } + } + case database.IN: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else if v == nil { + return false + } else if children, ok := filter.Value.(*primitive.Slice); !ok { + return false + } else { + for i := 0; i < children.Len(); i++ { + if util.Equal(v.Interface(), children.Get(i).Interface()) { + return true + } + } + return false + } + } + case database.NIN: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else if v == nil { + return true + } else if children, ok := filter.Value.(*primitive.Slice); !ok { + return false + } else { + for i := 0; i < children.Len(); i++ { + if util.Equal(v.Interface(), children.Get(i).Interface()) { + return false + } + } + return true + } + } + case database.NULL: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return util.IsNil(v) + } + } + case database.NNULL: + return func(m *primitive.Map) bool { + if v, ok := primitive.Get[primitive.Object](m, filter.Key); !ok { + return false + } else { + return !util.IsNil(v) + } + } + case database.AND: + if children, ok := filter.Value.([]*database.Filter); !ok { + return func(m *primitive.Map) bool { + return false + } + } else { + parsed := make([]func(*primitive.Map) bool, len(children)) + for i, child := range children { + parsed[i] = ParseFilter(child) + } + return func(m *primitive.Map) bool { + for _, p := range parsed { 
+ if !p(m) { + return false + } + } + return true + } + } + case database.OR: + if children, ok := filter.Value.([]*database.Filter); !ok { + return func(m *primitive.Map) bool { + return false + } + } else { + parsed := make([]func(*primitive.Map) bool, len(children)) + for i, child := range children { + parsed[i] = ParseFilter(child) + } + return func(m *primitive.Map) bool { + for _, p := range parsed { + if p(m) { + return true + } + } + return false + } + } + } + + return func(_ *primitive.Map) bool { + return false + } +} + +func FilterToExample(filter *database.Filter) ([]*primitive.Map, bool) { + if util.IsNil(filter) { + return nil, false + } + + switch filter.OP { + case database.EQ: + return []*primitive.Map{primitive.NewMap(primitive.NewString(filter.Key), filter.Value.(primitive.Object))}, true + case database.NE: + return nil, false + case database.LT: + return nil, false + case database.LTE: + return nil, false + case database.GT: + return nil, false + case database.GTE: + return nil, false + case database.IN: + if children, ok := filter.Value.(*primitive.Slice); !ok { + return nil, false + } else { + examples := make([]*primitive.Map, children.Len()) + for i := 0; i < children.Len(); i++ { + examples[i] = primitive.NewMap(primitive.NewString(filter.Key), children.Get(i)) + } + return examples, true + } + case database.NIN: + return nil, false + case database.NULL: + return []*primitive.Map{primitive.NewMap(primitive.NewString(filter.Key), nil)}, true + case database.NNULL: + return nil, false + case database.AND: + if children, ok := filter.Value.([]*database.Filter); !ok { + return nil, false + } else { + example := primitive.NewMap() + for _, child := range children { + e, _ := FilterToExample(child) + if len(e) == 0 { + } else if len(e) == 1 { + for _, k := range e[0].Keys() { + v, _ := e[0].Get(k) + + if _, ok := example.Get(k); ok { + return nil, true + } else { + example.Set(k, v) + } + } + } else { + return nil, false + } + } + return 
[]*primitive.Map{example}, true + } + case database.OR: + if children, ok := filter.Value.([]*database.Filter); !ok { + return nil, false + } else { + var examples []*primitive.Map + for _, child := range children { + if e, ok := FilterToExample(child); ok { + examples = append(examples, e...) + } else { + return nil, false + } + } + return examples, true + } + } + + return nil, false +} diff --git a/pkg/database/memdb/index.go b/pkg/database/memdb/index.go new file mode 100644 index 00000000..4ce3895c --- /dev/null +++ b/pkg/database/memdb/index.go @@ -0,0 +1,380 @@ +package memdb + +import ( + "context" + "sync" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/pool" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + IndexView struct { + names []string + models []database.IndexModel + data []*sync.Map + lock sync.RWMutex + } +) + +var _ database.IndexView = &IndexView{} + +var ( + keyID = primitive.NewString("id") +) + +var ( + ErrIndexConflict = errors.New("index is conflict") + ErrIndexNotFound = errors.New("index is not found") + ErrInvalidDocument = errors.New("document is invalid") +) + +func NewIndexView() *IndexView { + iv := &IndexView{ + names: nil, + models: nil, + data: nil, + lock: sync.RWMutex{}, + } + _ = iv.Create(context.Background(), database.IndexModel{ + Keys: []string{"id"}, + Name: "_id", + Unique: true, + Partial: nil, + }) + + return iv +} + +func (iv *IndexView) List(_ context.Context) ([]database.IndexModel, error) { + iv.lock.RLock() + defer iv.lock.RUnlock() + + return iv.models, nil +} + +func (iv *IndexView) Create(_ context.Context, index database.IndexModel) error { + iv.lock.Lock() + defer iv.lock.Unlock() + + name := index.Name + + for i, n := range iv.names { + if n == name { + iv.names = append(iv.names[:i], iv.names[i+1:]...) + iv.models = append(iv.models[:i], iv.models[i+1:]...) 
+ iv.data = append(iv.data[:i], iv.data[i+1:]...) + } + } + + iv.names = append(iv.names, name) + iv.models = append(iv.models, index) + iv.data = append(iv.data, pool.GetMap()) + + return nil +} + +func (iv *IndexView) Drop(_ context.Context, name string) error { + iv.lock.Lock() + defer iv.lock.Unlock() + + for i, n := range iv.names { + if n == name { + iv.names = append(iv.names[:i], iv.names[i+1:]...) + iv.models = append(iv.models[:i], iv.models[i+1:]...) + iv.data = append(iv.data[:i], iv.data[i+1:]...) + } + } + + return nil +} + +func (iv *IndexView) insertMany(ctx context.Context, docs []*primitive.Map) error { + iv.lock.Lock() + defer iv.lock.Unlock() + + for i, doc := range docs { + if err := iv.insertOne(ctx, doc); err != nil { + for i--; i >= 0; i-- { + _ = iv.deleteOne(ctx, doc) + } + return err + } + } + return nil +} + +func (iv *IndexView) deleteMany(ctx context.Context, docs []*primitive.Map) error { + iv.lock.Lock() + defer iv.lock.Unlock() + + for i, doc := range docs { + if err := iv.deleteOne(ctx, doc); err != nil { + for ; i >= 0; i-- { + _ = iv.insertOne(ctx, doc) + } + return err + } + } + return nil +} + +func (iv *IndexView) deleteAll(_ context.Context) error { + iv.lock.Lock() + defer iv.lock.Unlock() + + iv.data = nil + + return nil +} + +func (iv *IndexView) findMany(_ context.Context, examples []*primitive.Map) ([]primitive.Object, error) { + iv.lock.RLock() + defer iv.lock.RUnlock() + + ids := pool.GetMap() + defer pool.PutMap(ids) + + for _, example := range examples { + if err := func() error { + for i, model := range iv.models { + curr := iv.data[i] + + visits := make(map[string]bool, example.Len()) + for _, k := range example.Keys() { + if k, ok := k.(primitive.String); ok { + visits[k.String()] = false + } else { + return ErrInvalidDocument + } + } + next := false + + var i int + var k string + for i, k = range model.Keys { + if obj, ok := primitive.Get[any](example, k); ok { + v := primitive.Interface(obj) + + hash, err := 
util.Hash(v) + if err != nil { + return err + } + visits[k] = true + if sub, ok := curr.Load(hash); ok { + if i < len(model.Keys)-1 { + curr = sub.(*sync.Map) + } else { + if model.Unique { + if hsub, err := util.Hash(sub); err != nil { + return err + } else { + ids.Store(hsub, sub) + return nil + } + } else { + sub.(*sync.Map).Range(func(key, val any) bool { + ids.Store(key, val) + return true + }) + return nil + } + } + } else { + next = true + break + } + } else { + break + } + } + + for _, v := range visits { + if !v { + next = true + } + } + if next { + continue + } + + var parent []*sync.Map + parent = append(parent, curr) + + depth := len(model.Keys) - 1 + if !model.Unique { + depth += 1 + } + + for ; i < depth; i++ { + var children []*sync.Map + for _, curr := range parent { + curr.Range(func(_, value any) bool { + children = append(children, value.(*sync.Map)) + return true + }) + } + parent = children + } + + for _, curr := range parent { + curr.Range(func(k, v any) bool { + ids.Store(k, v) + return true + }) + } + + return nil + } + + return ErrIndexNotFound + }(); err != nil { + return nil, err + } + } + + var uniqueIds []primitive.Object + ids.Range(func(_, val any) bool { + uniqueIds = append(uniqueIds, val.(primitive.Object)) + return true + }) + return uniqueIds, nil +} + +func (iv *IndexView) insertOne(ctx context.Context, doc *primitive.Map) error { + id, ok := doc.Get(keyID) + if !ok { + return ErrIndexConflict + } + + for i, model := range iv.models { + if err := func() error { + curr := iv.data[i] + + if !ParseFilter(model.Partial)(doc) { + return nil + } + + for i, k := range model.Keys { + obj, _ := primitive.Get[primitive.Object](doc, k) + v := primitive.Interface(obj) + + hash, err := util.Hash(v) + if err != nil { + return err + } + if i < len(model.Keys)-1 { + cm := pool.GetMap() + sub, load := curr.LoadOrStore(hash, cm) + if load { + pool.PutMap(cm) + } + curr = sub.(*sync.Map) + } else if model.Unique { + if r, loaded := 
curr.LoadOrStore(hash, id); loaded && r != id { + return ErrIndexConflict + } + } else { + cm := pool.GetMap() + r, load := curr.LoadOrStore(hash, cm) + if load { + pool.PutMap(cm) + } + r.(*sync.Map).Store(hash, id) + } + } + + return nil + }(); err != nil { + _ = iv.deleteOne(ctx, doc) + return err + } + } + + return nil +} + +func (iv *IndexView) deleteOne(_ context.Context, doc *primitive.Map) error { + id, ok := doc.Get(keyID) + if !ok { + return nil + } + + hid, err := util.Hash(id) + if err != nil { + return err + } + + for i, model := range iv.models { + if err := func() error { + curr := iv.data[i] + + if !ParseFilter(model.Partial)(doc) { + return nil + } + + var nodes []*sync.Map + nodes = append(nodes, curr) + var keys []any + keys = append(keys, nil) + + for i, k := range model.Keys { + obj, _ := primitive.Get[primitive.Object](doc, k) + v := primitive.Interface(obj) + + hash, err := util.Hash(v) + if err != nil { + return err + } + + if i < len(model.Keys)-1 { + if sub, ok := curr.Load(hash); ok { + curr = sub.(*sync.Map) + + nodes = append(nodes, curr) + keys = append(keys, hash) + } else { + return nil + } + } else if model.Unique { + if r, loaded := curr.Load(hash); loaded && util.Equal(r, id) { + curr.Delete(hash) + } + } else { + if r, loaded := curr.Load(hash); loaded { + nodes = append(nodes, r.(*sync.Map)) + keys = append(keys, hash) + r.(*sync.Map).Delete(hid) + } + } + } + + for i := len(nodes) - 1; i >= 0; i-- { + node := nodes[i] + + empty := true + node.Range(func(_, _ any) bool { + empty = false + return false + }) + + if empty && i > 0 { + parent := nodes[i-1] + key := keys[i] + + parent.Delete(key) + pool.PutMap(node) + } + } + + return nil + }(); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/database/memdb/index_test.go b/pkg/database/memdb/index_test.go new file mode 100644 index 00000000..aaa2fad7 --- /dev/null +++ b/pkg/database/memdb/index_test.go @@ -0,0 +1,25 @@ +package memdb + +import ( + "testing" + + 
"github.com/siyul-park/uniflow/pkg/database/databasetest" +) + +func TestIndexView_List(t *testing.T) { + iv := NewIndexView() + + databasetest.AssertIndexViewList(t, iv) +} + +func TestIndexView_Create(t *testing.T) { + iv := NewIndexView() + + databasetest.AssertIndexViewCreate(t, iv) +} + +func TestIndexView_Drop(t *testing.T) { + iv := NewIndexView() + + databasetest.AssertIndexViewDrop(t, iv) +} diff --git a/pkg/database/memdb/sort.go b/pkg/database/memdb/sort.go new file mode 100644 index 00000000..c1cc072f --- /dev/null +++ b/pkg/database/memdb/sort.go @@ -0,0 +1,35 @@ +package memdb + +import ( + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +func ParseSorts(sorts []database.Sort) func(i, j *primitive.Map) bool { + return func(i, j *primitive.Map) bool { + for _, s := range sorts { + x, _ := primitive.Get[primitive.Object](i, s.Key) + y, _ := primitive.Get[primitive.Object](j, s.Key) + + if x == y { + continue + } else if x == nil { + return s.Order == database.OrderDESC + } else if y == nil { + return s.Order != database.OrderDESC + } + + e := util.Compare(x.Interface(), y.Interface()) + if e == 0 { + continue + } + + if s.Order == database.OrderDESC { + return e > 0 + } + return e < 0 + } + return false + } +} diff --git a/pkg/database/memdb/stream.go b/pkg/database/memdb/stream.go new file mode 100644 index 00000000..53e3655e --- /dev/null +++ b/pkg/database/memdb/stream.go @@ -0,0 +1,103 @@ +package memdb + +import ( + "sync" + + "github.com/siyul-park/uniflow/pkg/database" +) + +type ( + Stream struct { + buffer []database.Event + channel chan database.Event + pump chan struct{} + done chan struct{} + mu sync.Mutex + } +) + +func NewStream() *Stream { + s := &Stream{ + buffer: nil, + channel: make(chan database.Event), + pump: make(chan struct{}), + done: make(chan struct{}), + mu: sync.Mutex{}, + } + + go func() { + defer func() { close(s.channel) }() + 
+
+		for {
+			select {
+			case <-s.done:
+				return
+			case <-s.pump:
+				// Swap the buffer out under the lock, then release it before
+				// forwarding so Emit is never blocked by a slow consumer.
+				buffer := func() []database.Event {
+					s.mu.Lock()
+					defer s.mu.Unlock()
+
+					buffer := s.buffer
+					s.buffer = nil
+					return buffer
+				}()
+
+				for _, event := range buffer {
+					select {
+					case <-s.done:
+						return
+					case s.channel <- event:
+					}
+				}
+			}
+		}
+	}()
+
+	return s
+}
+
+// Next returns the channel on which buffered events are delivered.
+func (s *Stream) Next() <-chan database.Event {
+	return s.channel
+}
+
+// Done is closed once the stream has been closed.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Close shuts the stream down and discards buffered events.
+// It is safe to call multiple times.
+func (s *Stream) Close() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	select {
+	case <-s.done:
+		return nil
+	default:
+	}
+
+	close(s.done)
+	s.buffer = nil
+
+	return nil
+}
+
+// Emit buffers event and wakes the pump goroutine.
+// Events emitted after Close are silently dropped.
+func (s *Stream) Emit(event database.Event) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	select {
+	case <-s.done:
+	default:
+		s.buffer = append(s.buffer, event)
+		s.push()
+	}
+}
+
+// push asynchronously signals the pump goroutine that events are buffered.
+// FIX: the receiver is renamed p -> s for consistency with the other
+// methods, and the pump send is now a select case alongside <-s.done;
+// the old `default:` branch committed to a blocking send, leaking the
+// goroutine forever if the stream was closed before the pump received.
+func (s *Stream) push() {
+	go func() {
+		select {
+		case <-s.done:
+		case s.pump <- struct{}{}:
+		}
+	}()
+}
diff --git a/pkg/database/mongodb/collection.go b/pkg/database/mongodb/collection.go
new file mode 100644
index 00000000..a896775e
--- /dev/null
+++ b/pkg/database/mongodb/collection.go
@@ -0,0 +1,262 @@
+package mongodb
+
+import (
+	"context"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/siyul-park/uniflow/internal/util"
+	"github.com/siyul-park/uniflow/pkg/database"
+	"github.com/siyul-park/uniflow/pkg/primitive"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+type (
+	// Collection adapts a *mongo.Collection to the database.Collection interface.
+	Collection struct {
+		raw  *mongo.Collection
+		lock sync.RWMutex
+	}
+)
+
+var _ database.Collection = &Collection{}
+
+// NewCollection wraps coll in a database.Collection adapter.
+func NewCollection(coll *mongo.Collection) *Collection {
+	return &Collection{raw: coll}
+}
+
+// Name returns the underlying collection's name.
+func (coll *Collection) Name() string {
+	return coll.raw.Name()
+}
+
+// Indexes returns a database.IndexView over the collection's indexes.
+func (coll *Collection) Indexes() database.IndexView {
+	coll.lock.RLock()
+	defer coll.lock.RUnlock()
+
+	return UpgradeIndexView(coll.raw.Indexes())
+}
+
+func (coll *Collection) Watch(ctx
context.Context, filter *database.Filter) (database.Stream, error) { + coll.lock.Lock() + defer coll.lock.Unlock() + + pipeline := mongo.Pipeline{} + + if filter != nil { + if match, err := MarshalFilter(filter); err != nil { + return nil, err + } else if match != nil { + pipeline = append(pipeline, bson.D{{Key: "$match", Value: match}}) + } + } + + stream, err := coll.raw.Watch(ctx, pipeline) + if err != nil { + return nil, err + } + + return UpgradeStream(stream), nil +} + +func (coll *Collection) InsertOne(ctx context.Context, doc *primitive.Map) (primitive.Object, error) { + raw, err := MarshalDocument(doc) + if err != nil { + return nil, err + } + + res, err := coll.raw.InsertOne(ctx, raw) + if err != nil { + return nil, errors.Wrap(database.ErrWrite, err.Error()) + } + + var id primitive.Object + if err := UnmarshalDocument(res.InsertedID, &id); err != nil { + return nil, err + } + return id, nil +} + +func (coll *Collection) InsertMany(ctx context.Context, docs []*primitive.Map) ([]primitive.Object, error) { + var raws bson.A + for _, doc := range docs { + if raw, err := MarshalDocument(doc); err == nil { + raws = append(raws, raw) + } else { + return nil, err + } + } + + res, err := coll.raw.InsertMany(ctx, raws) + if err != nil { + return nil, errors.Wrap(database.ErrWrite, err.Error()) + } + + var ids []primitive.Object + for _, insertedID := range res.InsertedIDs { + var id primitive.Object + if err := UnmarshalDocument(insertedID, &id); err != nil { + return nil, err + } + ids = append(ids, id) + } + + return ids, nil +} + +func (coll *Collection) UpdateOne(ctx context.Context, filter *database.Filter, patch *primitive.Map, opts ...*database.UpdateOptions) (bool, error) { + raw, err := MarshalDocument(patch) + if err != nil { + return false, err + } + f, err := MarshalFilter(filter) + if err != nil { + return false, err + } + + res, err := coll.raw.UpdateOne(ctx, f, bson.M{"$set": raw}, mongoUpdateOptions(database.MergeUpdateOptions(opts))) + if err != 
nil { + return false, errors.Wrap(database.ErrWrite, err.Error()) + } + + return res.UpsertedCount+res.ModifiedCount > 0, nil +} + +func (coll *Collection) UpdateMany(ctx context.Context, filter *database.Filter, patch *primitive.Map, opts ...*database.UpdateOptions) (int, error) { + raw, err := MarshalDocument(patch) + if err != nil { + return 0, err + } + f, err := MarshalFilter(filter) + if err != nil { + return 0, err + } + + res, err := coll.raw.UpdateMany(ctx, f, bson.M{"$set": raw}, mongoUpdateOptions(database.MergeUpdateOptions(opts))) + if err != nil { + return 0, errors.Wrap(database.ErrWrite, err.Error()) + } + + return int(res.UpsertedCount + res.ModifiedCount), nil +} + +func (coll *Collection) DeleteOne(ctx context.Context, filter *database.Filter) (bool, error) { + f, err := MarshalFilter(filter) + if err != nil { + return false, err + } + + res, err := coll.raw.DeleteOne(ctx, f) + if err != nil { + return false, errors.Wrap(database.ErrDelete, err.Error()) + } + + return res.DeletedCount > 0, nil +} + +func (coll *Collection) DeleteMany(ctx context.Context, filter *database.Filter) (int, error) { + f, err := MarshalFilter(filter) + if err != nil { + return 0, err + } + + res, err := coll.raw.DeleteMany(ctx, f) + if err != nil { + return 0, errors.Wrap(database.ErrDelete, err.Error()) + } + + return int(res.DeletedCount), nil +} + +func (coll *Collection) FindOne(ctx context.Context, filter *database.Filter, opts ...*database.FindOptions) (*primitive.Map, error) { + f, err := MarshalFilter(filter) + if err != nil { + return nil, err + } + + res := coll.raw.FindOne(ctx, f, mongoFindOneOptions(database.MergeFindOptions(opts))) + if res.Err() != nil { + if res.Err() == mongo.ErrNoDocuments { + return nil, nil + } + return nil, errors.Wrap(database.ErrRead, res.Err().Error()) + } + + var doc primitive.Object + var r any + if err := res.Decode(&r); err != nil { + return nil, err + } + if err := UnmarshalDocument(r, &doc); err != nil { + return nil, err + 
} + return doc.(*primitive.Map), nil +} + +func (coll *Collection) FindMany(ctx context.Context, filter *database.Filter, opts ...*database.FindOptions) ([]*primitive.Map, error) { + f, err := MarshalFilter(filter) + if err != nil { + return nil, err + } + + cursor, err := coll.raw.Find(ctx, f, mongoFindOptions(database.MergeFindOptions(opts))) + if err != nil { + return nil, errors.Wrap(database.ErrRead, err.Error()) + } + + var docs []*primitive.Map + for cursor.Next(ctx) { + var doc primitive.Object + var r any + if err := cursor.Decode(&r); err != nil { + return nil, err + } + if err := UnmarshalDocument(r, &doc); err != nil { + return nil, err + } + docs = append(docs, doc.(*primitive.Map)) + } + + return docs, nil +} + +func (coll *Collection) Drop(ctx context.Context) error { + coll.lock.Lock() + defer coll.lock.Unlock() + + if err := coll.raw.Drop(ctx); err != nil { + return errors.Wrap(database.ErrDelete, err.Error()) + } + + return nil +} + +func mongoUpdateOptions(opts *database.UpdateOptions) *options.UpdateOptions { + if opts == nil { + return nil + } + return util.Ptr(options.UpdateOptions{ + Upsert: opts.Upsert, + }) +} + +func mongoFindOneOptions(opts *database.FindOptions) *options.FindOneOptions { + if opts == nil { + return nil + } + return util.Ptr(options.FindOneOptions{ + Skip: util.PtrTo(opts.Skip, func(s int) int64 { return int64(s) }), + Sort: mongoSorts(opts.Sorts), + }) +} + +func mongoFindOptions(opts *database.FindOptions) *options.FindOptions { + if opts == nil { + return nil + } + return util.Ptr(options.FindOptions{ + Limit: util.PtrTo(opts.Limit, func(s int) int64 { return int64(s) }), + Skip: util.PtrTo(opts.Skip, func(s int) int64 { return int64(s) }), + Sort: mongoSorts(opts.Sorts), + }) +} diff --git a/pkg/database/mongodb/collection_test.go b/pkg/database/mongodb/collection_test.go new file mode 100644 index 00000000..ed00a210 --- /dev/null +++ b/pkg/database/mongodb/collection_test.go @@ -0,0 +1,251 @@ +package mongodb + 
+import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database/databasetest" + "github.com/stretchr/testify/assert" + "github.com/tryvium-travels/memongo" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func TestCollection_Name(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionName(t, coll) +} + +func TestCollection_Indexes(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionIndexes(t, coll) +} + +func TestCollection_Watch(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionWatch(t, coll) +} + +func TestCollection_InsertOne(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionInsertOne(t, coll) +} + +func TestCollection_InsertMany(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionInsertMany(t, coll) +} + +func TestCollection_UpdateOne(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionUpdateOne(t, coll) +} + +func TestCollection_UpdateMany(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionUpdateMany(t, coll) +} + +func TestCollection_DeleteOne(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + 
databasetest.AssertCollectionDeleteOne(t, coll) +} + +func TestCollection_DeleteMany(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionDeleteMany(t, coll) +} + +func TestCollection_FindOne(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionFindOne(t, coll) +} + +func TestCollection_FindMany(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionFindMany(t, coll) +} + +func TestCollection_Drop(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(t, err) + + databasetest.AssertCollectionDrop(t, coll) +} + +func BenchmarkCollection_InsertOne(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionInsertOne(b, coll) +} + +func BenchmarkCollection_InsertMany(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionInsertMany(b, coll) +} + +func BenchmarkCollection_UpdateOne(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionUpdateOne(b, coll) +} + +func BenchmarkCollection_UpdateMany(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionUpdateMany(b, coll) +} + +func BenchmarkCollection_DeleteOne(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, 
err) + + databasetest.BenchmarkCollectionDeleteOne(b, coll) +} + +func BenchmarkCollection_DeleteMany(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionDeleteMany(b, coll) +} + +func BenchmarkCollection_FindOne(b *testing.B) { + b.Run("with index", func(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionFindOneWithIndex(b, coll) + }) + + b.Run("without index", func(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionFindOneWithoutIndex(b, coll) + }) +} + +func BenchmarkCollection_FindMany(b *testing.B) { + b.Run("with index", func(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionFindManyWithIndex(b, coll) + }) + + b.Run("without index", func(b *testing.B) { + server := Server() + defer ReleaseServer(server) + + coll, err := testCollection(server) + assert.NoError(b, err) + + databasetest.BenchmarkCollectionFindManyWithoutIndex(b, coll) + }) +} + +func testCollection(server *memongo.Server) (*Collection, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + client, err := mongo.Connect(ctx, options.Client().ApplyURI(server.URI())) + if err != nil { + return nil, err + } + db := client.Database(faker.UUIDHyphenated()) + + return NewCollection(db.Collection(faker.UUIDHyphenated())), nil +} diff --git a/pkg/database/mongodb/connection.go b/pkg/database/mongodb/connection.go new file mode 100644 index 00000000..2673287a --- /dev/null +++ b/pkg/database/mongodb/connection.go @@ -0,0 +1,56 @@ +package mongodb + +import ( + "context" + "sync" + + 
"github.com/siyul-park/uniflow/pkg/database" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +type ( + Connection struct { + raw *mongo.Client + databases map[string]*Database + lock sync.RWMutex + } +) + +func Connect(ctx context.Context, uri string) (*Connection, error) { + client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri)) + if err != nil { + return nil, err + } + return NewConnection(client), nil +} + +func NewConnection(client *mongo.Client) *Connection { + return &Connection{ + raw: client, + databases: map[string]*Database{}, + } +} + +func (con *Connection) Database(_ context.Context, name string) (database.Database, error) { + con.lock.Lock() + defer con.lock.Unlock() + + if db, ok := con.databases[name]; ok { + return db, nil + } + + db := NewDatabase(con.raw.Database(name)) + con.databases[name] = db + + return db, nil +} + +func (con *Connection) Disconnect(ctx context.Context) error { + con.lock.Lock() + defer con.lock.Unlock() + + con.databases = map[string]*Database{} + + return con.raw.Disconnect(ctx) +} diff --git a/pkg/database/mongodb/connection_test.go b/pkg/database/mongodb/connection_test.go new file mode 100644 index 00000000..6b776318 --- /dev/null +++ b/pkg/database/mongodb/connection_test.go @@ -0,0 +1,53 @@ +package mongodb + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" +) + +func TestConnect(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + con, err := Connect(ctx, server.URI()) + assert.NoError(t, err) + assert.NotNil(t, con) +} + +func TestConnection_Database(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + con, _ := Connect(ctx, server.URI()) + + dbname := faker.UUIDHyphenated() + + 
db, err := con.Database(ctx, dbname) + assert.NoError(t, err) + assert.NotNil(t, db) + + assert.Equal(t, dbname, db.Name()) +} + +func TestConnection_Disconnect(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + con, _ := Connect(ctx, server.URI()) + + err := con.Disconnect(ctx) + assert.NoError(t, err) +} diff --git a/pkg/database/mongodb/database.go b/pkg/database/mongodb/database.go new file mode 100644 index 00000000..6bd67079 --- /dev/null +++ b/pkg/database/mongodb/database.go @@ -0,0 +1,59 @@ +package mongodb + +import ( + "context" + "sync" + + "github.com/siyul-park/uniflow/pkg/database" + "go.mongodb.org/mongo-driver/mongo" +) + +type ( + Database struct { + raw *mongo.Database + collections map[string]*Collection + lock sync.RWMutex + } +) + +var _ database.Database = &Database{} + +func NewDatabase(db *mongo.Database) *Database { + return &Database{ + raw: db, + collections: map[string]*Collection{}, + } +} + +func (db *Database) Name() string { + return db.raw.Name() +} + +func (db *Database) Collection(_ context.Context, name string) (database.Collection, error) { + db.lock.Lock() + defer db.lock.Unlock() + + if coll, ok := db.collections[name]; ok { + return coll, nil + } + + coll := NewCollection(db.raw.Collection(name)) + db.collections[name] = coll + + return coll, nil +} + +func (db *Database) Drop(ctx context.Context) error { + db.lock.Lock() + defer db.lock.Unlock() + + for _, coll := range db.collections { + if err := coll.Drop(ctx); err != nil { + return err + } + } + + db.collections = map[string]*Collection{} + + return db.raw.Drop(ctx) +} diff --git a/pkg/database/mongodb/database_test.go b/pkg/database/mongodb/database_test.go new file mode 100644 index 00000000..ebd8cb02 --- /dev/null +++ b/pkg/database/mongodb/database_test.go @@ -0,0 +1,56 @@ +package mongodb + +import ( + "context" + "testing" + "time" + + 
"github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database/databasetest" + "github.com/stretchr/testify/assert" + "github.com/tryvium-travels/memongo" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func TestDatabase_Name(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + db, err := testDatabase(server) + assert.NoError(t, err) + + databasetest.AssertDatabaseName(t, db) +} + +func TestDatabase_Collection(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + db, err := testDatabase(server) + assert.NoError(t, err) + + databasetest.AssertDatabaseCollection(t, db) +} + +func TestDatabase_Drop(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + db, err := testDatabase(server) + assert.NoError(t, err) + + databasetest.AssertDatabaseDrop(t, db) +} + +func testDatabase(server *memongo.Server) (*Database, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + client, err := mongo.Connect(ctx, options.Client().ApplyURI(server.URI())) + if err != nil { + return nil, err + } + + return NewDatabase(client.Database(faker.UUIDHyphenated())), nil +} diff --git a/pkg/database/mongodb/encoding.go b/pkg/database/mongodb/encoding.go new file mode 100644 index 00000000..bffdb134 --- /dev/null +++ b/pkg/database/mongodb/encoding.go @@ -0,0 +1,428 @@ +package mongodb + +import ( + "strings" + + "github.com/iancoleman/strcase" + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" + "go.mongodb.org/mongo-driver/bson" + bsonprimitive "go.mongodb.org/mongo-driver/bson/primitive" +) + +var ( + documentEncoder = NewDocumentEncoder() + documentDecoder = NewDocumentDecoder() + + filterEncoder = NewFilterEncoder(documentEncoder) + filterDecoder = 
NewFilterDecoder(documentDecoder) +) + +var ( + toLowerCamel = changeCase(strcase.ToLowerCamel) + toSnake = changeCase(strcase.ToSnake) +) + +func MarshalFilter(v *database.Filter) (any, error) { + return filterEncoder.Encode(v) +} + +func UnmarshalFilter(data any, v **database.Filter) error { + return filterDecoder.Decode(data, v) +} + +func MarshalDocument(v primitive.Object) (any, error) { + return documentEncoder.Encode(v) +} + +func UnmarshalDocument(data any, v *primitive.Object) error { + return documentDecoder.Decode(data, v) +} + +func NewFilterEncoder(encoder encoding.Encoder[primitive.Object, any]) encoding.Encoder[*database.Filter, any] { + return encoding.EncoderFunc[*database.Filter, any](func(source *database.Filter) (any, error) { + if source == nil { + return bson.D{}, nil + } + + self := NewFilterEncoder(encoder) + + switch source.OP { + case database.AND, database.OR: + if v, ok := source.Value.([]*database.Filter); !ok { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } else { + var values bson.A + for _, e := range v { + if value, err := self.Encode(e); err != nil { + return nil, err + } else { + values = append(values, value) + } + } + + if source.OP == database.AND { + return bson.D{{Key: "$and", Value: values}}, nil + } else if source.OP == database.OR { + return bson.D{{Key: "$or", Value: values}}, nil + } + } + case database.NULL, database.NNULL: + k := bsonKey(source.Key) + + if source.OP == database.NULL { + return bson.D{{Key: k, Value: bson.M{"$eq": nil}}}, nil + } else if source.OP == database.NNULL { + return bson.D{{Key: k, Value: bson.M{"$ne": nil}}}, nil + } + default: + if v, ok := source.Value.(primitive.Object); !ok { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } else { + k := bsonKey(source.Key) + v, err := encoder.Encode(v) + if err != nil { + return nil, err + } + + if source.OP == database.EQ { + return bson.D{{Key: k, Value: bson.M{"$eq": v}}}, nil + } else if source.OP == database.NE { + 
return bson.D{{Key: k, Value: bson.M{"$ne": v}}}, nil + } else if source.OP == database.LT { + return bson.D{{Key: k, Value: bson.M{"$lt": v}}}, nil + } else if source.OP == database.LTE { + return bson.D{{Key: k, Value: bson.M{"$lte": v}}}, nil + } else if source.OP == database.GT { + return bson.D{{Key: k, Value: bson.M{"$gt": v}}}, nil + } else if source.OP == database.GTE { + return bson.D{{Key: k, Value: bson.M{"$gte": v}}}, nil + } else if source.OP == database.IN { + return bson.D{{Key: k, Value: bson.M{"$in": v}}}, nil + } else if source.OP == database.NIN { + return bson.D{{Key: k, Value: bson.M{"$nin": v}}}, nil + } + } + } + + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +func NewFilterDecoder(decoder encoding.Decoder[any, *primitive.Object]) encoding.Decoder[any, **database.Filter] { + return encoding.DecoderFunc[any, **database.Filter](func(source any, target **database.Filter) error { + s, ok := bsonMA(source) + if !ok { + return errors.WithStack(encoding.ErrUnsupportedValue) + } + + self := NewFilterDecoder(decoder) + + var children []*database.Filter + for _, curr := range s { + for key, value := range curr { + if key == "$and" || key == "$or" { + if value, ok := bsonMA(value); !ok { + return errors.WithStack(encoding.ErrUnsupportedValue) + } else { + var values []*database.Filter + for _, v := range value { + var value *database.Filter + if err := self.Decode(v, &value); err != nil { + return err + } + values = append(values, value) + } + + if key == "$and" { + children = append(children, &database.Filter{ + OP: database.AND, + Value: values, + }) + } else if key == "$or" { + children = append(children, &database.Filter{ + OP: database.OR, + Value: values, + }) + } + } + } else if key == "$not" { + var child *database.Filter + if err := self.Decode(value, &child); err != nil { + return err + } + if child.OP == database.EQ { + child.OP = database.NE + } else if child.OP == database.NE { + child.OP = database.EQ + } else if 
child.OP == database.IN { + child.OP = database.NIN + } else if child.OP == database.NIN { + child.OP = database.IN + } else if child.OP == database.NULL { + child.OP = database.NNULL + } else if child.OP == database.NNULL { + child.OP = database.NULL + } else { + return errors.WithStack(encoding.ErrUnsupportedValue) + } + children = append(children, child) + } else if value, ok := bsonM(value); ok { + for op, v := range value { + if !strings.HasPrefix(op, "$") { + return errors.WithStack(encoding.ErrUnsupportedValue) + } + child := &database.Filter{ + Key: key, + } + if op == "$eq" { + if util.IsNil(v) { + child.OP = database.NULL + } else { + child.OP = database.EQ + } + } else if op == "$ne" { + if util.IsNil(v) { + child.OP = database.NNULL + } else { + child.OP = database.NE + } + } else if op == "$lt" { + child.OP = database.LT + } else if op == "$lte" { + child.OP = database.LTE + } else if op == "$gt" { + child.OP = database.GT + } else if op == "$gte" { + child.OP = database.GTE + } else if op == "$in" { + child.OP = database.IN + } else if op == "$nin" { + child.OP = database.NIN + } else { + return errors.WithStack(encoding.ErrUnsupportedValue) + } + + var value primitive.Object + if err := decoder.Decode(v, &value); err != nil { + return err + } + child.Value = value + children = append(children, child) + } + } else { + return errors.WithStack(encoding.ErrUnsupportedValue) + } + } + } + + if len(children) == 0 { + *target = nil + } else if len(children) == 1 { + *target = children[0] + } else { + *target = &database.Filter{ + OP: database.AND, + Value: children, + } + } + + return nil + }) +} + +func NewDocumentEncoder() encoding.Encoder[primitive.Object, any] { + return encoding.EncoderFunc[primitive.Object, any](func(source primitive.Object) (any, error) { + if source == nil { + return bsonprimitive.Null{}, nil + } + + self := NewDocumentEncoder() + + if s, ok := source.(primitive.Binary); ok { + return bsonprimitive.Binary{Data: []byte(s)}, nil + } 
else if s, ok := source.(*primitive.Map); ok { + t := make(bsonprimitive.M, s.Len()) + for _, k := range s.Keys() { + v, _ := s.Get(k) + if k, ok := k.(primitive.String); !ok { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } else { + if v, err := self.Encode(v); err != nil { + return nil, err + } else { + t[bsonKey(k.String())] = v + } + } + } + return t, nil + } else if s, ok := source.(*primitive.Slice); ok { + t := make(bsonprimitive.A, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, err := self.Encode(s.Get(i)); err != nil { + return nil, err + } else { + t[i] = v + } + } + return t, nil + } else { + return source.Interface(), nil + } + }) +} + +func NewDocumentDecoder() encoding.Decoder[any, *primitive.Object] { + return encoding.DecoderFunc[any, *primitive.Object](func(source any, target *primitive.Object) error { + self := NewDocumentDecoder() + + if source == nil { + *target = nil + return nil + } else if _, ok := source.(bsonprimitive.Null); ok { + *target = nil + return nil + } else if _, ok := source.(bsonprimitive.Undefined); ok { + *target = nil + return nil + } else if s, ok := source.(bsonprimitive.Binary); ok { + *target = primitive.NewBinary(s.Data) + return nil + } else if s, ok := source.(bsonprimitive.A); ok { + values := make([]primitive.Object, len(s)) + for i, e := range s { + var value primitive.Object + if err := self.Decode(e, &value); err != nil { + return err + } + values[i] = value + } + *target = primitive.NewSlice(values...) + return nil + } else if s, ok := source.(bsonprimitive.D); ok { + pairs := make([]primitive.Object, len(s)*2) + for i, e := range s { + var value primitive.Object + if err := self.Decode(e.Value, &value); err != nil { + return err + } + pairs[i*2] = primitive.NewString(documentKey(e.Key)) + pairs[i*2+1] = value + } + *target = primitive.NewMap(pairs...) 
+ return nil + } else if s, ok := source.(bsonprimitive.M); ok { + pairs := make([]primitive.Object, len(s)*2) + i := 0 + for k, v := range s { + var value primitive.Object + if err := self.Decode(v, &value); err != nil { + return err + } + pairs[i*2] = primitive.NewString(documentKey(k)) + pairs[i*2+1] = value + i += 1 + } + *target = primitive.NewMap(pairs...) + return nil + } else if s, err := primitive.MarshalBinary(source); err == nil { + *target = s + return nil + } + + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +func mongoSorts(sorts []database.Sort) bson.D { + sort := bson.D{} + for _, s := range sorts { + sort = append(sort, bson.E{ + Key: bsonKey(s.Key), + Value: mongoOrder(s.Order), + }) + } + return sort +} + +func mongoOrder(order database.Order) int { + if order == database.OrderASC { + return 1 + } + return -1 +} + +func bsonKey(key string) string { + if key == "id" { + return "_id" + } + return toLowerCamel(key) +} + +func documentKey(key string) string { + if key == "_id" { + return "id" + } + return toSnake(key) +} + +func changeCase(convert func(string) string) func(string) string { + return func(str string) string { + var tokens []string + for _, curr := range strings.Split(str, ".") { + tokens = append(tokens, convert(curr)) + } + return strings.Join(tokens, ".") + } +} + +func bsonMA(value any) ([]bson.M, bool) { + if m, ok := bsonM(value); ok { + return []bson.M{m}, true + } + + var m []bson.M + if v, ok := value.([]bson.M); ok { + m = v + } else if v, ok := value.([]bson.D); ok { + for _, e := range v { + if e, ok := bsonM(e); ok { + m = append(m, e) + } else { + return nil, false + } + } + } else if v, ok := value.([]any); ok { + for _, e := range v { + if e, ok := bsonM(e); ok { + m = append(m, e) + } else { + return nil, false + } + } + } else { + return nil, false + } + + return m, true +} + +func bsonM(value any) (bson.M, bool) { + var m bson.M + if v, ok := value.(bson.M); ok { + m = v + } else if v, ok := 
value.(bson.D); ok { + m := make(bson.M, len(v)) + for _, e := range v { + m[e.Key] = e.Value + } + return m, true + } else { + return nil, false + } + + return m, true +} diff --git a/pkg/database/mongodb/index.go b/pkg/database/mongodb/index.go new file mode 100644 index 00000000..f75eb12c --- /dev/null +++ b/pkg/database/mongodb/index.go @@ -0,0 +1,89 @@ +package mongodb + +import ( + "context" + + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +type ( + IndexView struct { + raw mongo.IndexView + } +) + +var _ database.IndexView = &IndexView{} + +func UpgradeIndexView(iv mongo.IndexView) *IndexView { + return &IndexView{raw: iv} +} + +func (iv *IndexView) List(ctx context.Context) ([]database.IndexModel, error) { + cursor, err := iv.raw.List(ctx) + if err != nil { + return nil, err + } + + var indexes []bson.M + if err := cursor.All(context.Background(), &indexes); err != nil { + return nil, err + } + + var models []database.IndexModel + for _, index := range indexes { + key, _ := index["key"].(bson.M) + name, _ := index["name"].(string) + unique, _ := index["unique"].(bool) + partialFilterExpression, _ := index["partialFilterExpression"].(bson.M) + + var keys []string + for k := range key { + keys = append(keys, documentKey(k)) + } + var partial *database.Filter + if err := UnmarshalFilter(partialFilterExpression, &partial); err != nil { + return nil, err + } + + models = append(models, database.IndexModel{ + Keys: keys, + Name: name, + Unique: unique, + Partial: partial, + }) + } + + return models, nil +} + +func (iv *IndexView) Create(ctx context.Context, index database.IndexModel) error { + keys := bson.D{} + for _, k := range index.Keys { + keys = append(keys, bson.E{Key: bsonKey(k), Value: 1}) + } + + partialFilterExpression, err := MarshalFilter(index.Partial) + if err != nil { + return 
err + } + + _, err = iv.raw.CreateOne(ctx, mongo.IndexModel{ + Keys: keys, + Options: &options.IndexOptions{ + Name: util.Ptr(index.Name), + Unique: util.Ptr(index.Unique), + PartialFilterExpression: partialFilterExpression, + }, + }) + + return err +} + +func (iv *IndexView) Drop(ctx context.Context, name string) error { + _, err := iv.raw.DropOne(ctx, name) + return err +} diff --git a/pkg/database/mongodb/index_test.go b/pkg/database/mongodb/index_test.go new file mode 100644 index 00000000..d8044710 --- /dev/null +++ b/pkg/database/mongodb/index_test.go @@ -0,0 +1,59 @@ +package mongodb + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database/databasetest" + "github.com/stretchr/testify/assert" + "github.com/tryvium-travels/memongo" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func TestIndexView_List(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + indexView, err := testIndexView(server) + assert.NoError(t, err) + + databasetest.AssertIndexViewList(t, indexView) +} + +func TestIndexView_Create(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + indexView, err := testIndexView(server) + assert.NoError(t, err) + + databasetest.AssertIndexViewCreate(t, indexView) +} + +func TestIndexView_Drop(t *testing.T) { + server := Server() + defer ReleaseServer(server) + + indexView, err := testIndexView(server) + assert.NoError(t, err) + + databasetest.AssertIndexViewDrop(t, indexView) +} + +func testIndexView(server *memongo.Server) (*IndexView, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + client, err := mongo.Connect(ctx, options.Client().ApplyURI(server.URI())) + if err != nil { + return nil, err + } + + db := client.Database(faker.UUIDHyphenated()) + coll := db.Collection(faker.UUIDHyphenated()) + + return UpgradeIndexView(coll.Indexes()), nil +} diff --git 
a/pkg/database/mongodb/server.go b/pkg/database/mongodb/server.go new file mode 100644 index 00000000..0779d929 --- /dev/null +++ b/pkg/database/mongodb/server.go @@ -0,0 +1,57 @@ +package mongodb + +import ( + "context" + "sync" + "time" + + "github.com/tryvium-travels/memongo" + "github.com/tryvium-travels/memongo/memongolog" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +var ( + serverStartUpLock sync.Mutex + serverPool = sync.Pool{ + New: func() any { + serverStartUpLock.Lock() + defer serverStartUpLock.Unlock() + + opts := &memongo.Options{ + MongoVersion: "6.0.8", + LogLevel: memongolog.LogLevelWarn, + ShouldUseReplica: true, + } + + if server, err := memongo.StartWithOptions(opts); err == nil { + return server + } else { + panic(err) + } + }, + } +) + +func Server() *memongo.Server { + return serverPool.Get().(*memongo.Server) +} + +func ReleaseServer(server *memongo.Server) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if client, err := mongo.Connect(ctx, options.Client().ApplyURI(server.URI()+"/?retryWrites=false")); err == nil { + if databases, err := client.ListDatabaseNames(ctx, bson.D{}); err == nil { + for _, db := range databases { + _ = client.Database(db).Drop(ctx) + } + } + _ = client.Disconnect(ctx) + serverPool.Put(server) + return + } + + server.Stop() +} diff --git a/pkg/database/mongodb/server_test.go b/pkg/database/mongodb/server_test.go new file mode 100644 index 00000000..0627d872 --- /dev/null +++ b/pkg/database/mongodb/server_test.go @@ -0,0 +1,14 @@ +package mongodb + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServerAndRelease(t *testing.T) { + server := Server() + assert.NotNil(t, server) + + ReleaseServer(server) +} diff --git a/pkg/database/mongodb/stream.go b/pkg/database/mongodb/stream.go new file mode 100644 index 00000000..9188eb84 --- /dev/null +++ 
b/pkg/database/mongodb/stream.go @@ -0,0 +1,98 @@ +package mongodb + +import ( + "context" + + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" +) + +type ( + Stream struct { + raw *mongo.ChangeStream + channel chan database.Event + done chan struct{} + } +) + +func UpgradeStream(stream *mongo.ChangeStream) *Stream { + s := &Stream{ + raw: stream, + channel: make(chan database.Event), + done: make(chan struct{}), + } + + go func() { + defer func() { _ = s.raw.Close(context.Background()) }() + defer func() { close(s.channel) }() + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + <-s.done + }() + + for { + if !s.raw.Next(ctx) { + return + } + var data bson.M + if err := stream.Decode(&data); err != nil { + return + } + + var id primitive.Object + if documentKey, ok := data["documentKey"]; ok { + if documentKey, ok := documentKey.(bson.M); ok { + if err := UnmarshalDocument(documentKey["_id"], &id); err != nil { + continue + } + } else { + continue + } + } + + e := database.Event{ + DocumentID: id, + } + switch data["operationType"] { + case "insert": + e.OP = database.EventInsert + case "update": + e.OP = database.EventUpdate + case "delete": + e.OP = database.EventDelete + } + + select { + case <-s.done: + return + case s.channel <- e: + } + } + }() + + return s +} + +func (s *Stream) Next() <-chan database.Event { + return s.channel +} + +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +func (s *Stream) Close() error { + select { + case <-s.done: + return nil + default: + } + + close(s.done) + + return nil +} diff --git a/pkg/database/order.go b/pkg/database/order.go new file mode 100644 index 00000000..b1f9784d --- /dev/null +++ b/pkg/database/order.go @@ -0,0 +1,10 @@ +package database + +type ( + Order int +) + +const ( + OrderASC Order = iota + OrderDESC +) diff --git 
a/pkg/database/sort.go b/pkg/database/sort.go new file mode 100644 index 00000000..d6c52503 --- /dev/null +++ b/pkg/database/sort.go @@ -0,0 +1,8 @@ +package database + +type ( + Sort struct { + Key string + Order Order + } +) diff --git a/pkg/hook/builder.go b/pkg/hook/builder.go new file mode 100644 index 00000000..f4e4d2b0 --- /dev/null +++ b/pkg/hook/builder.go @@ -0,0 +1,35 @@ +package hook + +type ( + // Builder builds a new Hooks. + Builder []func(*Hook) error +) + +// NewBuilder returns a new HooksBuilder. +func NewBuilder(funcs ...func(*Hook) error) Builder { + return Builder(funcs) +} + +// AddToHooks adds all registered hook to s. +func (b *Builder) AddToHooks(h *Hook) error { + for _, f := range *b { + if err := f(h); err != nil { + return err + } + } + return nil +} + +// Register adds one or more hook. +func (b *Builder) Register(funcs ...func(*Hook) error) { + *b = append(*b, funcs...) +} + +// Build returns a new Hooks containing the registered hooks. +func (b *Builder) Build() (*Hook, error) { + h := New() + if err := b.AddToHooks(h); err != nil { + return nil, err + } + return h, nil +} diff --git a/pkg/hook/builder_test.go b/pkg/hook/builder_test.go new file mode 100644 index 00000000..3e2a36cb --- /dev/null +++ b/pkg/hook/builder_test.go @@ -0,0 +1,23 @@ +package hook + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHooksBuilder_Register(t *testing.T) { + b := NewBuilder() + + b.Register(func(_ *Hook) error { return nil }) + assert.Len(t, b, 1) +} + +func TestHooksBuilder_AddToScheme(t *testing.T) { + b := NewBuilder() + + b.Register(func(_ *Hook) error { return nil }) + + err := b.AddToHooks(New()) + assert.NoError(t, err) +} diff --git a/pkg/hook/hook.go b/pkg/hook/hook.go new file mode 100644 index 00000000..d1d938f6 --- /dev/null +++ b/pkg/hook/hook.go @@ -0,0 +1,113 @@ +package hook + +import ( + "sync" + + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/symbol" +) + +type ( + // 
Hook is a collection of hook functions. + Hook struct { + preLoadHooks []symbol.PreLoadHook + postLoadHooks []symbol.PostLoadHook + preUnloadHooks []symbol.PreUnloadHook + postUnloadHooks []symbol.PostUnloadHook + mu sync.RWMutex + } +) + +var _ symbol.PreLoadHook = &Hook{} +var _ symbol.PostLoadHook = &Hook{} +var _ symbol.PreUnloadHook = &Hook{} +var _ symbol.PostUnloadHook = &Hook{} + +// New returns a new Hooks. +func New() *Hook { + return &Hook{} +} + +// AddPreLoadHook adds a PreLoadHook. +func (h *Hook) AddPreLoadHook(hook symbol.PreLoadHook) { + h.mu.Lock() + defer h.mu.Unlock() + + h.preLoadHooks = append(h.preLoadHooks, hook) +} + +// AddPostLoadHook adds a PostLoadHook. +func (h *Hook) AddPostLoadHook(hook symbol.PostLoadHook) { + h.mu.Lock() + defer h.mu.Unlock() + + h.postLoadHooks = append(h.postLoadHooks, hook) +} + +// AddPreUnloadHook adds a PreUnloadHook. +func (h *Hook) AddPreUnloadHook(hook symbol.PreUnloadHook) { + h.mu.Lock() + defer h.mu.Unlock() + + h.preUnloadHooks = append(h.preUnloadHooks, hook) +} + +// AddPostUnloadHook adds a PostUnloadHook. +func (h *Hook) AddPostUnloadHook(hook symbol.PostUnloadHook) { + h.mu.Lock() + defer h.mu.Unlock() + + h.postUnloadHooks = append(h.postUnloadHooks, hook) +} + +// PreLoad runs PreLoadHooks. +func (h *Hook) PreLoad(n node.Node) error { + h.mu.RLock() + defer h.mu.RUnlock() + + for _, hook := range h.preLoadHooks { + if err := hook.PreLoad(n); err != nil { + return err + } + } + return nil +} + +// PostLoad runs PostLoadHooks. +func (h *Hook) PostLoad(n node.Node) error { + h.mu.RLock() + defer h.mu.RUnlock() + + for _, hook := range h.postLoadHooks { + if err := hook.PostLoad(n); err != nil { + return err + } + } + return nil +} + +// PreUnload runs PreUnloadHooks. 
+func (h *Hook) PreUnload(n node.Node) error { + h.mu.RLock() + defer h.mu.RUnlock() + + for _, hook := range h.preUnloadHooks { + if err := hook.PreUnload(n); err != nil { + return err + } + } + return nil +} + +// PostUnload runs PostUnloadHooks. +func (h *Hook) PostUnload(n node.Node) error { + h.mu.RLock() + defer h.mu.RUnlock() + + for _, hook := range h.postUnloadHooks { + if err := hook.PostUnload(n); err != nil { + return err + } + } + return nil +} diff --git a/pkg/hook/hook_test.go b/pkg/hook/hook_test.go new file mode 100644 index 00000000..106c8230 --- /dev/null +++ b/pkg/hook/hook_test.go @@ -0,0 +1,69 @@ +package hook + +import ( + "testing" + + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/symbol" + "github.com/stretchr/testify/assert" +) + +func TestHook_PreLoadHook(t *testing.T) { + hooks := New() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + h := symbol.PreLoadHookFunc(func(_ node.Node) error { + return nil + }) + + hooks.AddPreLoadHook(h) + + err := hooks.PreLoad(n) + assert.NoError(t, err) +} + +func TestHook_PostLoadHook(t *testing.T) { + hooks := New() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + h := symbol.PostLoadHookFunc(func(_ node.Node) error { + return nil + }) + + hooks.AddPostLoadHook(h) + + err := hooks.PostLoad(n) + assert.NoError(t, err) +} + +func TestHook_PreUnloadHook(t *testing.T) { + hooks := New() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + h := symbol.PreUnloadHookFunc(func(_ node.Node) error { + return nil + }) + + hooks.AddPreUnloadHook(h) + + err := hooks.PreUnload(n) + assert.NoError(t, err) +} + +func TestHook_PostUnloadHook(t *testing.T) { + hooks := New() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + h := symbol.PostUnloadHookFunc(func(_ node.Node) error { + return nil + }) + + hooks.AddPostUnloadHook(h) + + err := hooks.PostUnload(n) + assert.NoError(t, err) +} diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go new 
file mode 100644 index 00000000..d11898cc --- /dev/null +++ b/pkg/loader/loader.go @@ -0,0 +1,522 @@ +package loader + +import ( + "context" + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/siyul-park/uniflow/pkg/symbol" +) + +type ( + // Config is a config for the Loader. + Config struct { + Table *symbol.Table + Scheme *scheme.Scheme + Storage *storage.Storage + } + + // Loader loads scheme.Spec into symbol.Table. + Loader struct { + scheme *scheme.Scheme + table *symbol.Table + remote *storage.Storage + local *storage.Storage + referenced map[ulid.ULID]links + undefined map[ulid.ULID]links + mu sync.RWMutex + } + + links map[string][]scheme.PortLocation +) + +// New returns a new Loader. +func New(ctx context.Context, config Config) (*Loader, error) { + table := config.Table + scheme := config.Scheme + remote := config.Storage + + local, err := storage.New(ctx, storage.Config{ + Scheme: scheme, + Database: memdb.New(""), + }) + if err != nil { + return nil, err + } + + return &Loader{ + scheme: scheme, + table: table, + remote: remote, + local: local, + referenced: make(map[ulid.ULID]links), + undefined: make(map[ulid.ULID]links), + }, nil +} + +// LoadOne loads a single scheme.Spec from the storage.Storage +func (ld *Loader) LoadOne(ctx context.Context, filter *storage.Filter) (node.Node, error) { + ld.mu.Lock() + defer ld.mu.Unlock() + + return ld.loadOne(ctx, filter) +} + +// LoadMany loads multiple scheme.Spec from the storage.Storage +func (ld *Loader) LoadMany(ctx context.Context, filter *storage.Filter) ([]node.Node, error) { + ld.mu.Lock() + defer ld.mu.Unlock() + + return ld.loadMany(ctx, filter) +} + +// UnloadOne unloads a single scheme.Spec from the storage.Storage +func (ld *Loader) UnloadOne(ctx
context.Context, filter *storage.Filter) (bool, error) { + ld.mu.Lock() + defer ld.mu.Unlock() + + return ld.unloadOne(ctx, filter) +} + +// UnloadMany unloads multiple scheme.Spec from the storage.Storage +func (ld *Loader) UnloadMany(ctx context.Context, filter *storage.Filter) (int, error) { + ld.mu.Lock() + defer ld.mu.Unlock() + + return ld.unloadMany(ctx, filter) +} + +func (ld *Loader) loadOne(ctx context.Context, filter *storage.Filter) (node.Node, error) { + remote, err := ld.remote.FindOne(ctx, filter) + if err != nil { + return nil, err + } + local, err := ld.local.FindOne(ctx, filter) + if err != nil { + return nil, err + } + + if remote != nil { + if local != nil { + if ok := util.Equal(local, remote); ok { + if n, ok := ld.table.Lookup(remote.GetID()); ok { + return n, nil + } + } + } + } else { + if local != nil { + _, err := ld.unloadOne(ctx, storage.Where[ulid.ULID](scheme.KeyID).EQ(local.GetID())) + return nil, err + } + return nil, nil + } + + if n, err := ld.scheme.Decode(remote); err != nil { + return nil, err + } else { + n, err := ld.table.Insert(n) + if err != nil { + return nil, err + } + + if local == nil { + if _, err := ld.local.InsertOne(ctx, remote); err != nil { + return nil, err + } + } else { + if _, err := ld.local.UpdateOne(ctx, remote); err != nil { + return nil, err + } + } + + if err := ld.resolveLinks(ctx, local, remote); err != nil { + return nil, err + } + + return n, nil + } +} + +func (ld *Loader) loadMany(ctx context.Context, filter *storage.Filter) ([]node.Node, error) { + remotes, err := ld.remote.FindMany(ctx, filter) + if err != nil { + return nil, err + } + locals, err := ld.local.FindMany(ctx, filter) + if err != nil { + return nil, err + } + + idToLocal := map[ulid.ULID]scheme.Spec{} + idToRemote := map[ulid.ULID]scheme.Spec{} + for _, spec := range locals { + idToLocal[spec.GetID()] = spec + } + for _, spec := range remotes { + idToRemote[spec.GetID()] = spec + } + + var removeIds []ulid.ULID + for id := range 
idToLocal { + if _, ok := idToRemote[id]; !ok { + removeIds = append(removeIds, id) + } + } + if len(removeIds) > 0 { + if _, err := ld.unloadMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(removeIds...)); err != nil { + return nil, err + } + } + + var nodes []node.Node + for id, remote := range idToRemote { + local := idToLocal[id] + if local != nil { + if ok := util.Equal(local, remote); ok { + if n, ok := ld.table.Lookup(id); ok { + nodes = append(nodes, n) + continue + } + } + } + + if n, err := ld.scheme.Decode(remote); err != nil { + return nil, err + } else { + if sym, err := ld.table.Insert(n); err != nil { + return nil, err + } else { + nodes = append(nodes, sym) + } + if local == nil { + if _, err := ld.local.InsertOne(ctx, remote); err != nil { + return nil, err + } + } else { + if _, err := ld.local.UpdateOne(ctx, remote); err != nil { + return nil, err + } + } + } + } + + for id, remote := range idToRemote { + local := idToLocal[id] + if err := ld.resolveLinks(ctx, local, remote); err != nil { + return nil, err + } + } + + return nodes, nil +} + +func (ld *Loader) unloadOne(ctx context.Context, filter *storage.Filter) (bool, error) { + local, err := ld.local.FindOne(ctx, filter) + if err != nil { + return false, err + } + if local == nil { + return false, nil + } + + if err := ld.resolveLinks(ctx, local, nil); err != nil { + return false, err + } + if _, err := ld.table.Free(local.GetID()); err != nil { + return false, err + } + return ld.local.DeleteOne(ctx, storage.Where[ulid.ULID](scheme.KeyID).EQ(local.GetID())) +} + +func (ld *Loader) unloadMany(ctx context.Context, filter *storage.Filter) (int, error) { + locals, err := ld.local.FindMany(ctx, filter) + if err != nil { + return 0, err + } + + for _, local := range locals { + if err := ld.resolveLinks(ctx, local, nil); err != nil { + return 0, err + } + if _, err := ld.table.Free(local.GetID()); err != nil { + return 0, err + } + } + + var ids []ulid.ULID + for _, local := range locals { + ids 
= append(ids, local.GetID()) + } + return ld.local.DeleteMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(ids...)) +} + +func (ld *Loader) resolveLinks(ctx context.Context, local scheme.Spec, remote scheme.Spec) error { + var n node.Node + var ok bool + + var spec scheme.Spec + var localLinks links + var remoteLinks links + + if local != nil { + spec = local + localLinks = local.GetLinks() + n, ok = ld.table.Lookup(local.GetID()) + } + if remote != nil { + spec = remote + remoteLinks = remote.GetLinks() + if !ok { + n, ok = ld.table.Lookup(remote.GetID()) + } + } + if !ok { + return nil + } + + deletions := localLinks + additions := remoteLinks + + undefined := links{} + + for name, locations := range deletions { + for _, location := range locations { + id := location.ID + + if util.IsZero(id) { + if location.Name != "" { + filter := storage.Where[string](scheme.KeyNamespace).EQ(spec.GetNamespace()) + filter = filter.And(storage.Where[string](scheme.KeyName).EQ(location.Name)) + if spec, err := ld.local.FindOne(ctx, filter); err != nil { + return err + } else if spec != nil { + id = spec.GetID() + } + } + } + + if !util.IsZero(id) { + if ref, ok := ld.table.Lookup(id); ok { + referenced := ld.referenced[ref.ID()] + var locations []scheme.PortLocation + for _, location := range referenced[location.Port] { + if location.ID != n.ID() || location.Port != name { + locations = append(locations, location) + } + } + if len(locations) > 0 { + referenced[location.Port] = locations + ld.referenced[ref.ID()] = referenced + } else if referenced != nil { + delete(referenced, location.Port) + ld.referenced[ref.ID()] = referenced + } + } + } + } + } + + for name, locations := range additions { + p1, ok := n.Port(name) + if !ok { + undefined[name] = locations + continue + } + + for _, location := range locations { + filter := storage.Where[string](scheme.KeyNamespace).EQ(spec.GetNamespace()) + if !util.IsZero(location.ID) { + filter = 
filter.And(storage.Where[ulid.ULID](scheme.KeyID).EQ(location.ID)) + } else if location.Name != "" { + filter = filter.And(storage.Where[string](scheme.KeyName).EQ(location.Name)) + } else { + continue + } + + // TODO: use load many + if ref, err := ld.loadOne(ctx, filter); err != nil { + return err + } else if ref != nil { + if p2, ok := ref.Port(location.Port); ok { + p1.Link(p2) + + referenced := ld.referenced[ref.ID()] + if referenced == nil { + referenced = links{} + } + referenced[location.Port] = append(referenced[location.Port], scheme.PortLocation{ + ID: n.ID(), + Port: name, + }) + ld.referenced[ref.ID()] = referenced + } else { + undefined[name] = append(undefined[name], location) + } + } else { + undefined[name] = append(undefined[name], location) + } + } + } + + undefined = diffLinks(unionLinks(ld.undefined[n.ID()], undefined), deletions) + + if len(undefined) > 0 { + ld.undefined[n.ID()] = undefined + } else { + delete(ld.undefined, n.ID()) + } + + if remote == nil { + ld.removeReference(ctx, n.ID()) + } else { + for name, locations := range ld.referenced[spec.GetID()] { + p1, ok := n.Port(name) + if !ok { + continue + } + for _, location := range locations { + if ref, ok := ld.table.Lookup(location.ID); ok { + if p2, ok := ref.Port(location.Port); ok { + p1.Link(p2) + } + } + } + } + + for id, additions := range ld.undefined { + if ref, err := ld.local.FindOne(ctx, storage.Where[ulid.ULID](scheme.KeyID).EQ(id)); err != nil { + return err + } else if ref == nil { + ld.removeReference(ctx, id) + delete(ld.undefined, id) + continue + } else if ref.GetNamespace() != spec.GetNamespace() { + continue + } + + undefined := make(links, len(additions)) + + if ref, ok := ld.table.Lookup(id); ok { + for name, locations := range additions { + p1, ok := ref.Port(name) + if !ok { + continue + } + + for _, location := range locations { + if (!util.IsZero(location.ID) && location.ID == spec.GetID()) || (location.Name != "" && location.Name == spec.GetName()) { + if 
p2, ok := n.Port(location.Port); ok { + p1.Link(p2) + + referenced := ld.referenced[n.ID()] + if referenced == nil { + referenced = links{} + } + referenced[location.Port] = append(referenced[location.Port], scheme.PortLocation{ + ID: ref.ID(), + Port: name, + }) + ld.referenced[n.ID()] = referenced + } else { + undefined[name] = append(undefined[name], location) + } + } else { + undefined[name] = append(undefined[name], location) + } + } + } + } + + ld.undefined[id] = undefined + } + } + + return nil +} + +func (ld *Loader) removeReference(ctx context.Context, id ulid.ULID) { + for name, locations := range ld.referenced[id] { + for _, location := range locations { + if ref, ok := ld.table.Lookup(location.ID); ok { + undefined := ld.undefined[ref.ID()] + if undefined == nil { + undefined = links{} + } + undefined[location.Port] = append(undefined[location.Port], scheme.PortLocation{ + ID: id, + Port: name, + }) + ld.undefined[ref.ID()] = undefined + } + } + } + delete(ld.referenced, id) +} + +func diffLinks(l1 links, l2 links) links { + diff := make(links, len(l1)) + for name, locations1 := range l1 { + diffLocationSet := map[scheme.PortLocation]struct{}{} + for _, location := range locations1 { + diffLocationSet[location] = struct{}{} + } + if locations2, ok := l2[name]; ok { + for _, location := range locations2 { + delete(diffLocationSet, location) + } + } + + var diffLocations []scheme.PortLocation + for location := range diffLocationSet { + diffLocations = append(diffLocations, location) + } + + if len(diffLocations) > 0 { + diff[name] = diffLocations + } + } + + if len(diff) == 0 { + return nil + } + return diff +} + +func unionLinks(l1 links, l2 links) links { + unionSet := make(map[string]map[scheme.PortLocation]struct{}, len(l1)+len(l2)) + for name, locations := range l1 { + unionLocationSet := map[scheme.PortLocation]struct{}{} + for _, location := range locations { + unionLocationSet[location] = struct{}{} + } + unionSet[name] = unionLocationSet + } + 
for name, locations := range l2 { + unionLocationSet := unionSet[name] + if len(unionLocationSet) == 0 { + unionLocationSet = map[scheme.PortLocation]struct{}{} + } + for _, location := range locations { + unionLocationSet[location] = struct{}{} + } + unionSet[name] = unionLocationSet + } + + union := make(links, len(unionSet)) + for name, locationSet := range unionSet { + var locations []scheme.PortLocation + for location := range locationSet { + locations = append(locations, location) + } + + union[name] = locations + } + + return union +} diff --git a/pkg/loader/loader_test.go b/pkg/loader/loader_test.go new file mode 100644 index 00000000..c9192b49 --- /dev/null +++ b/pkg/loader/loader_test.go @@ -0,0 +1,439 @@ +package loader + +import ( + "context" + "testing" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/siyul-park/uniflow/pkg/symbol" + "github.com/stretchr/testify/assert" +) + +func TestLoader_LoadOne(t *testing.T) { + t.Run("linked all", func(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + kind := faker.Word() + + spec1 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + spec2 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Links: map[string][]scheme.PortLocation{ + node.PortIO: { + { + ID: spec1.GetID(), + Port: node.PortIO, + }, + }, + }, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), 
nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec1) + st.InsertOne(context.Background(), spec2) + + r2, err := ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec2.GetID())) + assert.NoError(t, err) + assert.NotNil(t, r2) + + n1, ok := tb.Lookup(spec1.GetID()) + assert.True(t, ok) + + n2, ok := tb.Lookup(spec2.GetID()) + assert.True(t, ok) + + p1, _ := n1.Port(node.PortIO) + p2, _ := n2.Port(node.PortIO) + + assert.Equal(t, p1.Links(), 1) + assert.Equal(t, p2.Links(), 1) + }) + + t.Run("linked all with name", func(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + kind := faker.Word() + + spec1 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Name: faker.Word(), + } + spec2 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Name: faker.Word(), + Links: map[string][]scheme.PortLocation{ + node.PortIO: { + { + Name: spec1.Name, + Port: node.PortIO, + }, + }, + }, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec1) + st.InsertOne(context.Background(), spec2) + + r2, err := ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec2.GetID())) + assert.NoError(t, err) + assert.NotNil(t, r2) + + n1, ok := tb.Lookup(spec1.GetID()) + assert.True(t, ok) + + n2, ok := tb.Lookup(spec2.GetID()) + assert.True(t, ok) + + p1, _ := n1.Port(node.PortIO) + p2, _ := n2.Port(node.PortIO) + + 
assert.Equal(t, p1.Links(), 1) + assert.Equal(t, p2.Links(), 1) + }) + + t.Run("unlinked any", func(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + kind := faker.Word() + + spec1 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + spec2 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Links: map[string][]scheme.PortLocation{ + node.PortIO: { + { + ID: spec1.GetID(), + Port: node.PortIO, + }, + }, + }, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec2) + + r2, err := ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec2.GetID())) + assert.NoError(t, err) + assert.NotNil(t, r2) + + st.InsertOne(context.Background(), spec1) + + r1, err := ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec1.GetID())) + assert.NoError(t, err) + assert.NotNil(t, r1) + + n1, ok := tb.Lookup(spec1.GetID()) + assert.True(t, ok) + + n2, ok := tb.Lookup(spec2.GetID()) + assert.True(t, ok) + + p1, _ := n1.Port(node.PortIO) + p2, _ := n2.Port(node.PortIO) + + assert.Equal(t, p1.Links(), 1) + assert.Equal(t, p2.Links(), 1) + }) + + t.Run("relink any", func(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + 
kind := faker.Word() + + spec1 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + spec2 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Links: map[string][]scheme.PortLocation{ + node.PortIO: { + { + ID: spec1.GetID(), + Port: node.PortIO, + }, + }, + }, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec1) + st.InsertOne(context.Background(), spec2) + + r2, err := ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec2.GetID())) + assert.NoError(t, err) + assert.NotNil(t, r2) + + ok, err := ld.UnloadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec1.GetID())) + assert.NoError(t, err) + assert.True(t, ok) + + r1, err := ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec1.GetID())) + assert.NoError(t, err) + assert.NotNil(t, r1) + + n1, ok := tb.Lookup(spec1.GetID()) + assert.True(t, ok) + + n2, ok := tb.Lookup(spec2.GetID()) + assert.True(t, ok) + + p1, _ := n1.Port(node.PortIO) + p2, _ := n2.Port(node.PortIO) + + assert.Equal(t, p1.Links(), 1) + assert.GreaterOrEqual(t, p2.Links(), 1) + }) +} + +func TestLoader_LoadMany(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + kind := faker.Word() + + spec1 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + spec2 := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Links: 
map[string][]scheme.PortLocation{ + node.PortIO: { + { + ID: spec1.GetID(), + Port: node.PortIO, + }, + }, + }, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec1) + st.InsertOne(context.Background(), spec2) + + r, err := ld.LoadMany(context.Background(), nil) + assert.NoError(t, err) + assert.Len(t, r, 2) + + _, ok := tb.Lookup(spec1.GetID()) + assert.True(t, ok) + + _, ok = tb.Lookup(spec2.GetID()) + assert.True(t, ok) +} + +func TestLoader_UnloadOne(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + kind := faker.Word() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec) + + _, _ = ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + + ok, err := ld.UnloadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.True(t, ok) + + _, ok = tb.Lookup(spec.GetID()) + assert.False(t, ok) +} + +func TestLoader_UnloadMany(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), 
Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + kind := faker.Word() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), spec) + + _, _ = ld.LoadOne(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + + count, err := ld.UnloadMany(context.Background(), storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.Equal(t, 1, count) + + _, ok := tb.Lookup(spec.GetID()) + assert.False(t, ok) +} diff --git a/pkg/loader/reconciler.go b/pkg/loader/reconciler.go new file mode 100644 index 00000000..bc97a1cb --- /dev/null +++ b/pkg/loader/reconciler.go @@ -0,0 +1,129 @@ +package loader + +import ( + "context" + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" +) + +type ( + // ReconcilerConfig is a config for for the Reconciler. + ReconcilerConfig struct { + Remote *storage.Storage + Loader *Loader + Filter *storage.Filter + } + + // Reconciler keeps up to date symbol.Table by tracking changes to the scheme.Spec. + Reconciler struct { + remote *storage.Storage + loader *Loader + filter *storage.Filter + stream *storage.Stream + done chan struct{} + mu sync.Mutex + } +) + +// NewReconciler returns a new Reconciler. +func NewReconciler(config ReconcilerConfig) *Reconciler { + remote := config.Remote + loader := config.Loader + filter := config.Filter + + return &Reconciler{ + remote: remote, + loader: loader, + filter: filter, + done: make(chan struct{}), + } +} + +// Watch starts to watch the changes. 
+func (r *Reconciler) Watch(ctx context.Context) error { + _, err := r.watch(ctx) + return err +} + +// Reconcile starts to reflects the changes. +func (r *Reconciler) Reconcile(ctx context.Context) error { + stream, err := r.watch(ctx) + if err != nil { + return err + } + + for { + select { + case <-r.done: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-stream.Done(): + stream, err = r.watch(ctx) + if err != nil { + return err + } + case event, ok := <-stream.Next(): + if !ok { + return nil + } + + if _, err := r.loader.LoadOne(ctx, storage.Where[ulid.ULID](scheme.KeyID).EQ(event.NodeID)); err != nil { + return err + } + } + } +} + +// Close closes the Reconciler. +func (r *Reconciler) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + + select { + case <-r.done: + return nil + default: + } + + if r.stream == nil { + return nil + } + if err := r.stream.Close(); err != nil { + return err + } + r.stream = nil + close(r.done) + return nil +} + +func (r *Reconciler) watch(ctx context.Context) (*storage.Stream, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.stream != nil { + return r.stream, nil + } + s, err := r.remote.Watch(ctx, r.filter) + if err != nil { + return nil, err + } + + go func() { + select { + case <-s.Done(): + r.mu.Lock() + defer r.mu.Unlock() + + r.stream = nil + case <-r.done: + return + } + }() + + r.stream = s + return s, nil +} diff --git a/pkg/loader/reconciler_test.go b/pkg/loader/reconciler_test.go new file mode 100644 index 00000000..14d84a5b --- /dev/null +++ b/pkg/loader/reconciler_test.go @@ -0,0 +1,80 @@ +package loader + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/siyul-park/uniflow/pkg/symbol" + "github.com/stretchr/testify/assert" +) + +func 
TestReconciler_Reconcile(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + tb := symbol.NewTable() + defer func() { _ = tb.Close() }() + + ld, _ := New(context.Background(), Config{ + Scheme: s, + Storage: st, + Table: tb, + }) + + r := NewReconciler(ReconcilerConfig{ + Remote: st, + Loader: ld, + }) + defer func() { _ = r.Close() }() + + err := r.Watch(context.Background()) + assert.NoError(t, err) + + go func() { + err := r.Reconcile(context.Background()) + assert.NoError(t, err) + }() + + kind := faker.Word() + + m := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + codec := scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, codec) + + st.InsertOne(context.Background(), m) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + for { + select { + case <-ctx.Done(): + assert.Fail(t, "timeout") + return + default: + if _, ok := tb.Lookup(m.GetID()); ok { + return + } + } + } +} diff --git a/pkg/node/error.go b/pkg/node/error.go new file mode 100644 index 00000000..77f6d272 --- /dev/null +++ b/pkg/node/error.go @@ -0,0 +1,8 @@ +package node + +import "errors" + +var ( + ErrInvalidPacket = errors.New("packet is invalid") + ErrDiscardPacket = errors.New("packet is discard") +) diff --git a/pkg/node/node.go b/pkg/node/node.go new file mode 100644 index 00000000..2eeecfea --- /dev/null +++ b/pkg/node/node.go @@ -0,0 +1,15 @@ +package node + +import ( + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/port" +) + +type ( + // Node is an operational unit that processes *packet.Packet. 
+ Node interface { + ID() ulid.ULID + Port(name string) (*port.Port, bool) + Close() error + } +) diff --git a/pkg/node/onetomany.go b/pkg/node/onetomany.go new file mode 100644 index 00000000..57fd88ff --- /dev/null +++ b/pkg/node/onetomany.go @@ -0,0 +1,205 @@ +package node + +import ( + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/process" +) + +type ( + // OneToManyNodeConfig is a config for ActionNode. + OneToManyNodeConfig struct { + ID ulid.ULID + Action func(*process.Process, *packet.Packet) ([]*packet.Packet, *packet.Packet) + } + + // OneToManyNode provide process *packet.Packet one source and many distance. + OneToManyNode struct { + id ulid.ULID + action func(*process.Process, *packet.Packet) ([]*packet.Packet, *packet.Packet) + inPort *port.Port + outPorts []*port.Port + errPort *port.Port + mu sync.RWMutex + } +) + +var _ Node = &OneToManyNode{} + +// NewOneToManyNode returns a new OneToManyNode. 
+func NewOneToManyNode(config OneToManyNodeConfig) *OneToManyNode { + id := config.ID + action := config.Action + + if util.IsZero(id) { + id = ulid.Make() + } + if action == nil { + action = func(_ *process.Process, _ *packet.Packet) ([]*packet.Packet, *packet.Packet) { + return nil, nil + } + } + + n := &OneToManyNode{ + id: id, + action: action, + inPort: port.New(), + outPorts: nil, + errPort: port.New(), + } + + n.inPort.AddInitHook(port.InitHookFunc(n.forward)) + n.errPort.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + errStream := n.errPort.Open(proc) + + n.backward(proc, errStream) + })) + + return n +} + +func (n *OneToManyNode) ID() ulid.ULID { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.id +} + +func (n *OneToManyNode) Port(name string) (*port.Port, bool) { + n.mu.Lock() + defer n.mu.Unlock() + + switch name { + case PortIn: + return n.inPort, true + case PortErr: + return n.errPort, true + default: + } + + if i, ok := port.GetIndex(PortOut, name); ok { + for j := 0; j <= i; j++ { + if len(n.outPorts) <= j { + outPort := port.New() + outPort.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + outStream := outPort.Open(proc) + + n.backward(proc, outStream) + })) + n.outPorts = append(n.outPorts, outPort) + } + } + + return n.outPorts[i], true + } + + return nil, false +} + +func (n *OneToManyNode) Close() error { + n.mu.Lock() + defer n.mu.Unlock() + + n.inPort.Close() + for _, p := range n.outPorts { + p.Close() + } + n.errPort.Close() + + return nil +} + +func (n *OneToManyNode) forward(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + inStream := n.inPort.Open(proc) + outStreams := make([]*port.Stream, len(n.outPorts)) + for i, p := range n.outPorts { + outStreams[i] = p.Open(proc) + } + errStream := n.errPort.Open(proc) + + for func() bool { + inPck, ok := <-inStream.Receive() + if !ok { + return false + } + + if outPcks, 
errPck := n.action(proc, inPck); errPck != nil { + if errPck == inPck { + errPck = packet.New(errPck.Payload()) + } + proc.Stack().Link(inPck.ID(), errPck.ID()) + if errStream.Links() > 0 { + proc.Stack().Push(errPck.ID(), inStream.ID()) + errStream.Send(errPck) + } else { + inStream.Send(errPck) + } + } else if len(outPcks) > 0 && len(outStreams) > 0 { + var ok bool + for i, outPck := range outPcks { + if len(outStreams) <= i { + break + } + if outPck == nil { + continue + } + outStream := outStreams[i] + + if outStream.Links() > 0 { + if outPck == inPck { + outPck = packet.New(outPck.Payload()) + } + proc.Stack().Link(inPck.ID(), outPck.ID()) + proc.Stack().Push(outPck.ID(), inStream.ID()) + outStream.Send(outPck) + + ok = true + } + } + + if !ok { + proc.Stack().Clear(inPck.ID()) + } + } else { + proc.Stack().Clear(inPck.ID()) + } + + return true + }() { + } +} + +func (n *OneToManyNode) backward(proc *process.Process, outStream *port.Stream) { + n.mu.RLock() + defer n.mu.RUnlock() + + var inStream *port.Stream + + for { + backPck, ok := <-outStream.Receive() + if !ok { + return + } + + if inStream == nil { + inStream = n.inPort.Open(proc) + } + + if _, ok := proc.Stack().Pop(backPck.ID(), inStream.ID()); ok { + inStream.Send(backPck) + } + } +} diff --git a/pkg/node/onetomany_test.go b/pkg/node/onetomany_test.go new file mode 100644 index 00000000..7b9855e8 --- /dev/null +++ b/pkg/node/onetomany_test.go @@ -0,0 +1,132 @@ +package node + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNewOneToManyNode(t *testing.T) { + n := NewOneToManyNode(OneToManyNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) ([]*packet.Packet, *packet.Packet) { + return []*packet.Packet{inPck}, nil + }, + }) 
+ assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + assert.NoError(t, n.Close()) +} + +func TestOneToManyNode_Port(t *testing.T) { + n := NewOneToManyNode(OneToManyNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) ([]*packet.Packet, *packet.Packet) { + return []*packet.Packet{inPck}, nil + }, + }) + defer func() { _ = n.Close() }() + + p, ok := n.Port(PortIn) + assert.True(t, ok) + assert.NotNil(t, p) + + p, ok = n.Port(port.SetIndex(PortOut, 0)) + assert.True(t, ok) + assert.NotNil(t, p) + + p, ok = n.Port(PortErr) + assert.True(t, ok) + assert.NotNil(t, p) +} + +func TestOneToManyNode_Send(t *testing.T) { + t.Run("return out", func(t *testing.T) { + n := NewOneToManyNode(OneToManyNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) ([]*packet.Packet, *packet.Packet) { + return []*packet.Packet{inPck}, nil + }, + }) + defer func() { _ = n.Close() }() + + in := port.New() + inPort, _ := n.Port(PortIn) + inPort.Link(in) + + out := port.New() + outPort, _ := n.Port(port.SetIndex(PortOut, 0)) + outPort.Link(out) + + proc := process.New() + defer proc.Close() + + inStream := in.Open(proc) + outStream := out.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + inStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-outStream.Receive(): + assert.Equal(t, inPayload, outPck.Payload()) + outStream.Send(outPck) + select { + case outPck := <-inStream.Receive(): + assert.NotNil(t, outPck) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run("return err", func(t *testing.T) { + n := NewOneToManyNode(OneToManyNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) ([]*packet.Packet, *packet.Packet) { + return nil, packet.New(primitive.NewString(faker.Word())) + }, + }) + defer func() { _ = n.Close() }() + + in := 
port.New() + inPort, _ := n.Port(PortIn) + inPort.Link(in) + + err := port.New() + errPort, _ := n.Port(PortErr) + errPort.Link(err) + + proc := process.New() + defer proc.Close() + + inStream := in.Open(proc) + errStream := err.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + inStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-errStream.Receive(): + assert.NotNil(t, outPck) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) +} diff --git a/pkg/node/onetoone.go b/pkg/node/onetoone.go new file mode 100644 index 00000000..0c0a28a8 --- /dev/null +++ b/pkg/node/onetoone.go @@ -0,0 +1,204 @@ +package node + +import ( + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/process" +) + +type ( + // OneToOneNodeConfig is a config for ActionNode. + OneToOneNodeConfig struct { + ID ulid.ULID + Action func(*process.Process, *packet.Packet) (*packet.Packet, *packet.Packet) + } + + // OneToOneNode provide process *packet.Packet one source and onde distance. + OneToOneNode struct { + id ulid.ULID + action func(*process.Process, *packet.Packet) (*packet.Packet, *packet.Packet) + ioPort *port.Port + inPort *port.Port + outPort *port.Port + errPort *port.Port + mu sync.RWMutex + } +) + +var _ Node = &OneToOneNode{} + +// NewOneToOneNode returns a new OneToOneNode. 
+func NewOneToOneNode(config OneToOneNodeConfig) *OneToOneNode { + id := config.ID + action := config.Action + + if util.IsZero(id) { + id = ulid.Make() + } + if action == nil { + action = func(_ *process.Process, _ *packet.Packet) (*packet.Packet, *packet.Packet) { + return nil, nil + } + } + + n := &OneToOneNode{ + id: id, + action: action, + ioPort: port.New(), + inPort: port.New(), + outPort: port.New(), + errPort: port.New(), + } + + n.ioPort.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + ioStream := n.ioPort.Open(proc) + + n.forward(proc, ioStream, ioStream) + })) + n.inPort.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + inStream := n.inPort.Open(proc) + outStream := n.outPort.Open(proc) + + n.forward(proc, inStream, outStream) + })) + n.outPort.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + outStream := n.outPort.Open(proc) + + n.backward(proc, outStream) + })) + n.errPort.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + n.mu.RLock() + defer n.mu.RUnlock() + + errStream := n.errPort.Open(proc) + + n.backward(proc, errStream) + })) + + return n +} + +func (n *OneToOneNode) ID() ulid.ULID { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.id +} + +func (n *OneToOneNode) Port(name string) (*port.Port, bool) { + n.mu.RLock() + defer n.mu.RUnlock() + + switch name { + case PortIO: + return n.ioPort, true + case PortIn: + return n.inPort, true + case PortOut: + return n.outPort, true + case PortErr: + return n.errPort, true + default: + } + + return nil, false +} + +func (n *OneToOneNode) Close() error { + n.mu.Lock() + defer n.mu.Unlock() + + n.ioPort.Close() + n.inPort.Close() + n.outPort.Close() + n.errPort.Close() + + return nil +} + +func (n *OneToOneNode) forward(proc *process.Process, inStream *port.Stream, outStream *port.Stream) { + n.mu.RLock() + defer n.mu.RUnlock() + + 
errStream := n.errPort.Open(proc) + + for func() bool { + inPck, ok := <-inStream.Receive() + if !ok { + return false + } + + if outPck, errPck := n.action(proc, inPck); errPck != nil { + if errPck == inPck { + errPck = packet.New(errPck.Payload()) + } + proc.Stack().Link(inPck.ID(), errPck.ID()) + if errStream.Links() > 0 { + proc.Stack().Push(errPck.ID(), inStream.ID()) + errStream.Send(errPck) + } else { + inStream.Send(errPck) + } + } else if outPck != nil && outStream.Links() > 0 { + if outPck == inPck { + outPck = packet.New(outPck.Payload()) + } + proc.Stack().Link(inPck.ID(), outPck.ID()) + if outStream != inStream { + proc.Stack().Push(outPck.ID(), inStream.ID()) + outStream.Send(outPck) + } else { + inStream.Send(outPck) + } + } else { + proc.Stack().Clear(inPck.ID()) + } + + return true + }() { + } +} + +func (n *OneToOneNode) backward(proc *process.Process, outStream *port.Stream) { + n.mu.RLock() + defer n.mu.RUnlock() + + var ioStream *port.Stream + var inStream *port.Stream + + for func() bool { + backPck, ok := <-outStream.Receive() + if !ok { + return false + } + + if ioStream == nil { + ioStream = n.ioPort.Open(proc) + } + if inStream == nil { + inStream = n.inPort.Open(proc) + } + + if _, ok := proc.Stack().Pop(backPck.ID(), ioStream.ID()); ok { + ioStream.Send(backPck) + } else if _, ok := proc.Stack().Pop(backPck.ID(), inStream.ID()); ok { + inStream.Send(backPck) + } + + return true + }() { + } +} diff --git a/pkg/node/onetoone_test.go b/pkg/node/onetoone_test.go new file mode 100644 index 00000000..fdb3fe1f --- /dev/null +++ b/pkg/node/onetoone_test.go @@ -0,0 +1,212 @@ +package node + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNewOneToOneNode(t *testing.T) { + n := 
NewOneToOneNode(OneToOneNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + return inPck, nil + }, + }) + assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + assert.NoError(t, n.Close()) +} + +func TestOneToOneNode_Port(t *testing.T) { + n := NewOneToOneNode(OneToOneNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + return inPck, nil + }, + }) + defer func() { _ = n.Close() }() + + p, ok := n.Port(PortIO) + assert.True(t, ok) + assert.NotNil(t, p) + + p, ok = n.Port(PortIn) + assert.True(t, ok) + assert.NotNil(t, p) + + p, ok = n.Port(PortOut) + assert.True(t, ok) + assert.NotNil(t, p) + + p, ok = n.Port(PortErr) + assert.True(t, ok) + assert.NotNil(t, p) +} + +func TestOneToOneNode_Send(t *testing.T) { + t.Run("IO", func(t *testing.T) { + t.Run("return out", func(t *testing.T) { + n := NewOneToOneNode(OneToOneNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + return inPck, nil + }, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + assert.Equal(t, inPayload, outPck.Payload()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run("return err", func(t *testing.T) { + n := NewOneToOneNode(OneToOneNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + return nil, packet.New(primitive.NewString(faker.Word())) + }, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(PortIO) + ioPort.Link(io) + + err := port.New() + errPort, 
_ := n.Port(PortErr) + errPort.Link(err) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + errStream := err.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-errStream.Receive(): + assert.NotNil(t, outPck) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + }) + + t.Run("In/Out", func(t *testing.T) { + t.Run("return out", func(t *testing.T) { + n := NewOneToOneNode(OneToOneNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + return inPck, nil + }, + }) + defer func() { _ = n.Close() }() + + in := port.New() + inPort, _ := n.Port(PortIn) + inPort.Link(in) + + out := port.New() + outPort, _ := n.Port(PortOut) + outPort.Link(out) + + proc := process.New() + defer proc.Close() + + inStream := in.Open(proc) + outStream := out.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + inStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-outStream.Receive(): + assert.Equal(t, inPayload, outPck.Payload()) + + outStream.Send(outPck) + select { + case outPck := <-inStream.Receive(): + assert.NotNil(t, outPck) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run("return err", func(t *testing.T) { + n := NewOneToOneNode(OneToOneNodeConfig{ + Action: func(_ *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + return nil, packet.New(primitive.NewString(faker.Word())) + }, + }) + defer func() { _ = n.Close() }() + + in := port.New() + inPort, _ := n.Port(PortIn) + inPort.Link(in) + + err := port.New() + errPort, _ := n.Port(PortErr) + errPort.Link(err) + + proc := 
process.New() + defer proc.Close() + + inStream := in.Open(proc) + errStream := err.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + inStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-errStream.Receive(): + assert.NotNil(t, outPck) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + }) +} diff --git a/pkg/node/ports.go b/pkg/node/ports.go new file mode 100644 index 00000000..d84d193c --- /dev/null +++ b/pkg/node/ports.go @@ -0,0 +1,8 @@ +package node + +const ( + PortIO = "io" + PortIn = "in" + PortOut = "out" + PortErr = "error" +) diff --git a/pkg/node/wrap.go b/pkg/node/wrap.go new file mode 100644 index 00000000..42dbe2b2 --- /dev/null +++ b/pkg/node/wrap.go @@ -0,0 +1,19 @@ +package node + +type ( + Wrapper interface { + Wrap(Node) error + Unwrap() Node + } +) + +// Unwrap unwraps all nested Wrapper. +func Unwrap(node Node) Node { + for { + if wrapper, ok := node.(Wrapper); ok { + node = wrapper.Unwrap() + } else { + return node + } + } +} diff --git a/pkg/packet/packet.go b/pkg/packet/packet.go new file mode 100644 index 00000000..624e70ef --- /dev/null +++ b/pkg/packet/packet.go @@ -0,0 +1,42 @@ +package packet + +import ( + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + // Packet is a formalized block of data. + Packet struct { + id ulid.ULID + payload primitive.Object + } +) + +func NewError(err error, cause *Packet) *Packet { + var pairs []primitive.Object + pairs = append(pairs, primitive.NewString("error"), primitive.NewString(err.Error())) + if cause != nil { + pairs = append(pairs, primitive.NewString("cause"), cause.Payload()) + } + + return New(primitive.NewMap(pairs...)) +} + +// New returns a new Packet. 
+func New(payload primitive.Object) *Packet { + return &Packet{ + id: ulid.Make(), + payload: payload, + } +} + +// ID returns the ID of the Packet +func (pck *Packet) ID() ulid.ULID { + return pck.id +} + +// Payload returns the payload of the Packet. +func (pck *Packet) Payload() primitive.Object { + return pck.payload +} diff --git a/pkg/packet/packet_test.go b/pkg/packet/packet_test.go new file mode 100644 index 00000000..c81233a2 --- /dev/null +++ b/pkg/packet/packet_test.go @@ -0,0 +1,31 @@ +package packet + +import ( + "errors" + "testing" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/stretchr/testify/assert" +) + +func TestNewError(t *testing.T) { + err := errors.New(faker.Sentence()) + + pck1 := New(primitive.NewString(faker.Word())) + pck2 := NewError(err, pck1) + + assert.NotNil(t, pck2) + assert.NotZero(t, pck2.ID()) + + payload, ok := pck2.Payload().(*primitive.Map) + assert.True(t, ok) + assert.Equal(t, err.Error(), payload.GetOr(primitive.NewString("error"), nil).Interface()) + assert.Equal(t, pck1.Payload(), payload.GetOr(primitive.NewString("cause"), nil)) +} + +func TestNew(t *testing.T) { + pck := New(nil) + assert.NotNil(t, pck) + assert.NotZero(t, pck.ID()) +} diff --git a/pkg/plugin/controllx/builder.go b/pkg/plugin/controllx/builder.go new file mode 100644 index 00000000..7f92e71e --- /dev/null +++ b/pkg/plugin/controllx/builder.go @@ -0,0 +1,35 @@ +package controllx + +import ( + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" +) + +func AddToScheme() func(*scheme.Scheme) error { + return func(s *scheme.Scheme) error { + s.AddKnownType(KindSnippet, &SnippetSpec{}) + s.AddCodec(KindSnippet, scheme.CodecWithType[*SnippetSpec](func(spec *SnippetSpec) (node.Node, error) { + return NewSnippetNode(SnippetNodeConfig{ + ID: spec.ID, + Lang: spec.Lang, + Code: spec.Code, + }) + })) + + s.AddKnownType(KindSwitch, &SwitchSpec{}) + s.AddCodec(KindSwitch, 
scheme.CodecWithType[*SwitchSpec](func(spec *SwitchSpec) (node.Node, error) { + n := NewSwitchNode(SwitchNodeConfig{ + ID: spec.ID, + }) + for _, v := range spec.Match { + if err := n.Add(v.When, v.Port); err != nil { + _ = n.Close() + return nil, err + } + } + return n, nil + })) + + return nil + } +} diff --git a/pkg/plugin/controllx/builder_test.go b/pkg/plugin/controllx/builder_test.go new file mode 100644 index 00000000..6339aaaf --- /dev/null +++ b/pkg/plugin/controllx/builder_test.go @@ -0,0 +1,18 @@ +package controllx + +import ( + "testing" + + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/stretchr/testify/assert" +) + +func TestAddToScheme(t *testing.T) { + s := scheme.New() + + err := AddToScheme()(s) + assert.NoError(t, err) + + _, ok := s.Codec(KindSnippet) + assert.True(t, ok) +} diff --git a/pkg/plugin/controllx/snippet.go b/pkg/plugin/controllx/snippet.go new file mode 100644 index 00000000..de93a671 --- /dev/null +++ b/pkg/plugin/controllx/snippet.go @@ -0,0 +1,187 @@ +package controllx + +import ( + "encoding/json" + "reflect" + "strings" + "sync" + + "github.com/dop251/goja" + "github.com/evanw/esbuild/pkg/api" + "github.com/iancoleman/strcase" + "github.com/oklog/ulid/v2" + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/xiatechs/jsonata-go" +) + +type ( + SnippetNodeConfig struct { + ID ulid.ULID + Lang string + Code string + } + + SnippetNode struct { + *node.OneToOneNode + run func(any) (any, error) + } + + SnippetSpec struct { + scheme.SpecMeta `map:",inline"` + Lang string `map:"lang"` + Code string `map:"code"` + } + + fieldNameMapper struct{} +) + +const ( + KindSnippet = "snippet" +) + +const ( + LangTypescript = "typescript" + LangJavascript = "javascript" + LangJSON = "json" + LangJSONata = "jsonata" +) + +var _ 
node.Node = &SnippetNode{} + +var ( + ErrEntryPointNotUndeclared = errors.New("entry point is undeclared") + ErrNotSupportedLanguage = errors.New("language is not supported") +) + +func NewSnippetNode(config SnippetNodeConfig) (*SnippetNode, error) { + defer func() { _ = recover() }() + + id := config.ID + lang := config.Lang + code := config.Code + + run, err := compile(lang, code) + if err != nil { + return nil, err + } + + n := &SnippetNode{ + run: run, + } + n.OneToOneNode = node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: id, + Action: n.action, + }) + + return n, nil +} + +func (n *SnippetNode) action(proc *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + inPayload := inPck.Payload() + + var input any + if inPayload != nil { + input = inPayload.Interface() + } + + if output, err := n.run(input); err != nil { + return nil, packet.NewError(err, inPck) + } else if outPayload, err := primitive.MarshalText(output); err != nil { + return nil, packet.NewError(err, inPck) + } else { + return packet.New(outPayload), nil + } +} + +func compile(lang, code string) (func(any) (any, error), error) { + switch lang { + case LangJSON: + var val any + if err := json.Unmarshal([]byte(code), &val); err != nil { + return nil, err + } + + return func(payload any) (any, error) { + return val, nil + }, nil + case LangTypescript, LangJavascript: + if lang == LangTypescript { + result := api.Transform(code, api.TransformOptions{ + Loader: api.LoaderTS, + }) + if len(result.Errors) > 0 { + var msgs []string + for _, msg := range result.Errors { + msgs = append(msgs, msg.Text) + } + return nil, errors.New(strings.Join(msgs, ", ")) + } + code = string(result.Code) + } + program, err := goja.Compile("", code, true) + if err != nil { + return nil, err + } + + vm := goja.New() + if _, err := vm.RunProgram(program); err != nil { + return nil, err + } + if _, ok := goja.AssertFunction(vm.Get("main")); !ok { + return nil, 
errors.WithStack(ErrEntryPointNotUndeclared) + } + + vmPool := &sync.Pool{ + New: func() any { + vm := goja.New() + _, _ = vm.RunProgram(program) + vm.SetFieldNameMapper(&fieldNameMapper{}) + return vm + }, + } + + return func(payload any) (any, error) { + vm := vmPool.Get().(*goja.Runtime) + defer vmPool.Put(vm) + + main, ok := goja.AssertFunction(vm.Get("main")) + if !ok { + return nil, errors.WithStack(ErrEntryPointNotUndeclared) + } + + if output, err := main(goja.Undefined(), vm.ToValue(payload)); err != nil { + return nil, err + } else { + return output.Export(), nil + } + }, nil + case LangJSONata: + exp, err := jsonata.Compile(code) + if err != nil { + return nil, err + } + + return func(payload any) (any, error) { + if output, err := exp.Eval(payload); err != nil { + return nil, err + } else { + return output, nil + } + }, nil + default: + return nil, errors.WithStack(ErrNotSupportedLanguage) + } +} + +func (*fieldNameMapper) FieldName(t reflect.Type, f reflect.StructField) string { + return strcase.ToLowerCamel(f.Name) +} + +func (*fieldNameMapper) MethodName(t reflect.Type, m reflect.Method) string { + return strcase.ToLowerCamel(m.Name) +} diff --git a/pkg/plugin/controllx/snippet_test.go b/pkg/plugin/controllx/snippet_test.go new file mode 100644 index 00000000..29915706 --- /dev/null +++ b/pkg/plugin/controllx/snippet_test.go @@ -0,0 +1,310 @@ +package controllx + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNewSnippetNode(t *testing.T) { + n, err := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJSON, + Code: "{}", + }) + assert.NoError(t, err) + assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + _ = n.Close() +} + +func 
TestSnippetNode_Send(t *testing.T) { + t.Run(LangTypescript, func(t *testing.T) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangTypescript, + Code: ` +function main(inPayload: any): any { + return inPayload; +} + `, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + assert.Equal(t, inPayload, outPck.Payload()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run(LangJavascript, func(t *testing.T) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJavascript, + Code: ` +function main(inPayload) { + return inPayload; +} + `, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + assert.Equal(t, inPayload, outPck.Payload()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run(LangJSON, func(t *testing.T) { + data := faker.UUIDHyphenated() + + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJSON, + Code: fmt.Sprintf("\"%s\"", data), + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + 
ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + assert.Equal(t, data, outPck.Payload().Interface()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run(LangJSONata, func(t *testing.T) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJSONata, + Code: "$", + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + assert.Equal(t, inPayload, outPck.Payload()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) +} + +func BenchmarkSnippetNode_Send(b *testing.B) { + b.Run(LangTypescript, func(b *testing.B) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangTypescript, + Code: ` +function main(inPayload: any): any { + return inPayload; +} + `, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + for i := 0; i < b.N; i++ { + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case <-ioStream.Receive(): + case <-ctx.Done(): + assert.Fail(b, "timeout") + } + } + }) + + b.Run(LangJavascript, func(b *testing.B) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJavascript, + Code: ` +function main(inPayload) { + return inPayload; +} + `, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := 
n.Port(node.PortIO) + ioPort.Link(io) + + for i := 0; i < b.N; i++ { + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case <-ioStream.Receive(): + case <-ctx.Done(): + assert.Fail(b, "timeout") + } + } + }) + + b.Run(LangJSON, func(b *testing.B) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJSON, + Code: fmt.Sprintf("\"%s\"", faker.UUIDHyphenated()), + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + for i := 0; i < b.N; i++ { + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case <-ioStream.Receive(): + case <-ctx.Done(): + assert.Fail(b, "timeout") + } + } + }) + + b.Run(LangJSONata, func(b *testing.B) { + n, _ := NewSnippetNode(SnippetNodeConfig{ + Lang: LangJSONata, + Code: "$", + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + for i := 0; i < b.N; i++ { + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewString(faker.UUIDHyphenated()) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case <-ioStream.Receive(): + case <-ctx.Done(): + assert.Fail(b, "timeout") + } + } + }) +} diff --git a/pkg/plugin/controllx/switch.go b/pkg/plugin/controllx/switch.go new file mode 100644 index 00000000..841a6362 --- /dev/null +++ b/pkg/plugin/controllx/switch.go @@ -0,0 +1,105 
@@ +package controllx + +import ( + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/xiatechs/jsonata-go" +) + +type ( + SwitchNodeConfig struct { + ID ulid.ULID + } + + SwitchNode struct { + *node.OneToManyNode + conditions []condition + mu sync.RWMutex + } + + SwitchSpec struct { + scheme.SpecMeta `map:",inline"` + Match []Condition `map:"match"` + } + + Condition struct { + When string `map:"when"` + Port string `map:"port"` + } + + condition struct { + when *jsonata.Expr + port string + } +) + +const ( + KindSwitch = "switch" +) + +var _ node.Node = &SwitchNode{} + +func NewSwitchNode(config SwitchNodeConfig) *SwitchNode { + id := config.ID + + n := &SwitchNode{} + n.OneToManyNode = node.NewOneToManyNode(node.OneToManyNodeConfig{ + ID: id, + Action: n.action, + }) + + return n +} + +func (n *SwitchNode) Add(when string, port string) error { + n.mu.Lock() + defer n.mu.Unlock() + + exp, err := jsonata.Compile(when) + if err != nil { + return err + } + + n.conditions = append(n.conditions, condition{when: exp, port: port}) + return nil +} + +func (n *SwitchNode) Close() error { + n.mu.Lock() + defer n.mu.Unlock() + + n.conditions = nil + return n.OneToManyNode.Close() +} + +func (n *SwitchNode) action(proc *process.Process, inPck *packet.Packet) ([]*packet.Packet, *packet.Packet) { + n.mu.RLock() + defer n.mu.RUnlock() + + inPayload := inPck.Payload() + + var input any + if inPayload != nil { + input = inPayload.Interface() + } + + for _, cond := range n.conditions { + if output, _ := cond.when.Eval(input); !util.IsZero(output) { + if i, ok := port.GetIndex(node.PortOut, cond.port); ok { + outPcks := make([]*packet.Packet, i+1) + outPcks[i] = inPck + + return outPcks, nil + } + } + } + + return 
nil, packet.NewError(node.ErrDiscardPacket, inPck) +} diff --git a/pkg/plugin/controllx/switch_test.go b/pkg/plugin/controllx/switch_test.go new file mode 100644 index 00000000..22fc7a5e --- /dev/null +++ b/pkg/plugin/controllx/switch_test.go @@ -0,0 +1,103 @@ +package controllx + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNewSwitchNode(t *testing.T) { + n := NewSwitchNode(SwitchNodeConfig{}) + assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + _ = n.Close() +} + +func TestSwitchNode_Send(t *testing.T) { + n := NewSwitchNode(SwitchNodeConfig{}) + defer func() { _ = n.Close() }() + + in := port.New() + inPort, _ := n.Port(node.PortIn) + inPort.Link(in) + + err := n.Add("$.a", "out[0]") + assert.NoError(t, err) + err = n.Add("$.b", "out[1]") + assert.NoError(t, err) + err = n.Add("$.a = $.b", "out[2]") + assert.NoError(t, err) + err = n.Add("true", "out[3]") + assert.NoError(t, err) + + testCases := []struct { + when any + expect string + }{ + { + when: map[string]bool{ + "a": true, + }, + expect: "out[0]", + }, + { + when: map[string]bool{ + "b": true, + }, + expect: "out[1]", + }, + { + when: map[string]int{ + "a": 0, + "b": 0, + }, + expect: "out[2]", + }, + { + when: map[string]any{ + "a": 0, + "b": false, + }, + expect: "out[3]", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%v", tc.when), func(t *testing.T) { + out := port.New() + defer out.Close() + outPort, _ := n.Port(tc.expect) + outPort.Link(out) + + proc := process.New() + defer proc.Close() + + inStream := in.Open(proc) + outStream := out.Open(proc) + + inPayload, err := primitive.MarshalText(tc.when) + assert.NoError(t, err) + + inStream.Send(packet.New(inPayload)) + + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-outStream.Receive(): + assert.Equal(t, tc.when, outPck.Payload().Interface()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + } +} diff --git a/pkg/plugin/networkx/builder.go b/pkg/plugin/networkx/builder.go new file mode 100644 index 00000000..23d339a2 --- /dev/null +++ b/pkg/plugin/networkx/builder.go @@ -0,0 +1,45 @@ +package networkx + +import ( + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/symbol" +) + +func AddToHooks() func(*hook.Hook) error { + return func(h *hook.Hook) error { + h.AddPostLoadHook(symbol.PostLoadHookFunc(func(n node.Node) error { + if n, ok := n.(*HTTPNode); ok { + go func() { n.Start() }() + } + return nil + })) + return nil + } +} + +func AddToScheme() func(*scheme.Scheme) error { + return func(s *scheme.Scheme) error { + s.AddKnownType(KindHTTP, &HTTPSpec{}) + s.AddCodec(KindHTTP, scheme.CodecWithType[*HTTPSpec](func(spec *HTTPSpec) (node.Node, error) { + return NewHTTPNode(HTTPNodeConfig{ + ID: spec.ID, + Address: spec.Address, + }), nil + })) + + s.AddKnownType(KindRouter, &RouterSpec{}) + s.AddCodec(KindRouter, scheme.CodecWithType[*RouterSpec](func(spec *RouterSpec) (node.Node, error) { + n := NewRouterNode(RouterNodeConfig{ + ID: spec.ID, + }) + for _, r := range spec.Routes { + n.Add(r.Method, r.Path, r.Port) + } + return n, nil + })) + + return nil + } +} diff --git a/pkg/plugin/networkx/builder_test.go b/pkg/plugin/networkx/builder_test.go new file mode 100644 index 00000000..a87c7388 --- /dev/null +++ b/pkg/plugin/networkx/builder_test.go @@ -0,0 +1,48 @@ +package networkx + +import ( + "fmt" + "testing" + + "github.com/phayes/freeport" + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/stretchr/testify/assert" +) + +func TestAddToHooks(t 
*testing.T) { + hk := hook.New() + + err := AddToHooks()(hk) + assert.NoError(t, err) + + port, err := freeport.GetFreePort() + assert.NoError(t, err) + + n := NewHTTPNode(HTTPNodeConfig{ + Address: fmt.Sprintf(":%d", port), + }) + + err = hk.PostLoad(n) + assert.NoError(t, err) + + errChan := make(chan error) + + err = n.WaitForListen(errChan) + + assert.NoError(t, err) + assert.NoError(t, n.Close()) +} + +func TestAddToScheme(t *testing.T) { + s := scheme.New() + + err := AddToScheme()(s) + assert.NoError(t, err) + + _, ok := s.Codec(KindHTTP) + assert.True(t, ok) + + _, ok = s.KnownType(KindHTTP) + assert.True(t, ok) +} diff --git a/pkg/plugin/networkx/http.go b/pkg/plugin/networkx/http.go new file mode 100644 index 00000000..f6873688 --- /dev/null +++ b/pkg/plugin/networkx/http.go @@ -0,0 +1,563 @@ +package networkx + +import ( + "context" + "crypto/tls" + "errors" + "io" + "net" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/siyul-park/uniflow/pkg/scheme" +) + +type ( + HTTPNodeConfig struct { + ID ulid.ULID + Address string + } + HTTPNode struct { + id ulid.ULID + address string + server *http.Server + listener net.Listener + listenerNetwork string + ioPort *port.Port + errPort *port.Port + mu sync.RWMutex + } + + HTTPPayload struct { + Proto string `map:"proto,omitempty"` + Path string `map:"path,omitempty"` + Method string `map:"method,omitempty"` + Header http.Header `map:"header,omitempty"` + Query url.Values `map:"query,omitempty"` + Cookies []*http.Cookie `map:"cookies,omitempty"` + Body primitive.Object `map:"body,omitempty"` + Status int `map:"status"` + } + + HTTPSpec struct { + scheme.SpecMeta 
`map:",inline"` + Address string `map:"address"` + } + + tcpKeepAliveListener struct { + *net.TCPListener + } +) + +const ( + KindHTTP = "http" +) + +var _ node.Node = &HTTPNode{} +var _ http.Handler = &HTTPNode{} + +const ( + HeaderAccept = "Accept" + HeaderAcceptCharset = "Accept-Charset" + HeaderAcceptEncoding = "Accept-Encoding" + HeaderAcceptLanguage = "Accept-Language" + HeaderAllow = "Allow" + HeaderAuthorization = "Authorization" + HeaderContentDisposition = "Content-Disposition" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderCookie = "Cookie" + HeaderSetCookie = "Set-Cookie" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderLastModified = "Last-Modified" + HeaderLocation = "Location" + HeaderRetryAfter = "Retry-After" + HeaderUpgrade = "Upgrade" + HeaderUpgradeInsecureRequests = "Upgrade-Insecure-Requests" + HeaderVary = "Vary" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderForwarded = "Forwarded" + HeaderXForwardedFor = "X-Forwarded-For" + HeaderXForwardedHost = "X-Forwarded-Host" + HeaderXForwardedProto = "X-Forwarded-Proto" + HeaderXForwardedProtocol = "X-Forwarded-Protocol" + HeaderXForwardedSsl = "X-Forwarded-Ssl" + HeaderXUrlScheme = "X-Url-Scheme" + HeaderXHTTPMethodOverride = "X-HTTP-Method-Override" + HeaderXRealIP = "X-Real-Ip" + HeaderXRequestID = "X-Request-Id" + HeaderXCorrelationID = "X-Correlation-Id" + HeaderXRequestedWith = "X-Requested-With" + HeaderServer = "Server" + HeaderOrigin = "Origin" + HeaderCacheControl = "Cache-Control" + HeaderConnection = "Connection" + HeaderDate = "Date" + HeaderDeviceMemory = "Device-Memory" + HeaderDNT = "DNT" + HeaderDownlink = "Downlink" + HeaderDPR = "DPR" + HeaderEarlyData = "Early-Data" + HeaderECT = "ECT" + HeaderExpect = "Expect" + HeaderExpectCT = "Expect-CT" + HeaderFrom = "From" + HeaderHost = "Host" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfRange = "If-Range" + 
HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderKeepAlive = "Keep-Alive" + HeaderMaxForwards = "Max-Forwards" + HeaderProxyAuthorization = "Proxy-Authorization" + HeaderRange = "Range" + HeaderReferer = "Referer" + HeaderRTT = "RTT" + HeaderSaveData = "Save-Data" + HeaderTE = "TE" + HeaderTk = "Tk" + HeaderTrailer = "Trailer" + HeaderTransferEncoding = "Transfer-Encoding" + HeaderUserAgent = "User-Agent" + HeaderVia = "Via" + HeaderViewportWidth = "Viewport-Width" + HeaderWantDigest = "Want-Digest" + HeaderWarning = "Warning" + HeaderWidth = "Width" + + HeaderAccessControlRequestMethod = "Access-Control-Request-Method" + HeaderAccessControlRequestHeaders = "Access-Control-Request-Headers" + HeaderAccessControlAllowOrigin = "Access-Control-Allow-Origin" + HeaderAccessControlAllowMethods = "Access-Control-Allow-Methods" + HeaderAccessControlAllowHeaders = "Access-Control-Allow-Headers" + HeaderAccessControlAllowCredentials = "Access-Control-Allow-Credentials" + HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers" + HeaderAccessControlMaxAge = "Access-Control-Max-Age" + + HeaderStrictTransportSecurity = "Strict-Transport-Security" + HeaderXContentTypeOptions = "X-Content-Type-Options" + HeaderXXSSProtection = "X-XSS-Protection" + HeaderXFrameOptions = "X-Frame-Options" + HeaderContentSecurityPolicy = "Content-Security-Policy" + HeaderContentSecurityPolicyReportOnly = "Content-Security-Policy-Report-Only" + HeaderXCSRFToken = "X-CSRF-Token" + HeaderReferrerPolicy = "Referrer-Policy" +) + +var ( + BadRequest = NewHTTPPayload(http.StatusBadRequest) // HTTP 400 Bad Request + Unauthorized = NewHTTPPayload(http.StatusUnauthorized) // HTTP 401 Unauthorized + PaymentRequired = NewHTTPPayload(http.StatusPaymentRequired) // HTTP 402 Payment Required + Forbidden = NewHTTPPayload(http.StatusForbidden) // HTTP 403 Forbidden + NotFound = NewHTTPPayload(http.StatusNotFound) // HTTP 404 Not Found + MethodNotAllowed = NewHTTPPayload(http.StatusMethodNotAllowed) 
// HTTP 405 Method Not Allowed + NotAcceptable = NewHTTPPayload(http.StatusNotAcceptable) // HTTP 406 Not Acceptable + ProxyAuthRequired = NewHTTPPayload(http.StatusProxyAuthRequired) // HTTP 407 Proxy AuthRequired + RequestTimeout = NewHTTPPayload(http.StatusRequestTimeout) // HTTP 408 Request Timeout + Conflict = NewHTTPPayload(http.StatusConflict) // HTTP 409 Conflict + Gone = NewHTTPPayload(http.StatusGone) // HTTP 410 Gone + LengthRequired = NewHTTPPayload(http.StatusLengthRequired) // HTTP 411 Length Required + PreconditionFailed = NewHTTPPayload(http.StatusPreconditionFailed) // HTTP 412 Precondition Failed + StatusRequestEntityTooLarge = NewHTTPPayload(http.StatusRequestEntityTooLarge) // HTTP 413 Payload Too Large + RequestURITooLong = NewHTTPPayload(http.StatusRequestURITooLong) // HTTP 414 URI Too Long + UnsupportedMediaType = NewHTTPPayload(http.StatusUnsupportedMediaType) // HTTP 415 Unsupported Media Type + RequestedRangeNotSatisfiable = NewHTTPPayload(http.StatusRequestedRangeNotSatisfiable) // HTTP 416 Range Not Satisfiable + ExpectationFailed = NewHTTPPayload(http.StatusExpectationFailed) // HTTP 417 Expectation Failed + Teapot = NewHTTPPayload(http.StatusTeapot) // HTTP 418 I'm a teapot + MisdirectedRequest = NewHTTPPayload(http.StatusMisdirectedRequest) // HTTP 421 Misdirected Request + UnprocessableEntity = NewHTTPPayload(http.StatusUnprocessableEntity) // HTTP 422 Unprocessable Entity + Locked = NewHTTPPayload(http.StatusLocked) // HTTP 423 Locked + FailedDependency = NewHTTPPayload(http.StatusFailedDependency) // HTTP 424 Failed Dependency + TooEarly = NewHTTPPayload(http.StatusTooEarly) // HTTP 425 Too Early + UpgradeRequired = NewHTTPPayload(http.StatusUpgradeRequired) // HTTP 426 Upgrade Required + PreconditionRequired = NewHTTPPayload(http.StatusPreconditionRequired) // HTTP 428 Precondition Required + TooManyRequests = NewHTTPPayload(http.StatusTooManyRequests) // HTTP 429 Too Many Requests + RequestHeaderFieldsTooLarge = 
NewHTTPPayload(http.StatusRequestHeaderFieldsTooLarge) // HTTP 431 Request Header Fields Too Large + UnavailableForLegalReasons = NewHTTPPayload(http.StatusUnavailableForLegalReasons) // HTTP 451 Unavailable For Legal Reasons + InternalServerError = NewHTTPPayload(http.StatusInternalServerError) // HTTP 500 Internal Server Error + NotImplemented = NewHTTPPayload(http.StatusNotImplemented) // HTTP 501 Not Implemented + BadGateway = NewHTTPPayload(http.StatusBadGateway) // HTTP 502 Bad Gateway + ServiceUnavailable = NewHTTPPayload(http.StatusServiceUnavailable) // HTTP 503 Service Unavailable + GatewayTimeout = NewHTTPPayload(http.StatusGatewayTimeout) // HTTP 504 Gateway Timeout + HTTPVersionNotSupported = NewHTTPPayload(http.StatusHTTPVersionNotSupported) // HTTP 505 HTTP Version Not Supported + VariantAlsoNegotiates = NewHTTPPayload(http.StatusVariantAlsoNegotiates) // HTTP 506 Variant Also Negotiates + InsufficientStorage = NewHTTPPayload(http.StatusInsufficientStorage) // HTTP 507 Insufficient Storage + LoopDetected = NewHTTPPayload(http.StatusLoopDetected) // HTTP 508 Loop Detected + NotExtended = NewHTTPPayload(http.StatusNotExtended) // HTTP 510 Not Extended + NetworkAuthenticationRequired = NewHTTPPayload(http.StatusNetworkAuthenticationRequired) // HTTP 511 Network Authentication Required +) + +var ( + ErrInvalidListenerNetwork = errors.New("invalid listener network") +) + +var ( + forbiddenResponseHeaderRegexps []*regexp.Regexp +) + +func init() { + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers + forbiddenResponseHeaderPatterns := []string{ + HeaderAccept, HeaderAcceptCharset, HeaderAcceptEncoding, HeaderAcceptLanguage, + HeaderAuthorization, + HeaderConnection, + HeaderCookie, + HeaderDate, + HeaderDeviceMemory, + HeaderDNT, + HeaderDownlink, + HeaderDPR, + HeaderEarlyData, + HeaderECT, + HeaderExpect, HeaderExpectCT, + HeaderForwarded, + HeaderXForwardedFor, HeaderXForwardedHost, HeaderXForwardedProto, HeaderXForwardedProtocol, + 
HeaderFrom, + HeaderHost, + HeaderIfMatch, HeaderIfModifiedSince, HeaderIfNoneMatch, HeaderIfRange, HeaderIfUnmodifiedSince, + HeaderKeepAlive, + HeaderMaxForwards, + HeaderOrigin, + HeaderProxyAuthorization, + HeaderRange, + HeaderReferer, + HeaderRTT, + HeaderSaveData, + "Sec-.*", + HeaderTE, + HeaderTk, + HeaderTrailer, HeaderTransferEncoding, + HeaderUpgrade, HeaderUpgradeInsecureRequests, + HeaderUserAgent, + HeaderVia, + HeaderViewportWidth, + HeaderWantDigest, + HeaderWarning, + HeaderWidth, + } + + for _, pattern := range forbiddenResponseHeaderPatterns { + forbiddenResponseHeaderRegexps = append(forbiddenResponseHeaderRegexps, regexp.MustCompile(pattern)) + } +} + +func NewHTTPNode(config HTTPNodeConfig) *HTTPNode { + id := config.ID + address := config.Address + + if util.IsZero(id) { + id = ulid.Make() + } + + n := &HTTPNode{ + id: id, + address: address, + server: new(http.Server), + listenerNetwork: "tcp", + ioPort: port.New(), + errPort: port.New(), + } + n.server.Handler = n + + return n +} + +func (n *HTTPNode) ID() ulid.ULID { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.id +} + +func (n *HTTPNode) Port(name string) (*port.Port, bool) { + n.mu.RLock() + defer n.mu.RUnlock() + + switch name { + case node.PortIO: + return n.ioPort, true + case node.PortErr: + return n.errPort, true + default: + } + + return nil, false +} + +func (n *HTTPNode) ListenerAddr() net.Addr { + n.mu.RLock() + defer n.mu.RUnlock() + if n.listener == nil { + return nil + } + return n.listener.Addr() +} + +func (n *HTTPNode) WaitForListen(errChan <-chan error) error { + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + ticker := time.NewTicker(5 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + addr := n.ListenerAddr() + if addr != nil && strings.Contains(addr.String(), ":") { + return nil + } + case err := <-errChan: + if err == http.ErrServerClosed 
{ + return nil + } + return err + } + } +} + +func (n *HTTPNode) Start() error { + n.mu.Lock() + n.server.Addr = n.address + if err := n.configureServer(); err != nil { + n.mu.Unlock() + return err + } + n.mu.Unlock() + return n.server.Serve(n.listener) +} + +func (n *HTTPNode) Close() error { + n.mu.RLock() + defer n.mu.RUnlock() + + if err := n.server.Close(); err != nil { + return err + } + n.ioPort.Close() + n.errPort.Close() + + return nil +} + +func (n *HTTPNode) ServeHTTP(w http.ResponseWriter, r *http.Request) { + n.mu.RLock() + defer n.mu.RUnlock() + + proc := process.New() + defer func() { + proc.Stack().Wait() + proc.Close() + }() + + go func() { + select { + case <-r.Context().Done(): + proc.Close() + case <-proc.Done(): + } + }() + + outStream := n.ioPort.Open(proc) + inStream := n.ioPort.Open(proc) + + req, err := n.request(r) + if err != nil { + _ = n.response(r, w, n.errorPayload(proc, UnsupportedMediaType)) + return + } + outPayload, err := primitive.MarshalText(req) + if err != nil { + _ = n.response(r, w, n.errorPayload(proc, BadRequest)) + return + } + outPck := packet.New(outPayload) + outStream.Send(outPck) + + inPck, ok := <-inStream.Receive() + if !ok { + _ = n.response(r, w, n.errorPayload(proc, ServiceUnavailable)) + return + } + proc.Stack().Clear(inPck.ID()) + + inPayload := inPck.Payload() + + var res HTTPPayload + if err := primitive.Unmarshal(inPayload, &res); err != nil { + res.Body = inPayload + } + + if err := n.response(r, w, res); err != nil { + _ = n.response(r, w, n.errorPayload(proc, InternalServerError)) + } +} + +func (n *HTTPNode) request(r *http.Request) (HTTPPayload, error) { + contentType := r.Header.Get(HeaderContentType) + + if b, err := io.ReadAll(r.Body); err != nil { + return HTTPPayload{}, err + } else if b, err := UnmarshalMIME(b, &contentType); err != nil { + return HTTPPayload{}, err + } else { + r.Header.Set(HeaderContentType, contentType) + return HTTPPayload{ + Proto: r.Proto, + Path: r.URL.Path, + Method: 
r.Method, + Header: r.Header, + Query: r.URL.Query(), + Cookies: r.Cookies(), + Body: b, + }, nil + } +} + +func (n *HTTPNode) response(r *http.Request, w http.ResponseWriter, res HTTPPayload) error { + if r.Method == http.MethodHead { + res.Header.Del(HeaderContentType) + res.Body = nil + if res.Status == 200 { + res.Status = 204 + } + } + + contentType := res.Header.Get(HeaderContentType) + b, err := MarshalMIME(res.Body, &contentType) + if err != nil { + return err + } + if res.Header == nil { + res.Header = http.Header{} + } + res.Header.Set(HeaderContentType, contentType) + + for key := range w.Header() { + w.Header().Del(key) + } + for key, headers := range res.Header { + if isForbiddenResponseHeader(key) { + continue + } + for _, header := range headers { + w.Header().Add(key, header) + } + } + w.Header().Set(HeaderContentLength, strconv.Itoa(len(b))) + w.Header().Set(HeaderContentType, contentType) + + status := res.Status + if status == 0 { + if len(b) == 0 { + status = http.StatusNoContent + } else { + status = http.StatusOK + } + } + w.WriteHeader(status) + + if _, err := w.Write(b); err != nil { + return err + } + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + return nil +} + +func (n *HTTPNode) errorPayload(proc *process.Process, err HTTPPayload) HTTPPayload { + if n.errPort.Links() == 0 { + return err + } + + errPayload, _ := primitive.MarshalText(err) + errPck := packet.New(errPayload) + errStream := n.errPort.Open(proc) + errStream.Send(errPck) + + outPck, ok := <-errStream.Receive() + if !ok { + return err + } + + var res HTTPPayload + if err := primitive.Unmarshal(outPck.Payload(), &res); err != nil { + _ = primitive.Unmarshal(outPck.Payload(), &res.Body) + } + return res +} + +func (n *HTTPNode) configureServer() error { + if n.listener == nil { + l, err := newListener(n.server.Addr, n.listenerNetwork) + if err != nil { + return err + } + + if n.server.TLSConfig != nil { + n.listener = tls.NewListener(l, n.server.TLSConfig) + } else { + 
n.listener = l + } + } + return nil +} + +func NewHTTPPayload(status int, body ...primitive.Object) HTTPPayload { + he := HTTPPayload{Status: status, Body: primitive.NewString(http.StatusText(status))} + if len(body) > 0 { + he.Body = body[0] + } + return he +} + +func isForbiddenResponseHeader(header string) bool { + h := []byte(header) + forbidden := false + for _, forbiddenHeader := range forbiddenResponseHeaderRegexps { + if forbiddenHeader.Match(h) { + forbidden = true + break + } + } + return forbidden +} + +func newListener(address, network string) (*tcpKeepAliveListener, error) { + if network != "tcp" && network != "tcp4" && network != "tcp6" { + return nil, ErrInvalidListenerNetwork + } + l, err := net.Listen(network, address) + if err != nil { + return nil, err + } + return &tcpKeepAliveListener{l.(*net.TCPListener)}, nil +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + if c, err = ln.AcceptTCP(); err != nil { + return + } else if err = c.(*net.TCPConn).SetKeepAlive(true); err != nil { + return + } + // Ignore error from setting the KeepAlivePeriod as some systems, such as + // OpenBSD, do not support setting TCP_USER_TIMEOUT on IPPROTO_TCP + _ = c.(*net.TCPConn).SetKeepAlivePeriod(3 * time.Minute) + return +} diff --git a/pkg/plugin/networkx/http_test.go b/pkg/plugin/networkx/http_test.go new file mode 100644 index 00000000..238cca9a --- /dev/null +++ b/pkg/plugin/networkx/http_test.go @@ -0,0 +1,143 @@ +package networkx + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/phayes/freeport" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNewHTTPNode(t *testing.T) { + port, err := freeport.GetFreePort() + assert.NoError(t, err) + + n := NewHTTPNode(HTTPNodeConfig{ + Address: 
fmt.Sprintf(":%d", port), + }) + assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + _ = n.Close() +} + +func TestHTTPNode_Port(t *testing.T) { + port, err := freeport.GetFreePort() + assert.NoError(t, err) + + n := NewHTTPNode(HTTPNodeConfig{ + Address: fmt.Sprintf(":%d", port), + }) + defer func() { _ = n.Close() }() + + p, ok := n.Port(node.PortIO) + assert.True(t, ok) + assert.NotNil(t, p) + + p, ok = n.Port(node.PortErr) + assert.True(t, ok) + assert.NotNil(t, p) +} + +func TestHTTPNode_StartAndClose(t *testing.T) { + port, err := freeport.GetFreePort() + assert.NoError(t, err) + + n := NewHTTPNode(HTTPNodeConfig{ + Address: fmt.Sprintf(":%d", port), + }) + + errChan := make(chan error) + + go func() { + if err := n.Start(); err != nil { + errChan <- err + } + }() + + err = n.WaitForListen(errChan) + + assert.NoError(t, err) + assert.NoError(t, n.Close()) +} + +func TestHTTPNode_ServeHTTP(t *testing.T) { + t.Run("Hello World", func(t *testing.T) { + n := NewHTTPNode(HTTPNodeConfig{}) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + io.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + ioStream := io.Open(proc) + + for { + inPck, ok := <-ioStream.Receive() + if !ok { + return + } + + outPck := packet.New(primitive.NewMap( + primitive.NewString("body"), primitive.NewString("Hello World!"), + primitive.NewString("status"), primitive.NewInt(200), + )) + proc.Stack().Link(inPck.ID(), outPck.ID()) + ioStream.Send(outPck) + } + })) + + r := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + n.ServeHTTP(w, r) + + assert.Equal(t, 200, w.Result().StatusCode) + assert.Equal(t, TextPlainCharsetUTF8, w.Header().Get(HeaderContentType)) + assert.Equal(t, "Hello World!", w.Body.String()) + }) + + t.Run("HTTPError", func(t *testing.T) { + n := NewHTTPNode(HTTPNodeConfig{}) + defer func() { _ = n.Close() }() + + httpErr := NotFound + + io := port.New() + ioPort, _ := 
n.Port(node.PortIO) + ioPort.Link(io) + + io.AddInitHook(port.InitHookFunc(func(proc *process.Process) { + ioStream := io.Open(proc) + + for { + inPck, ok := <-ioStream.Receive() + if !ok { + return + } + + outPayload, _ := primitive.MarshalText(httpErr) + outPck := packet.New(outPayload) + proc.Stack().Link(inPck.ID(), outPck.ID()) + ioStream.Send(outPck) + } + })) + + r := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + n.ServeHTTP(w, r) + + assert.Equal(t, httpErr.Status, w.Result().StatusCode) + assert.Equal(t, TextPlainCharsetUTF8, w.Header().Get(HeaderContentType)) + assert.Equal(t, httpErr.Body.Interface(), w.Body.String()) + }) +} diff --git a/pkg/plugin/networkx/mime.go b/pkg/plugin/networkx/mime.go new file mode 100644 index 00000000..fa708e07 --- /dev/null +++ b/pkg/plugin/networkx/mime.go @@ -0,0 +1,294 @@ +package networkx + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime" + "mime/multipart" + "net/http" + "net/url" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +const ( + ApplicationJSON = "application/json" + ApplicationJSONCharsetUTF8 = ApplicationJSON + "; " + charsetUTF8 + ApplicationJavaScript = "application/javascript" + ApplicationJavaScriptCharsetUTF8 = ApplicationJavaScript + "; " + charsetUTF8 + ApplicationXML = "application/xml" + ApplicationXMLCharsetUTF8 = ApplicationXML + "; " + charsetUTF8 + TextXML = "text/xml" + TextXMLCharsetUTF8 = TextXML + "; " + charsetUTF8 + ApplicationForm = "application/x-www-form-urlencoded" + ApplicationProtobuf = "application/protobuf" + ApplicationMsgpack = "application/msgpack" + TextHTML = "text/html" + TextHTMLCharsetUTF8 = TextHTML + "; " + charsetUTF8 + TextPlain = "text/plain" + TextPlainCharsetUTF8 = TextPlain + "; " + charsetUTF8 + MultipartForm = "multipart/form-data" + OctetStream = "application/octet-stream" +) + +const ( + charsetUTF8 
= "charset=utf-8" +) + +func MarshalMIME(value primitive.Object, typ *string) ([]byte, error) { + if typ == nil { + content := "" + typ = &content + } + + if value == nil { + return nil, nil + } else if v, ok := value.(primitive.String); ok { + data := []byte(v.String()) + if *typ == "" { + *typ = http.DetectContentType(data) + } + return data, nil + } else if v, ok := value.(primitive.Binary); ok { + data := v.Bytes() + if *typ == "" { + *typ = http.DetectContentType(data) + } + return data, nil + } + + if *typ == "" { + *typ = ApplicationJSONCharsetUTF8 + } + + mediatype, params, err := mime.ParseMediaType(*typ) + if err != nil { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } + + switch mediatype { + case ApplicationJSON: + return json.Marshal(value.Interface()) + case ApplicationXML, TextXML: + return xml.Marshal(value.Interface()) + case ApplicationForm: + if v, ok := value.(*primitive.Map); !ok { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } else { + urlValues := url.Values{} + for _, key := range v.Keys() { + if k, ok := key.(primitive.String); ok { + value := v.GetOr(k, nil) + if v, ok := value.(primitive.String); ok { + urlValues.Add(k.String(), v.String()) + } else if v, ok := value.(*primitive.Slice); ok { + for i := 0; i < v.Len(); i++ { + if e, ok := v.Get(i).(primitive.String); ok { + urlValues.Add(k.String(), e.String()) + } + } + } + } + } + return []byte(urlValues.Encode()), nil + } + case TextPlain: + return []byte(fmt.Sprintf("%v", value.Interface())), nil + case MultipartForm: + boundary, ok := params["boundary"] + if !ok { + boundary = randomMultiPartBoundary() + params["boundary"] = boundary + *typ = mime.FormatMediaType(mediatype, params) + } + + bodyBuffer := new(bytes.Buffer) + mw := multipart.NewWriter(bodyBuffer) + if err := mw.SetBoundary(boundary); err != nil { + return nil, err + } + + writeField := func(obj *primitive.Map, key primitive.Object) error { + if key, ok := key.(primitive.String); ok { + 
elements := obj.GetOr(key, nil) + if e, ok := elements.(primitive.String); ok { + if err := mw.WriteField(key.String(), e.String()); err != nil { + return err + } + } else if e, ok := elements.(*primitive.Slice); ok { + for i := 0; i < e.Len(); i++ { + if e, ok := e.Get(i).(primitive.String); ok { + if err := mw.WriteField(key.String(), e.String()); err != nil { + return err + } + } + } + } + } + return nil + } + writeFields := func(value primitive.Object) error { + if value, ok := value.(*primitive.Map); ok { + for _, key := range value.Keys() { + if err := writeField(value, key); err != nil { + return err + } + } + } + return nil + } + + writeFiles := func(value primitive.Object) error { + if value, ok := value.(*primitive.Map); ok { + for _, key := range value.Keys() { + if key, ok := key.(primitive.String); ok { + elements := value.GetOr(key, nil) + if e, ok := elements.(*primitive.Map); ok { + filename, ok := e.GetOr(primitive.NewString("filename"), nil).(primitive.String) + if !ok { + continue + } + writer, err := mw.CreateFormFile(key.String(), filename.String()) + if err != nil { + return err + } + + data, ok := e.Get(primitive.NewString("data")) + if !ok { + continue + } + if d, ok := data.(primitive.Binary); ok { + if _, err := writer.Write(d.Bytes()); err != nil { + return err + } + } else if d, ok := data.(primitive.String); ok { + if _, err := writer.Write([]byte(d.String())); err != nil { + return err + } + } + } + } + } + } + return nil + } + + if v, ok := value.(*primitive.Map); ok { + for _, key := range v.Keys() { + value := v.GetOr(key, nil) + + if key == primitive.NewString("value") { + if err := writeFields(value); err != nil { return nil, err } + } else if key == primitive.NewString("file") { + if err := writeFiles(value); err != nil { return nil, err } + } else { + if err := writeField(v, key); err != nil { return nil, err } + } + } + } + + if err := mw.Close(); err != nil { + return nil, err + } + return bodyBuffer.Bytes(), nil + } + + return nil, errors.WithStack(encoding.ErrUnsupportedValue) +} + +func UnmarshalMIME(data []byte, typ *string) (primitive.Object, 
error) { + if len(data) == 0 { + return nil, nil + } + + if typ == nil { + content := "" + typ = &content + } + if *typ == "" { + *typ = http.DetectContentType(data) + } + + mediatype, params, err := mime.ParseMediaType(*typ) + if err != nil { + if len(data) == 0 { + return nil, nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } + + switch mediatype { + case ApplicationJSON: + var v any + if err := json.Unmarshal(data, &v); err != nil { + return nil, err + } + return primitive.MarshalText(v) + case ApplicationXML, TextXML: + var v any + if err := xml.Unmarshal(data, &v); err != nil { + return nil, err + } + return primitive.MarshalText(v) + case ApplicationForm: + v, err := url.ParseQuery(string(data)) + if err != nil { + return nil, err + } + return primitive.MarshalText(v) + case TextPlain: + return primitive.NewString(string(data)), nil + case MultipartForm: + reader := multipart.NewReader(bytes.NewReader(data), params["boundary"]) + form, err := reader.ReadForm(int64(len(data))) + if err != nil { + return nil, err + } + defer form.RemoveAll() + + formFile := map[string][]map[string]any{} + for name, fhs := range form.File { + for _, fh := range fhs { + file, err := fh.Open() + if err != nil { + return nil, err + } + data, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + formFile[name] = append(formFile[name], map[string]any{ + "filename": fh.Filename, + "header": fh.Header, + "size": fh.Size, + "data": data, + }) + } + } + + return primitive.MarshalText(map[string]any{ + "value": form.Value, + "file": formFile, + }) + case OctetStream: + return primitive.NewBinary(data), nil + default: + return primitive.NewBinary(data), nil + } +} + +func randomMultiPartBoundary() string { + var buf [30]byte + _, err := io.ReadFull(rand.Reader, buf[:]) + if err != nil { + panic(err) + } + return fmt.Sprintf("%x", buf[:]) +} diff --git a/pkg/plugin/networkx/mime_test.go b/pkg/plugin/networkx/mime_test.go new file mode 100644 index 
00000000..f99cd5a2 --- /dev/null +++ b/pkg/plugin/networkx/mime_test.go @@ -0,0 +1,138 @@ +package networkx + +import ( + "strings" + "testing" + + "github.com/lithammer/dedent" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/stretchr/testify/assert" +) + +func TestMarshalMIME(t *testing.T) { + testCases := []struct { + whenPayload primitive.Object + whenContentType string + expectPayload []byte + }{ + { + whenPayload: primitive.NewMap( + primitive.NewString("foo"), primitive.NewFloat64(1), + primitive.NewString("bar"), primitive.NewFloat64(2), + ), + whenContentType: ApplicationJSON, + expectPayload: []byte(`{"bar":2,"foo":1}`), + }, + // TODO: add xml test case + { + whenPayload: primitive.NewMap( + primitive.NewString("foo"), primitive.NewSlice(primitive.NewString("foo")), + primitive.NewString("bar"), primitive.NewSlice(primitive.NewString("bar")), + ), + whenContentType: ApplicationForm, + expectPayload: []byte("bar=bar&foo=foo"), + }, + { + whenPayload: primitive.NewString("testtesttest"), + whenContentType: TextPlain, + expectPayload: []byte("testtesttest"), + }, + { + whenPayload: primitive.NewMap( + primitive.NewString("value"), primitive.NewMap( + primitive.NewString("test"), primitive.NewSlice(primitive.NewString("test")), + ), + primitive.NewString("file"), primitive.NewMap(), + ), + whenContentType: MultipartForm + "; boundary=MyBoundary", + expectPayload: []byte(deIndent(` + --MyBoundary + Content-Disposition: form-data; name="test" + + test + --MyBoundary-- + + `)), + }, + } + for _, tc := range testCases { + t.Run(tc.whenContentType, func(t *testing.T) { + encode, err := MarshalMIME(tc.whenPayload, &tc.whenContentType) + assert.NoError(t, err) + assert.Equal(t, tc.expectPayload, encode) + }) + } +} + +func TestUnmarshalMIME(t *testing.T) { + testCases := []struct { + whenPayload []byte + whenContentType string + expectPayload primitive.Object + }{ + { + whenPayload: []byte(` + { + "foo": 1, + "bar": 2 + } + `), + whenContentType: 
ApplicationJSON, + expectPayload: primitive.NewMap( + primitive.NewString("foo"), primitive.NewFloat64(1), + primitive.NewString("bar"), primitive.NewFloat64(2), + ), + }, + // TODO: add xml test case + { + whenPayload: []byte("foo=foo&bar=bar"), + whenContentType: ApplicationForm, + expectPayload: primitive.NewMap( + primitive.NewString("foo"), primitive.NewSlice(primitive.NewString("foo")), + primitive.NewString("bar"), primitive.NewSlice(primitive.NewString("bar")), + ), + }, + { + whenPayload: []byte("testtesttest"), + whenContentType: TextPlain, + expectPayload: primitive.NewString("testtesttest"), + }, + { + whenPayload: []byte(deIndent(` + --MyBoundary + Content-Disposition: form-data; name="test" + + test + --MyBoundary-- + + `)), + whenContentType: MultipartForm + "; boundary=MyBoundary", + expectPayload: primitive.NewMap( + primitive.NewString("value"), primitive.NewMap( + primitive.NewString("test"), primitive.NewSlice(primitive.NewString("test")), + ), + primitive.NewString("file"), primitive.NewMap(), + ), + }, + { + whenPayload: []byte("testtesttest"), + whenContentType: OctetStream, + expectPayload: primitive.NewBinary([]byte("testtesttest")), + }, + } + + for _, tc := range testCases { + t.Run(tc.whenContentType, func(t *testing.T) { + decode, err := UnmarshalMIME(tc.whenPayload, &tc.whenContentType) + assert.NoError(t, err) + assert.Equal(t, tc.expectPayload.Interface(), decode.Interface()) + }) + } +} + +func deIndent(str string) string { + str = strings.TrimPrefix(str, "\n") + str = dedent.Dedent(str) + str = strings.TrimSuffix(str, "\n") + return strings.ReplaceAll(str, "\n", "\r\n") +} diff --git a/pkg/plugin/networkx/router.go b/pkg/plugin/networkx/router.go new file mode 100644 index 00000000..946b2781 --- /dev/null +++ b/pkg/plugin/networkx/router.go @@ -0,0 +1,522 @@ +package networkx + +import ( + "bytes" + "net/http" + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/node" + 
"github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/siyul-park/uniflow/pkg/scheme" +) + +type ( + RouterNodeConfig struct { + ID ulid.ULID + } + + RouterNode struct { + *node.OneToManyNode + tree *route + mu sync.RWMutex + } + + RouterSpec struct { + scheme.SpecMeta `map:",inline"` + Routes []RouteInfo `map:"routes"` + } + + RouteInfo struct { + Method string `map:"method"` + Path string `map:"path"` + Port string `map:"port"` + } + + route struct { + kind routeKind + prefix string + parent *route + staticChildren []*route + paramChild *route + anyChild *route + paramNames []string + methods map[string]string + } + routeKind uint8 +) + +const ( + KindRouter = "router" +) + +const ( + staticKind routeKind = iota + paramKind + anyKind + + paramLabel = byte(':') + anyLabel = byte('*') +) + +const ( + KeyMethod = "method" + KeyPath = "path" + KeyParams = "params" +) + +var _ node.Node = &RouterNode{} + +func NewRouterNode(config RouterNodeConfig) *RouterNode { + id := config.ID + + n := &RouterNode{ + tree: &route{ + methods: map[string]string{}, + }, + } + n.OneToManyNode = node.NewOneToManyNode(node.OneToManyNodeConfig{ + ID: id, + Action: n.action, + }) + + return n +} + +func (n *RouterNode) Add(method, path, port string) { + n.mu.Lock() + defer n.mu.Unlock() + + if path == "" { + path = "/" + } + if path[0] != '/' { + path = "/" + path + } + + var paramNames []string + + for i, lcpIndex := 0, len(path); i < lcpIndex; i++ { + if path[i] == ':' { + if i > 0 && path[i-1] == '\\' { + path = path[:i-1] + path[i:] + i-- + lcpIndex-- + continue + } + j := i + 1 + + n.insert(method, path[:i], staticKind, nil, "") + for ; i < lcpIndex && path[i] != '/'; i++ { + } + + paramNames = append(paramNames, path[j:i]) + path = path[:j] + path[i:] + i, lcpIndex = j, len(path) + + if i == lcpIndex { + n.insert(method, path[:i], paramKind, 
paramNames, port) + } else { + n.insert(method, path[:i], paramKind, nil, "") + } + } else if path[i] == '*' { + n.insert(method, path[:i], staticKind, nil, "") + paramNames = append(paramNames, "*") + n.insert(method, path[:i+1], anyKind, paramNames, port) + } + } + + n.insert(method, path, staticKind, paramNames, port) +} + +func (n *RouterNode) Close() error { + n.tree = &route{ + methods: map[string]string{}, + } + return n.OneToManyNode.Close() +} + +func (n *RouterNode) action(proc *process.Process, inPck *packet.Packet) ([]*packet.Packet, *packet.Packet) { + n.mu.RLock() + defer n.mu.RUnlock() + + inPayload, ok := inPck.Payload().(*primitive.Map) + if !ok { + return nil, packet.NewError(node.ErrInvalidPacket, inPck) + } + method, ok := primitive.Get[string](inPayload, KeyMethod) + if !ok { + return nil, packet.NewError(node.ErrInvalidPacket, inPck) + } + path, ok := primitive.Get[string](inPayload, KeyPath) + if !ok { + return nil, packet.NewError(node.ErrInvalidPacket, inPck) + } + + pre, cur, values := n.find(method, path) + + if cur != nil { + p := cur.methods[method] + var paramPairs []primitive.Object + for i, v := range values { + paramPairs = append(paramPairs, primitive.NewString(cur.paramNames[i])) + paramPairs = append(paramPairs, primitive.NewString(v)) + } + + if i, ok := port.GetIndex(node.PortOut, p); ok { + outPayload := inPayload.Set(primitive.NewString(KeyParams), primitive.NewMap(paramPairs...)) + outPck := packet.New(outPayload) + outPcks := make([]*packet.Packet, i+1) + outPcks[i] = outPck + + return outPcks, nil + } + } else if pre != nil { + buf := new(bytes.Buffer) + buf.WriteString(http.MethodOptions) + for k := range pre.methods { + if k == http.MethodOptions { + continue + } + buf.WriteString(", ") + buf.WriteString(k) + } + + header := http.Header(map[string][]string{ + HeaderAllow: {buf.String()}, + }) + + if method == http.MethodOptions { + errPayload, _ := primitive.MarshalText(HTTPPayload{ + Header: header, + Status: 
http.StatusNoContent, + }) + return nil, packet.New(errPayload) + } else { + errPayload, _ := primitive.MarshalText(HTTPPayload{ + Header: header, + Body: primitive.NewString(http.StatusText(http.StatusMethodNotAllowed)), + Status: http.StatusMethodNotAllowed, + }) + return nil, packet.New(errPayload) + } + } + + errPayload, _ := primitive.MarshalText(NotFound) + return nil, packet.New(errPayload) +} + +func (n *RouterNode) insert(method, path string, kind routeKind, paramNames []string, port string) { + currentRoute := n.tree + search := path + + for { + searchLen := len(search) + prefixLen := len(currentRoute.prefix) + lcpLen := 0 + + // LCP - Longest Common Prefix (https://en.wikipedia.org/wiki/LCP_array) + max := prefixLen + if searchLen < max { + max = searchLen + } + for ; lcpLen < max && search[lcpLen] == currentRoute.prefix[lcpLen]; lcpLen++ { + } + + if lcpLen == 0 { + // At root node + currentRoute.prefix = search + if port != "" { + currentRoute.kind = kind + currentRoute.paramNames = paramNames + currentRoute.methods[method] = port + } + } else if lcpLen < prefixLen { + r := &route{ + kind: currentRoute.kind, + prefix: currentRoute.prefix[lcpLen:], + parent: currentRoute, + staticChildren: currentRoute.staticChildren, + paramChild: currentRoute.paramChild, + anyChild: currentRoute.anyChild, + paramNames: currentRoute.paramNames, + methods: currentRoute.methods, + } + for _, child := range currentRoute.staticChildren { + child.parent = r + } + if currentRoute.paramChild != nil { + currentRoute.paramChild.parent = r + } + if currentRoute.anyChild != nil { + currentRoute.anyChild.parent = r + } + + // Reset parent node + currentRoute.kind = staticKind + currentRoute.prefix = currentRoute.prefix[:lcpLen] + currentRoute.staticChildren = nil + currentRoute.paramNames = nil + currentRoute.paramChild = nil + currentRoute.anyChild = nil + currentRoute.methods = map[string]string{} + + // Only Static children could reach here + currentRoute.addStaticChild(r) + + 
if lcpLen == searchLen { + // At parent node + currentRoute.kind = kind + if port != "" { + currentRoute.paramNames = paramNames + currentRoute.methods[method] = port + } + } else { + // Create child node + r = &route{ + kind: kind, + prefix: search[lcpLen:], + parent: currentRoute, + methods: map[string]string{}, + } + if port != "" { + r.paramNames = paramNames + r.methods[method] = port + } + // Only Static children could reach here + currentRoute.addStaticChild(r) + } + } else if lcpLen < searchLen { + search = search[lcpLen:] + c := currentRoute.findChildWithLabel(search[0]) + if c != nil { + // Go deeper + currentRoute = c + continue + } + // Create child node + r := &route{ + kind: kind, + prefix: search, + parent: currentRoute, + methods: map[string]string{}, + } + if port != "" { + r.paramNames = paramNames + r.methods[method] = port + } + + switch kind { + case staticKind: + currentRoute.addStaticChild(r) + case paramKind: + currentRoute.paramChild = r + case anyKind: + currentRoute.anyChild = r + } + } else { + // Node already exists + if port != "" { + currentRoute.paramNames = paramNames + currentRoute.methods[method] = port + } + } + return + } +} + +func (n *RouterNode) find(method, path string) (*route, *route, []string) { + n.mu.RLock() + defer n.mu.RUnlock() + + currentRoute := n.tree + + var ( + bestMatchRoute *route + previousBestMatchRoute *route + search = path + searchIndex = 0 + paramValues []string + ) + + backtrackToNextRouteKind := func(fromKind routeKind) (nextNodeKind routeKind, valid bool) { + previous := currentRoute + currentRoute = previous.parent + valid = currentRoute != nil + + // Next node type by priority + if previous.kind == anyKind { + nextNodeKind = staticKind + } else { + nextNodeKind = previous.kind + 1 + } + + if fromKind == staticKind { + // when backtracking is done from static basisKind block we did not change search so nothing to restore + return + } + + // restore search to value it was before we move to current 
node we are backtracking from. + if previous.kind == staticKind { + searchIndex -= len(previous.prefix) + } else if len(paramValues) > 0 { + searchIndex -= len(paramValues[len(paramValues)-1]) + paramValues = paramValues[:len(paramValues)-1] + } + search = path[searchIndex:] + return + } + + for { + prefixLen := 0 + lcpLen := 0 + + if currentRoute.kind == staticKind { + searchLen := len(search) + prefixLen = len(currentRoute.prefix) + + // LCP - Longest Common Prefix (https://en.wikipedia.org/wiki/LCP_array) + max := prefixLen + if searchLen < max { + max = searchLen + } + for ; lcpLen < max && search[lcpLen] == currentRoute.prefix[lcpLen]; lcpLen++ { + } + } + + if lcpLen != prefixLen { + // No matching prefix, let's backtrack to the first possible alternative node of the decision path + rk, ok := backtrackToNextRouteKind(staticKind) + if !ok { + return nil, nil, nil + } else if rk == paramKind { + goto Param + } else { + // Not found (this should never be possible for static node we are looking currently) + break + } + } + + // The full prefix has matched, remove the prefix from the remaining search + search = search[lcpLen:] + searchIndex = searchIndex + lcpLen + + // Finish routing if is no request path remaining to search + if search == "" { + if currentRoute.hasPort() { + if previousBestMatchRoute == nil { + previousBestMatchRoute = currentRoute + } + if _, ok := currentRoute.methods[method]; ok { + bestMatchRoute = currentRoute + break + } + } + } + + // Static node + if search != "" { + if child := currentRoute.findStaticChild(search[0]); child != nil { + currentRoute = child + continue + } + } + + Param: + // Param node + if child := currentRoute.paramChild; search != "" && child != nil { + currentRoute = child + i := 0 + l := len(search) + if currentRoute.isLeaf() { + // when param node does not have any children (path param is last piece of route path) then param node should + // act similarly to any node - consider all remaining search as match + i = l 
+ } else { + for ; i < l && search[i] != '/'; i++ { + } + } + + paramValues = append(paramValues, search[:i]) + search = search[i:] + searchIndex = searchIndex + i + continue + } + + Any: + // Any node + if child := currentRoute.anyChild; child != nil { + // If any node is found, use remaining path for paramValues + currentRoute = child + paramValues = append(paramValues, search) + + // update indexes/search in case we need to backtrack when no handler match is found + searchIndex += +len(search) + search = "" + + if _, ok := currentRoute.methods[method]; ok { + bestMatchRoute = currentRoute + break + } + if previousBestMatchRoute == nil { + previousBestMatchRoute = currentRoute + } + } + + // Let's backtrack to the first possible alternative node of the decision path + rk, ok := backtrackToNextRouteKind(anyKind) + if !ok { + break // No other possibilities on the decision path + } else if rk == paramKind { + goto Param + } else if rk == anyKind { + goto Any + } else { + // Not found + break + } + } + + return previousBestMatchRoute, bestMatchRoute, paramValues +} + +func (r *route) addStaticChild(c *route) { + r.staticChildren = append(r.staticChildren, c) +} + +func (r *route) findChildWithLabel(l byte) *route { + if c := r.findStaticChild(l); c != nil { + return c + } + if l == paramLabel { + return r.paramChild + } + if l == anyLabel { + return r.anyChild + } + return nil +} + +func (r *route) findStaticChild(l byte) *route { + for _, c := range r.staticChildren { + if c.label() == l { + return c + } + } + return nil +} + +func (r *route) isLeaf() bool { + return len(r.staticChildren) == 0 && r.paramChild == nil && r.anyChild == nil +} + +func (r *route) hasPort() bool { + return len(r.methods) > 0 +} + +func (r *route) label() byte { + return r.prefix[0] +} diff --git a/pkg/plugin/networkx/router_test.go b/pkg/plugin/networkx/router_test.go new file mode 100644 index 00000000..db5efcbc --- /dev/null +++ b/pkg/plugin/networkx/router_test.go @@ -0,0 +1,108 @@ 
+package networkx + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNewRouterNode(t *testing.T) { + n := NewRouterNode(RouterNodeConfig{}) + assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + _ = n.Close() +} + +func TestRouterNode_Send(t *testing.T) { + n := NewRouterNode(RouterNodeConfig{}) + defer func() { _ = n.Close() }() + + in := port.New() + inPort, _ := n.Port(node.PortIn) + inPort.Link(in) + + n.Add(http.MethodGet, "/*", port.SetIndex(node.PortOut, 0)) + n.Add(http.MethodGet, "/:1/second", port.SetIndex(node.PortOut, 1)) + n.Add(http.MethodGet, "/:1/:2", port.SetIndex(node.PortOut, 2)) + + var testCases = []struct { + name string + whenURL string + expectPort string + expectParams map[string]string + }{ + { + name: "route /first to /*", + whenURL: "/first", + expectPort: port.SetIndex(node.PortOut, 0), + expectParams: map[string]string{"*": "first"}, + }, + { + name: "route /first/second to /:1/second", + whenURL: "/first/second", + expectPort: port.SetIndex(node.PortOut, 1), + expectParams: map[string]string{"1": "first"}, + }, + { + name: "route /first/second-new to /:1/:2", + whenURL: "/first/second-new", + expectPort: port.SetIndex(node.PortOut, 2), + expectParams: map[string]string{ + "1": "first", + "2": "second-new", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out := port.New() + defer out.Close() + outPort, _ := n.Port(tc.expectPort) + outPort.Link(out) + + proc := process.New() + defer proc.Close() + + inStream := in.Open(proc) + outStream := out.Open(proc) + + inStream.Send(packet.New(primitive.NewMap( + primitive.NewString(KeyMethod), primitive.NewString(http.MethodGet), + primitive.NewString(KeyPath), 
primitive.NewString(tc.whenURL), + ))) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-outStream.Receive(): + outPayload, ok := outPck.Payload().(*primitive.Map) + assert.True(t, ok) + + param, ok := outPayload.Get(primitive.NewString(KeyParams)) + assert.True(t, ok) + method, ok := outPayload.Get(primitive.NewString(KeyMethod)) + assert.True(t, ok) + path, ok := outPayload.Get(primitive.NewString(KeyPath)) + assert.True(t, ok) + + assert.Equal(t, tc.expectParams, param.Interface()) + assert.Equal(t, http.MethodGet, method.Interface()) + assert.Equal(t, tc.whenURL, path.Interface()) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + } + +} diff --git a/pkg/plugin/systemx/builder.go b/pkg/plugin/systemx/builder.go new file mode 100644 index 00000000..e10adf44 --- /dev/null +++ b/pkg/plugin/systemx/builder.go @@ -0,0 +1,22 @@ +package systemx + +import ( + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" +) + +func AddToScheme(storage *storage.Storage) func(*scheme.Scheme) error { + return func(s *scheme.Scheme) error { + s.AddKnownType(KindReflect, &ReflectSpec{}) + s.AddCodec(KindReflect, scheme.CodecWithType[*ReflectSpec](func(spec *ReflectSpec) (node.Node, error) { + return NewReflectNode(ReflectNodeConfig{ + ID: spec.ID, + OP: spec.OP, + Storage: storage, + }), nil + })) + + return nil + } +} diff --git a/pkg/plugin/systemx/builder_test.go b/pkg/plugin/systemx/builder_test.go new file mode 100644 index 00000000..cba531e8 --- /dev/null +++ b/pkg/plugin/systemx/builder_test.go @@ -0,0 +1,26 @@ +package systemx + +import ( + "context" + "testing" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/stretchr/testify/assert" +) + +func TestAddToScheme(t *testing.T) 
{ + s := scheme.New() + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + err := AddToScheme(st)(s) + assert.NoError(t, err) + + _, ok := s.Codec(KindReflect) + assert.True(t, ok) +} diff --git a/pkg/plugin/systemx/reflect.go b/pkg/plugin/systemx/reflect.go new file mode 100644 index 00000000..f7797808 --- /dev/null +++ b/pkg/plugin/systemx/reflect.go @@ -0,0 +1,264 @@ +package systemx + +import ( + "context" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" +) + +type ( + ReflectNodeConfig struct { + ID ulid.ULID + OP string + Storage *storage.Storage + } + + ReflectNode struct { + *node.OneToOneNode + op string + storage *storage.Storage + } + + ReflectSpec struct { + scheme.SpecMeta `map:",inline"` + OP string `map:"op"` + } +) + +const ( + KindReflect = "reflect" +) + +const ( + OPDelete = "delete" + OPInsert = "insert" + OPSelect = "select" + OPUpdate = "update" +) + +func NewReflectNode(config ReflectNodeConfig) *ReflectNode { + id := config.ID + op := config.OP + storage := config.Storage + + n := &ReflectNode{ + op: op, + storage: storage, + } + n.OneToOneNode = node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: id, + Action: n.action, + }) + + return n +} + +func (n *ReflectNode) action(proc *process.Process, inPck *packet.Packet) (*packet.Packet, *packet.Packet) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-proc.Done() + cancel() + }() + + inPayload := inPck.Payload() + + batch := true + var examples []*primitive.Map + if v, ok := inPayload.(*primitive.Map); ok { + examples = append(examples, v) + batch = false + 
} else if v, ok := inPayload.(*primitive.Slice); ok { + for i := 0; i < v.Len(); i++ { + if e, ok := v.Get(i).(*primitive.Map); ok { + examples = append(examples, e) + } + } + } + + switch n.op { + case OPDelete: + filter, err := examplesToFilter(examples) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + specs, err := n.storage.FindMany(ctx, filter) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + var ids []ulid.ULID + for _, spec := range specs { + ids = append(ids, spec.GetID()) + } + + if _, err := n.storage.DeleteMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(ids...)); err != nil { + return nil, packet.NewError(err, inPck) + } + + if len(specs) == 0 { + return nil, inPck + } + if outPayload, err := specsToExamples(specs, batch); err != nil { + return nil, packet.NewError(err, inPck) + } else { + return packet.New(outPayload), nil + } + case OPInsert: + specs := examplesToSpecs(examples) + + ids, err := n.storage.InsertMany(ctx, specs) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + specs, err = n.storage.FindMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(ids...), &database.FindOptions{ + Limit: util.Ptr[int](len(ids)), + }) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + if len(specs) == 0 { + return nil, inPck + } + if outPayload, err := specsToExamples(specs, batch); err != nil { + return nil, packet.NewError(err, inPck) + } else { + return packet.New(outPayload), nil + } + case OPSelect: + filter, err := examplesToFilter(examples) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + specs, err := n.storage.FindMany(ctx, filter) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + if len(specs) == 0 { + return nil, inPck + } + if outPayload, err := specsToExamples(specs, batch); err != nil { + return nil, packet.NewError(err, inPck) + } else { + return packet.New(outPayload), nil + } + case OPUpdate: + specs := examplesToSpecs(examples) + + 
var ids []ulid.ULID + patches := map[ulid.ULID]*primitive.Map{} + for i, spec := range specs { + id := spec.GetID() + + if !util.IsZero(id) { + ids = append(ids, id) + patches[id] = examples[i] + } + } + + specs, err := n.storage.FindMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(ids...), &database.FindOptions{ + Limit: util.Ptr[int](len(ids)), + }) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + var merges []scheme.Spec + for _, spec := range specs { + unstructured := scheme.NewUnstructured(nil) + if err := unstructured.Marshal(spec); err != nil { + return nil, packet.NewError(err, inPck) + } + + patch := patches[spec.GetID()] + + doc := unstructured.Doc() + for _, k := range patch.Keys() { + doc = doc.Set(k, patch.GetOr(k, nil)) + } + + merges = append(merges, scheme.NewUnstructured(doc)) + } + + if _, err := n.storage.UpdateMany(ctx, merges); err != nil { + return nil, packet.NewError(err, inPck) + } + + specs, err = n.storage.FindMany(ctx, storage.Where[ulid.ULID](scheme.KeyID).IN(ids...), &database.FindOptions{ + Limit: util.Ptr[int](len(ids)), + }) + if err != nil { + return nil, packet.NewError(err, inPck) + } + + if len(specs) == 0 { + return nil, inPck + } + if outPayload, err := specsToExamples(specs, batch); err != nil { + return nil, packet.NewError(err, inPck) + } else { + return packet.New(outPayload), nil + } + } + + return inPck, nil +} + +func examplesToFilter(examples []*primitive.Map) (*storage.Filter, error) { + var filter *storage.Filter + for _, example := range examples { + var sub *storage.Filter + + spec := scheme.SpecMeta{} + unstructured := scheme.NewUnstructured(example) + if err := unstructured.Unmarshal(&spec); err != nil { + return nil, err + } + + if !util.IsZero(spec.ID) { + sub = sub.And(storage.Where[ulid.ULID](scheme.KeyID).EQ(spec.ID)) + } + if !util.IsZero(spec.Kind) { + sub = sub.And(storage.Where[string](scheme.KeyKind).EQ(spec.Kind)) + } + if !util.IsZero(spec.Name) { + sub = 
sub.And(storage.Where[string](scheme.KeyName).EQ(spec.Name)) + } + if !util.IsZero(spec.Namespace) { + sub = sub.And(storage.Where[string](scheme.KeyName).EQ(spec.Namespace)) + } + + filter = filter.And(sub) + } + + return filter, nil +} + +func examplesToSpecs(examples []*primitive.Map) []scheme.Spec { + var specs []scheme.Spec + for _, example := range examples { + unstructured := scheme.NewUnstructured(example) + specs = append(specs, unstructured) + } + return specs +} + +func specsToExamples(specs []scheme.Spec, batch bool) (primitive.Object, error) { + if batch || len(specs) > 1 { + return primitive.MarshalText(specs) + } else { + return primitive.MarshalText(specs[0]) + } +} diff --git a/pkg/plugin/systemx/reflect_test.go b/pkg/plugin/systemx/reflect_test.go new file mode 100644 index 00000000..5ef1ec87 --- /dev/null +++ b/pkg/plugin/systemx/reflect_test.go @@ -0,0 +1,225 @@ +package systemx + +import ( + "context" + "testing" + "time" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/port" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/stretchr/testify/assert" +) + +func TestNewReflectNode(t *testing.T) { + s := scheme.New() + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + n := NewReflectNode(ReflectNodeConfig{ + OP: OPSelect, + Storage: st, + }) + assert.NotNil(t, n) + assert.NotZero(t, n.ID()) + + _ = n.Close() +} + +func TestReflectNode_Send(t *testing.T) { + s := scheme.New() + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + s.AddKnownType(KindReflect, &ReflectSpec{}) + 
s.AddCodec(KindReflect, scheme.CodecWithType[*ReflectSpec](func(spec *ReflectSpec) (node.Node, error) { + return NewReflectNode(ReflectNodeConfig{ + ID: spec.ID, + OP: spec.OP, + Storage: st, + }), nil + })) + + t.Run(OPDelete, func(t *testing.T) { + n := NewReflectNode(ReflectNodeConfig{ + OP: OPDelete, + Storage: st, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + id, _ := st.InsertOne(context.Background(), &ReflectSpec{ + SpecMeta: scheme.SpecMeta{ + ID: ulid.Make(), + Kind: KindReflect, + }, + OP: OPDelete, + }) + + inPayload := primitive.NewMap( + primitive.NewString(scheme.KeyID), primitive.NewString(id.String()), + ) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + outPayload, ok := outPck.Payload().(*primitive.Map) + assert.True(t, ok) + assert.Equal(t, id.String(), primitive.Interface(outPayload.GetOr(primitive.NewString(scheme.KeyID), nil))) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run(OPInsert, func(t *testing.T) { + n := NewReflectNode(ReflectNodeConfig{ + OP: OPInsert, + Storage: st, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + inPayload := primitive.NewMap( + primitive.NewString(scheme.KeyID), primitive.NewString(ulid.Make().String()), + primitive.NewString("kind"), primitive.NewString(KindReflect), + primitive.NewString("op"), primitive.NewString(OPInsert), + ) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + outPayload, ok := 
outPck.Payload().(*primitive.Map) + assert.True(t, ok) + assert.NotNil(t, outPayload.GetOr(primitive.NewString(scheme.KeyID), nil)) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run(OPSelect, func(t *testing.T) { + n := NewReflectNode(ReflectNodeConfig{ + OP: OPSelect, + Storage: st, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + id, _ := st.InsertOne(context.Background(), &ReflectSpec{ + SpecMeta: scheme.SpecMeta{ + ID: ulid.Make(), + Kind: KindReflect, + }, + OP: OPSelect, + }) + + inPayload := primitive.NewMap( + primitive.NewString(scheme.KeyID), primitive.NewString(id.String()), + ) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case outPck := <-ioStream.Receive(): + outPayload, ok := outPck.Payload().(*primitive.Map) + assert.True(t, ok) + assert.Equal(t, id.String(), primitive.Interface(outPayload.GetOr(primitive.NewString(scheme.KeyID), nil))) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } + }) + + t.Run(OPUpdate, func(t *testing.T) { + n := NewReflectNode(ReflectNodeConfig{ + OP: OPUpdate, + Storage: st, + }) + defer func() { _ = n.Close() }() + + io := port.New() + ioPort, _ := n.Port(node.PortIO) + ioPort.Link(io) + + proc := process.New() + defer proc.Close() + + ioStream := io.Open(proc) + + id, _ := st.InsertOne(context.Background(), &ReflectSpec{ + SpecMeta: scheme.SpecMeta{ + ID: ulid.Make(), + Kind: KindReflect, + }, + OP: OPInsert, + }) + + inPayload := primitive.NewMap( + primitive.NewString(scheme.KeyID), primitive.NewString(id.String()), + primitive.NewString("op"), primitive.NewString(OPUpdate), + ) + inPck := packet.New(inPayload) + + ioStream.Send(inPck) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + 
// GetIndex reports the element index encoded in target for the given
// source port name, e.g. GetIndex("out", "out[3]") returns (3, true).
// The source name is matched literally: regex metacharacters in it are
// escaped, so names such as "a.b" or "a+b" match only themselves instead
// of being interpreted as patterns (or failing to compile).
func GetIndex(source string, target string) (int, bool) {
	regex, err := regexp.Compile(regexp.QuoteMeta(source) + `\[(\d+)\]`)
	if err != nil {
		// Unreachable after QuoteMeta, kept as a defensive guard.
		return 0, false
	}
	groups := regex.FindStringSubmatch(target)
	if groups == nil {
		return 0, false
	}
	i, err := strconv.Atoi(groups[1])
	if err != nil {
		return 0, false
	}
	return i, true
}

// SetIndex returns the full port name for the given source and index,
// e.g. SetIndex("out", 3) == "out[3]". It is the inverse of GetIndex.
func SetIndex(source string, index int) string {
	return fmt.Sprintf("%s[%d]", source, index)
}
+ InitHook interface { + Init(proc *process.Process) + } + + InitHookFunc func(proc *process.Process) + + // InitOnceHook is a hook that runs only once per process.process. + InitOnceHook struct { + init InitHook + processes map[*process.Process]struct{} + mu sync.RWMutex + } +) + +var _ InitHook = InitHookFunc(func(proc *process.Process) {}) +var _ InitHook = &InitOnceHook{} + +func (h InitHookFunc) Init(proc *process.Process) { + h(proc) +} + +// InitOnce returns a new InitOnceHook. +func InitOnce(h InitHook) *InitOnceHook { + return &InitOnceHook{ + init: h, + processes: make(map[*process.Process]struct{}), + } +} + +func (h *InitOnceHook) Init(proc *process.Process) { + if ok := func() bool { + h.mu.RLock() + defer h.mu.RUnlock() + + _, ok := h.processes[proc] + return !ok + }(); !ok { + return + } + + if ok := func() bool { + h.mu.Lock() + defer h.mu.Unlock() + + _, ok := h.processes[proc] + if ok { + return false + } + + h.processes[proc] = struct{}{} + go func() { + <-proc.Done() + + h.mu.Lock() + defer h.mu.Unlock() + + delete(h.processes, proc) + }() + + return true + }(); !ok { + return + } + + h.init.Init(proc) +} + +func (h *InitOnceHook) Close() { + h.mu.Lock() + defer h.mu.Unlock() + + for proc := range h.processes { + delete(h.processes, proc) + } +} diff --git a/pkg/port/pipe.go b/pkg/port/pipe.go new file mode 100644 index 00000000..70b2db9a --- /dev/null +++ b/pkg/port/pipe.go @@ -0,0 +1,191 @@ +package port + +import ( + "sync" + + "github.com/siyul-park/uniflow/pkg/packet" +) + +type ( + // ReadPipe is a Pipe that can be Receive Packet. + ReadPipe struct { + in chan *packet.Packet + out chan *packet.Packet + done chan struct{} + mu sync.RWMutex + } + + // WritePipe is a Pipe that can be Send Packet. + WritePipe struct { + links []*ReadPipe + done chan struct{} + mu sync.RWMutex + } +) + +// NewReadPipe returns a new ReadPipe. 
+func NewReadPipe() *ReadPipe { + p := &ReadPipe{ + in: make(chan *packet.Packet), + out: make(chan *packet.Packet), + done: make(chan struct{}), + mu: sync.RWMutex{}, + } + + go func() { + defer close(p.out) + buffer := make([]*packet.Packet, 0, 4) + + loop: + for { + packet, ok := <-p.in + if !ok { + break loop + } + select { + case p.out <- packet: + continue + default: + } + buffer = append(buffer, packet) + for len(buffer) > 0 { + select { + case packet, ok := <-p.in: + if !ok { + break loop + } + buffer = append(buffer, packet) + + case p.out <- buffer[0]: + buffer = buffer[1:] + } + } + } + for len(buffer) > 0 { + p.out <- buffer[0] + buffer = buffer[1:] + } + }() + + return p +} + +// Receive returns a channel that receives Packet. +func (p *ReadPipe) Receive() <-chan *packet.Packet { + return p.out +} + +// Done returns a channel that is closed when the ReadPipe is closed. +func (p *ReadPipe) Done() <-chan struct{} { + return p.done +} + +// Close closes the ReadPipe. +// Packet that are not processed will be discard. +func (p *ReadPipe) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + select { + case <-p.done: + return + default: + } + + close(p.done) + close(p.in) +} + +func (p *ReadPipe) send(pck *packet.Packet) { + p.mu.RLock() + defer p.mu.RUnlock() + + select { + case <-p.done: + default: + p.in <- pck + } +} + +// NewWritePipe returns a new WritePipe. +func NewWritePipe() *WritePipe { + return &WritePipe{ + links: nil, + done: make(chan struct{}), + mu: sync.RWMutex{}, + } +} + +// Send a Packet to all linked ReadPipe. +func (p *WritePipe) Send(pck *packet.Packet) { + p.mu.Lock() + defer p.mu.Unlock() + + wg := &sync.WaitGroup{} + for _, l := range p.links { + wg.Add(1) + l := l + go func() { + defer wg.Done() + l.send(pck) + }() + } + wg.Wait() +} + +// Link a ReadPipe to enable communication with each other. 
+func (p *WritePipe) Link(pipe *ReadPipe) { + p.mu.Lock() + defer p.mu.Unlock() + + for _, l := range p.links { + if l == pipe { + return + } + } + + p.links = append(p.links, pipe) + + go func() { + select { + case <-p.Done(): + pipe.Close() + case <-pipe.Done(): + p.Unlink(pipe) + } + }() +} + +// Unlink removes the linked ReadPipe from being able to communicate further. +func (p *WritePipe) Unlink(pipe *ReadPipe) { + p.mu.Lock() + defer p.mu.Unlock() + + for i, l := range p.links { + if l == pipe { + p.links = append(p.links[:i], p.links[i+1:]...) + return + } + } +} + +// Done returns a channel that is closed when the WritePipe is closed. +func (p *WritePipe) Done() <-chan struct{} { + return p.done +} + +// Close closes the WritePipe. +// Packet that are not processed will be discard. +func (p *WritePipe) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + select { + case <-p.done: + return + default: + } + + close(p.done) + p.links = nil +} diff --git a/pkg/port/pipe_test.go b/pkg/port/pipe_test.go new file mode 100644 index 00000000..85e320f9 --- /dev/null +++ b/pkg/port/pipe_test.go @@ -0,0 +1,155 @@ +package port + +import ( + "testing" + + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/stretchr/testify/assert" +) + +func TestPipe_Link(t *testing.T) { + t.Run("1:1", func(t *testing.T) { + read := NewReadPipe() + defer read.Close() + write := NewWritePipe() + defer write.Close() + + write.Link(read) + + pck := packet.New(nil) + write.Send(pck) + + assert.Equal(t, pck, <-read.Receive()) + }) + + t.Run("1:N", func(t *testing.T) { + read1 := NewReadPipe() + defer read1.Close() + read2 := NewReadPipe() + defer read2.Close() + write := NewWritePipe() + defer write.Close() + + write.Link(read1) + write.Link(read2) + + pck := packet.New(nil) + write.Send(pck) + + assert.Equal(t, pck, <-read1.Receive()) + assert.Equal(t, pck, <-read2.Receive()) + }) +} + +func TestPipe_Unlink(t *testing.T) { + read := NewReadPipe() + defer read.Close() + write := 
NewWritePipe() + defer write.Close() + + write.Link(read) + write.Unlink(read) + + pck := packet.New(nil) + write.Send(pck) + + select { + case <-read.Receive(): + assert.Fail(t, "pipe should not receive and packet.") + default: + } +} + +func TestPipe_SendAndReceive(t *testing.T) { + t.Run("Not Closed", func(t *testing.T) { + read := NewReadPipe() + defer read.Close() + write := NewWritePipe() + defer write.Close() + + write.Link(read) + + pck1 := packet.New(nil) + pck2 := packet.New(nil) + + write.Send(pck1) + write.Send(pck2) + + assert.Equal(t, pck1, <-read.Receive()) + assert.Equal(t, pck2, <-read.Receive()) + }) + + t.Run("Closed", func(t *testing.T) { + read := NewReadPipe() + defer read.Close() + write := NewWritePipe() + defer write.Close() + + write.Link(read) + write.Close() + + pck1 := packet.New(nil) + pck2 := packet.New(nil) + + write.Send(pck1) + write.Send(pck2) + + assert.Nil(t, <-read.Receive()) + assert.Nil(t, <-read.Receive()) + }) +} + +func TestPipe_Close(t *testing.T) { + t.Run("ReadPipe", func(t *testing.T) { + pipe := NewReadPipe() + defer pipe.Close() + + select { + case <-pipe.Done(): + assert.Fail(t, "pipe.Done() is not empty.") + default: + } + + pipe.Close() + + select { + case <-pipe.Done(): + default: + assert.Fail(t, "pipe.Done() is empty.") + } + }) + t.Run("WritePipe", func(t *testing.T) { + pipe := NewWritePipe() + defer pipe.Close() + + select { + case <-pipe.Done(): + assert.Fail(t, "pipe.Done() is not empty.") + default: + } + + pipe.Close() + + select { + case <-pipe.Done(): + default: + assert.Fail(t, "pipe.Done() is empty.") + } + }) +} + +func BenchmarkPipe_SendAndReceive(b *testing.B) { + read := NewReadPipe() + defer read.Close() + write := NewWritePipe() + defer write.Close() + + write.Link(read) + + pck := packet.New(nil) + + for i := 0; i < b.N; i++ { + write.Send(pck) + <-read.Receive() + } +} diff --git a/pkg/port/port.go b/pkg/port/port.go new file mode 100644 index 00000000..651b1d5e --- /dev/null +++ 
b/pkg/port/port.go @@ -0,0 +1,201 @@ +package port + +import ( + "sync" + + "github.com/siyul-park/uniflow/pkg/process" +) + +type ( + // Port is a linking terminal that allows *packet.Packet to be exchanged. + Port struct { + streams map[*process.Process]*Stream + links []*Port + initHooks []InitHook + done chan struct{} + mu sync.RWMutex + } +) + +// New returns a new Port. +func New() *Port { + return &Port{ + streams: make(map[*process.Process]*Stream), + done: make(chan struct{}), + } +} + +// AddInitHook adds a InitHook. +func (p *Port) AddInitHook(hook InitHook) { + p.mu.Lock() + defer p.mu.Unlock() + + p.initHooks = append(p.initHooks, hook) +} + +// Link connects two Port to enable communication with each other. +func (p *Port) Link(port *Port) { + p.link(port) + port.link(p) +} + +// Unlink removes the linked Port from being able to communicate further. +func (p *Port) Unlink(port *Port) { + p.unlink(port) + port.unlink(p) +} + +// Links return length of linked. +func (p *Port) Links() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return len(p.links) +} + +// Open Stream to communicate. For each process, Stream is opened independently. +// When Process is closed, Stream is also closed. Stream Send and Receive Packet to Broadcast to all other Port connected to the Port. 
+func (p *Port) Open(proc *process.Process) *Stream { + select { + case <-proc.Done(): + stream := NewStream() + stream.Close() + return stream + case <-p.Done(): + stream := NewStream() + stream.Close() + return stream + default: + if stream, ok := func() (*Stream, bool) { + p.mu.RLock() + defer p.mu.RUnlock() + + stream, ok := p.streams[proc] + return stream, ok + }(); ok { + return stream + } + + stream, ok := func() (*Stream, bool) { + p.mu.Lock() + defer p.mu.Unlock() + + stream, ok := p.streams[proc] + if ok { + return stream, true + } + stream = NewStream() + p.streams[proc] = stream + return stream, false + }() + if ok { + return stream + } + + p.mu.RLock() + links := p.links + inits := p.initHooks + p.mu.RUnlock() + + for _, link := range links { + stream.Link(link.Open(proc)) + } + + go func() { + select { + case <-p.Done(): + case <-proc.Done(): + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.streams, proc) + + stream.Close() + case <-stream.Done(): + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.streams, proc) + } + }() + + for _, hook := range inits { + hook := hook + go func() { hook.Init(proc) }() + } + + return stream + } +} + +// Done returns a channel that is closed when the Port is closed. +func (p *Port) Done() <-chan struct{} { + return p.done +} + +// Close the Port. +// All Stream currently open will also be shut down and any Packet that are not processed will be discard. 
+func (p *Port) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + select { + case <-p.done: + return + default: + } + + for _, stream := range p.streams { + stream.Close() + } + + p.streams = nil + p.links = nil + + p.initHooks = nil + + close(p.done) +} + +func (p *Port) link(port *Port) { + if p == port { + return + } + + if ok := func() bool { + p.mu.Lock() + defer p.mu.Unlock() + + for _, link := range p.links { + if link == port { + return false + } + } + + p.links = append(p.links, port) + return true + }(); !ok { + return + } + + go func() { + select { + case <-p.Done(): + return + case <-port.Done(): + p.unlink(port) + } + }() +} + +func (p *Port) unlink(port *Port) { + p.mu.Lock() + defer p.mu.Unlock() + + for i, link := range p.links { + if port == link { + p.links = append(p.links[:i], p.links[i+1:]...) + break + } + } +} diff --git a/pkg/port/port_test.go b/pkg/port/port_test.go new file mode 100644 index 00000000..34093fdb --- /dev/null +++ b/pkg/port/port_test.go @@ -0,0 +1,149 @@ +package port + +import ( + "testing" + "time" + + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/siyul-park/uniflow/pkg/process" + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + port := New() + defer port.Close() + + assert.NotNil(t, port) +} + +func TestPort_Link(t *testing.T) { + port1 := New() + defer port1.Close() + port2 := New() + defer port2.Close() + + port1.Link(port2) + + proc := process.New() + + stream1 := port1.Open(proc) + stream2 := port2.Open(proc) + + pck1 := packet.New(nil) + pck2 := packet.New(nil) + + stream1.Send(pck1) + stream2.Send(pck2) + + assert.Equal(t, pck1, <-stream2.Receive()) + assert.Equal(t, pck2, <-stream1.Receive()) +} + +func TestPort_UnLink(t *testing.T) { + port1 := New() + defer port1.Close() + port2 := New() + defer port2.Close() + + port1.Link(port2) + port1.Unlink(port2) + + proc := process.New() + + stream1 := port1.Open(proc) + stream2 := port2.Open(proc) + + pck1 := packet.New(nil) + pck2 := 
packet.New(nil) + + stream1.Send(pck1) + stream2.Send(pck2) + + select { + case <-stream1.Receive(): + assert.Fail(t, "pipe should not receive and packet.") + default: + } + select { + case <-stream2.Receive(): + assert.Fail(t, "pipe should not receive and packet.") + default: + } +} + +func TestPortLinks(t *testing.T) { + port1 := New() + defer port1.Close() + port2 := New() + defer port2.Close() + + assert.Equal(t, port1.Links(), 0) + assert.Equal(t, port2.Links(), 0) + + port1.Link(port2) + + assert.Equal(t, port1.Links(), 1) + assert.Equal(t, port2.Links(), 1) +} + +func TestPort_Open(t *testing.T) { + port := New() + defer port.Close() + + t.Run("process not closed", func(t *testing.T) { + proc := process.New() + stream := port.Open(proc) + + proc.Close() + + select { + case <-stream.Done(): + case <-time.Tick(time.Second): + assert.Fail(t, "pipe.Done() is empty.") + } + }) + + t.Run("process closed", func(t *testing.T) { + proc := process.New() + proc.Close() + + stream := port.Open(proc) + + select { + case <-stream.Done(): + default: + assert.Fail(t, "stream.Done() is empty.") + } + }) +} + +func TestPort_Close(t *testing.T) { + port := New() + defer port.Close() + proc := process.New() + stream := port.Open(proc) + + port.Close() + + select { + case <-stream.Done(): + default: + assert.Fail(t, "stream.Done() is empty.") + } + + select { + case <-port.Done(): + default: + assert.Fail(t, "port.Done() is empty.") + } +} + +func BenchmarkPort_Open(b *testing.B) { + port := New() + defer port.Close() + + for i := 0; i < b.N; i++ { + proc := process.New() + _ = port.Open(proc) + } +} diff --git a/pkg/port/stream.go b/pkg/port/stream.go new file mode 100644 index 00000000..bcf9f83e --- /dev/null +++ b/pkg/port/stream.go @@ -0,0 +1,121 @@ +package port + +import ( + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/packet" +) + +type ( + // Stream is a channel where you can exchange *packet.Packet. 
+ Stream struct { + id ulid.ULID + read *ReadPipe + write *WritePipe + links []*Stream + done chan struct{} + mu sync.RWMutex + } +) + +// NewStream returns a new Stream. +func NewStream() *Stream { + return &Stream{ + id: ulid.Make(), + read: NewReadPipe(), + write: NewWritePipe(), + done: make(chan struct{}), + } +} + +// ID returns the ID. +func (s *Stream) ID() ulid.ULID { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.id +} + +// Send sends a Packet to linked Stream. +func (s *Stream) Send(pck *packet.Packet) { + s.write.Send(pck) +} + +// Receive receives a Packet from linked Stream. +func (s *Stream) Receive() <-chan *packet.Packet { + return s.read.Receive() +} + +// Link connects two Stream to enable communication with each other. +func (s *Stream) Link(stream *Stream) { + s.link(stream) + stream.link(s) +} + +// Unlink removes the linked Stream from being able to communicate further. +func (s *Stream) Unlink(stream *Stream) { + s.unlink(stream) + stream.unlink(s) +} + +// Links returns length of linked. +func (s *Stream) Links() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.links) +} + +// Done returns a channel which is closed when the Stream is closed. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Close closes the Stream. +// Shut down and any Packet that are not processed will be discard. +func (s *Stream) Close() { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.done: + return + default: + } + close(s.done) + + s.read.Close() + s.write.Close() +} + +func (s *Stream) link(stream *Stream) { + if stream == s { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + for _, link := range s.links { + if stream == link { + return + } + } + + s.links = append(s.links, stream) + s.write.Link(stream.read) +} + +func (s *Stream) unlink(stream *Stream) { + s.mu.Lock() + defer s.mu.Unlock() + + for i, link := range s.links { + if stream == link { + s.links = append(s.links[:i], s.links[i+1:]...) 
+ s.write.Unlink(stream.read) + break + } + } +} diff --git a/pkg/port/stream_test.go b/pkg/port/stream_test.go new file mode 100644 index 00000000..2c12eb65 --- /dev/null +++ b/pkg/port/stream_test.go @@ -0,0 +1,49 @@ +package port + +import ( + "testing" + + "github.com/siyul-park/uniflow/pkg/packet" + "github.com/stretchr/testify/assert" +) + +func TestStream_New(t *testing.T) { + stream := NewStream() + + select { + case <-stream.Done(): + assert.Fail(t, "stream.Done() is empty.") + default: + } +} + +func TestStream_Link(t *testing.T) { + stream1 := NewStream() + stream2 := NewStream() + + stream1.Link(stream2) + + pck := packet.New(nil) + + stream1.Send(pck) + + assert.Equal(t, pck, <-stream2.Receive()) +} + +func TestStream_Unlink(t *testing.T) { + stream1 := NewStream() + stream2 := NewStream() + + stream1.Link(stream2) + stream1.Unlink(stream2) + + pck := packet.New(nil) + + stream1.Send(pck) + + select { + case <-stream2.Receive(): + assert.Fail(t, "stream should not receive and packet.") + default: + } +} diff --git a/pkg/primitive/binary.go b/pkg/primitive/binary.go new file mode 100644 index 00000000..b7a8aa0d --- /dev/null +++ b/pkg/primitive/binary.go @@ -0,0 +1,101 @@ +package primitive + +import ( + "encoding" + "fmt" + "hash/fnv" + "reflect" + + "github.com/pkg/errors" + encoding2 "github.com/siyul-park/uniflow/internal/encoding" +) + +type ( + // Binary is a representation of a []byte. + Binary []byte +) + +var _ Object = (Binary)(nil) + +// NewBinary returns a new Binary. +func NewBinary(value []byte) Binary { + return Binary(value) +} + +func (o Binary) Len() int { + return len([]byte(o)) +} + +func (o Binary) Get(index int) byte { + if index >= len([]byte(o)) { + return 0 + } + return o[index] +} + +// Bytes returns a raw representation. 
+func (o Binary) Bytes() []byte { + return []byte(o) +} + +func (o Binary) Kind() Kind { + return KindBinary +} + +func (o Binary) Hash() uint32 { + h := fnv.New32() + h.Write([]byte{byte(KindBinary), 0}) + h.Write([]byte(o)) + + return h.Sum32() +} + +func (o Binary) Interface() any { + return []byte(o) +} + +// NewBinaryEncoder is encode byte like to Binary. +func NewBinaryEncoder() encoding2.Encoder[any, Object] { + return encoding2.EncoderFunc[any, Object](func(source any) (Object, error) { + if s, ok := source.(encoding.BinaryMarshaler); ok { + if data, err := s.MarshalBinary(); err != nil { + return nil, err + } else { + return NewBinary(data), nil + } + } else if s := reflect.ValueOf(source); (s.Kind() == reflect.Slice || s.Kind() == reflect.Array) && s.Type().Elem().Kind() == reflect.Uint8 { + return NewBinary(s.Bytes()), nil + } + return nil, errors.WithStack(encoding2.ErrUnsupportedValue) + }) +} + +// NewBinaryDecoder is decode Binary to byte like. +func NewBinaryDecoder() encoding2.Decoder[Object, any] { + return encoding2.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(Binary); ok { + if t, ok := target.(encoding.BinaryUnmarshaler); ok { + return t.UnmarshalBinary(s.Bytes()) + } else if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if (t.Elem().Kind() == reflect.Slice || t.Elem().Kind() == reflect.Array) && t.Elem().Type().Elem().Kind() == reflect.Uint8 { + for i := 0; i < s.Len(); i++ { + if t.Elem().Len() < i+1 { + if t.Elem().Kind() == reflect.Slice { + t.Elem().Set(reflect.Append(t.Elem(), reflect.ValueOf(s.Get(i)))) + } else { + return errors.WithMessage(encoding2.ErrUnsupportedValue, fmt.Sprintf("index(%d) is exceeded len(%d)", i, t.Elem().Len())) + } + } else { + t.Elem().Index(i).Set(reflect.ValueOf(s.Get(i))) + } + } + return nil + } else if t.Elem().Type() == typeAny { + t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return 
errors.WithStack(encoding2.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/binary_test.go b/pkg/primitive/binary_test.go new file mode 100644 index 00000000..3f48fd17 --- /dev/null +++ b/pkg/primitive/binary_test.go @@ -0,0 +1,44 @@ +package primitive + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewBinary(t *testing.T) { + v := NewBinary([]byte{0}) + + assert.Equal(t, KindBinary, v.Kind()) + assert.Equal(t, []byte{0}, v.Interface()) +} + +func TestBinary_Hash(t *testing.T) { + assert.NotEqual(t, NewBinary([]byte{0}).Hash(), NewBinary([]byte{1}).Hash()) + assert.Equal(t, NewBinary(nil).Hash(), NewBinary(nil).Hash()) + assert.Equal(t, NewBinary([]byte{0}).Hash(), NewBinary([]byte{0}).Hash()) +} + +func TestBinary_Get(t *testing.T) { + v := NewBinary([]byte{0}) + + assert.Equal(t, 1, v.Len()) + assert.Equal(t, byte(0), v.Get(0)) +} + +func TestBinary_Encode(t *testing.T) { + e := NewBinaryEncoder() + + v, err := e.Encode([]byte{0}) + assert.NoError(t, err) + assert.Equal(t, NewBinary([]byte{0}), v) +} + +func TestBinary_Decode(t *testing.T) { + d := NewBinaryDecoder() + + var v []byte + err := d.Decode(NewBinary([]byte{0}), &v) + assert.NoError(t, err) + assert.Equal(t, []byte{0}, v) +} diff --git a/pkg/primitive/bool.go b/pkg/primitive/bool.go new file mode 100644 index 00000000..a098dad6 --- /dev/null +++ b/pkg/primitive/bool.go @@ -0,0 +1,80 @@ +package primitive + +import ( + "hash/fnv" + "reflect" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" +) + +type ( + // Bool is a representation of a bool + Bool bool +) + +var _ Object = (Bool)(false) + +var ( + TRUE = NewBool(true) + FALSE = NewBool(false) +) + +// NewBool returns a new Bool. +func NewBool(value bool) Bool { + return Bool(value) +} + +// Bool returns a raw representation. 
+func (o Bool) Bool() bool { + return bool(o) +} + +func (o Bool) Kind() Kind { + return KindBool +} + +func (o Bool) Hash() uint32 { + var v byte + if o { + v |= 1 + } + + h := fnv.New32() + h.Write([]byte{byte(KindBool), 0}) + h.Write([]byte{v}) + + return h.Sum32() +} + +func (o Bool) Interface() any { + return bool(o) +} + +// NewBoolEncoder is encode bool to Bool. +func NewBoolEncoder() encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s := reflect.ValueOf(source); s.Kind() == reflect.Bool { + return NewBool(s.Bool()), nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewBoolDecoder is decode Bool to bool. +func NewBoolDecoder() encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(Bool); ok { + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.Bool { + t.Elem().Set(reflect.ValueOf(s.Bool())) + return nil + } else if t.Elem().Type() == typeAny { + t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/bool_test.go b/pkg/primitive/bool_test.go new file mode 100644 index 00000000..643b4161 --- /dev/null +++ b/pkg/primitive/bool_test.go @@ -0,0 +1,37 @@ +package primitive + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewBool(t *testing.T) { + v := NewBool(true) + + assert.Equal(t, KindBool, v.Kind()) + assert.Equal(t, true, v.Interface()) +} + +func TestBool_Hash(t *testing.T) { + assert.NotEqual(t, TRUE.Hash(), FALSE.Hash()) + assert.Equal(t, TRUE.Hash(), TRUE.Hash()) + assert.Equal(t, FALSE.Hash(), FALSE.Hash()) +} + +func TestBool_Encode(t *testing.T) { + e := NewBoolEncoder() + + v, err := e.Encode(true) + assert.NoError(t, err) + assert.Equal(t, TRUE, v) +} + +func TestBool_Decode(t *testing.T) { + 
d := NewBoolDecoder() + + var v bool + err := d.Decode(TRUE, &v) + assert.NoError(t, err) + assert.Equal(t, true, v) +} diff --git a/pkg/primitive/encoding.go b/pkg/primitive/encoding.go new file mode 100644 index 00000000..d612007a --- /dev/null +++ b/pkg/primitive/encoding.go @@ -0,0 +1,67 @@ +package primitive + +import ( + "reflect" + + "github.com/siyul-park/uniflow/internal/encoding" +) + +var ( + textEncoder = encoding.NewEncoderGroup[any, Object]() + binaryEncoder = encoding.NewEncoderGroup[any, Object]() + decoder = encoding.NewDecoderGroup[Object, any]() +) + +var ( + typeAny = reflect.TypeOf((*any)(nil)).Elem() +) + +func init() { + textEncoder.Add(NewShortcutEncoder()) + textEncoder.Add(NewBoolEncoder()) + textEncoder.Add(NewFloatEncoder()) + textEncoder.Add(NewIntEncoder()) + textEncoder.Add(NewUintEncoder()) + textEncoder.Add(NewStringEncoder()) + textEncoder.Add(NewBinaryEncoder()) + textEncoder.Add(NewSliceEncoder(textEncoder)) + textEncoder.Add(NewMapEncoder(textEncoder)) + textEncoder.Add(NewPointerEncoder(textEncoder)) + + binaryEncoder.Add(NewShortcutEncoder()) + binaryEncoder.Add(NewBoolEncoder()) + binaryEncoder.Add(NewFloatEncoder()) + binaryEncoder.Add(NewIntEncoder()) + binaryEncoder.Add(NewUintEncoder()) + binaryEncoder.Add(NewBinaryEncoder()) + binaryEncoder.Add(NewStringEncoder()) + binaryEncoder.Add(NewSliceEncoder(binaryEncoder)) + binaryEncoder.Add(NewMapEncoder(binaryEncoder)) + binaryEncoder.Add(NewPointerEncoder(binaryEncoder)) + + decoder.Add(NewShortcutDecoder()) + decoder.Add(NewBoolDecoder()) + decoder.Add(NewFloatDecoder()) + decoder.Add(NewIntDecoder()) + decoder.Add(NewUintDecoder()) + decoder.Add(NewStringDecoder()) + decoder.Add(NewBinaryDecoder()) + decoder.Add(NewSliceDecoder(decoder)) + decoder.Add(NewMapDecoder(decoder)) + decoder.Add(NewPointerDecoder(decoder)) +} + +// MarshalText returns the Object of v. 
+func MarshalText(v any) (Object, error) { + return textEncoder.Encode(v) +} + +// MarshalBinary returns the Object of v. +func MarshalBinary(v any) (Object, error) { + return binaryEncoder.Encode(v) +} + +// Unmarshal parses the Object and stores the result. +func Unmarshal(data Object, v any) error { + return decoder.Decode(data, v) +} diff --git a/pkg/primitive/encoding_test.go b/pkg/primitive/encoding_test.go new file mode 100644 index 00000000..4b5ab241 --- /dev/null +++ b/pkg/primitive/encoding_test.go @@ -0,0 +1,259 @@ +package primitive + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMarshalText(t *testing.T) { + var testCase = []struct { + when any + expect Object + }{ + { + when: nil, + expect: nil, + }, + { + when: []byte{0}, + expect: NewBinary([]byte{0}), + }, + { + when: true, + expect: TRUE, + }, + { + when: 0, + expect: NewInt(0), + }, + { + when: int8(0), + expect: NewInt8(0), + }, + { + when: int16(0), + expect: NewInt16(0), + }, + { + when: int32(0), + expect: NewInt32(0), + }, + { + when: int64(0), + expect: NewInt64(0), + }, + { + when: uint8(0), + expect: NewUint8(0), + }, + { + when: uint16(0), + expect: NewUint16(0), + }, + { + when: uint32(0), + expect: NewUint32(0), + }, + { + when: uint64(0), + expect: NewUint64(0), + }, + { + when: float32(0), + expect: NewFloat32(0), + }, + { + when: float64(0), + expect: NewFloat64(0), + }, + { + when: "a", + expect: NewString("a"), + }, + { + when: []string{"a", "b", "c"}, + expect: NewSlice(NewString("a"), NewString("b"), NewString("c")), + }, + { + when: map[string]string{"a": "a", "b": "b", "c": "c"}, + expect: NewMap(NewString("a"), NewString("a"), NewString("b"), NewString("b"), NewString("c"), NewString("c")), + }, + } + + for _, tc := range testCase { + t.Run(fmt.Sprintf("%v", tc.when), func(t *testing.T) { + res, err := MarshalText(tc.when) + assert.NoError(t, err) + assert.Equal(t, tc.expect, res) + }) + } +} + +func TestMarshalBinary(t 
*testing.T) { + var testCase = []struct { + when any + expect Object + }{ + { + when: nil, + expect: nil, + }, + { + when: []byte{0}, + expect: NewBinary([]byte{0}), + }, + { + when: true, + expect: TRUE, + }, + { + when: 0, + expect: NewInt(0), + }, + { + when: int8(0), + expect: NewInt8(0), + }, + { + when: int16(0), + expect: NewInt16(0), + }, + { + when: int32(0), + expect: NewInt32(0), + }, + { + when: int64(0), + expect: NewInt64(0), + }, + { + when: uint8(0), + expect: NewUint8(0), + }, + { + when: uint16(0), + expect: NewUint16(0), + }, + { + when: uint32(0), + expect: NewUint32(0), + }, + { + when: uint64(0), + expect: NewUint64(0), + }, + { + when: float32(0), + expect: NewFloat32(0), + }, + { + when: float64(0), + expect: NewFloat64(0), + }, + { + when: "a", + expect: NewString("a"), + }, + { + when: []string{"a", "b", "c"}, + expect: NewSlice(NewString("a"), NewString("b"), NewString("c")), + }, + { + when: map[string]string{"a": "a", "b": "b", "c": "c"}, + expect: NewMap(NewString("a"), NewString("a"), NewString("b"), NewString("b"), NewString("c"), NewString("c")), + }, + } + + for _, tc := range testCase { + t.Run(fmt.Sprintf("%v", tc.when), func(t *testing.T) { + res, err := MarshalBinary(tc.when) + assert.NoError(t, err) + assert.Equal(t, tc.expect, res) + }) + } +} + +func TestUnmarshal(t *testing.T) { + var testCase = []struct { + when Object + expect any + }{ + { + expect: []byte{0}, + when: NewBinary([]byte{0}), + }, + { + when: TRUE, + expect: true, + }, + { + when: NewInt(0), + expect: 0, + }, + { + when: NewInt8(0), + expect: int8(0), + }, + { + when: NewInt16(0), + expect: int16(0), + }, + { + when: NewInt32(0), + expect: int32(0), + }, + { + when: NewInt64(0), + expect: int64(0), + }, + { + when: NewUint8(0), + expect: uint8(0), + }, + { + when: NewUint16(0), + expect: uint16(0), + }, + { + when: NewUint32(0), + expect: uint32(0), + }, + { + when: NewUint64(0), + expect: uint64(0), + }, + { + when: NewFloat32(0), + expect: float32(0), + }, 
+		{
+			when:   NewFloat64(0),
+			expect: float64(0),
+		},
+		{
+			when:   NewString("a"),
+			expect: "a",
+		},
+		{
+			when:   NewSlice(NewString("a"), NewString("b"), NewString("c")),
+			expect: []string{"a", "b", "c"},
+		},
+		{
+			when:   NewMap(NewString("a"), NewString("a"), NewString("b"), NewString("b"), NewString("c"), NewString("c")),
+			expect: map[string]string{"a": "a", "b": "b", "c": "c"},
+		},
+	}
+
+	for _, tc := range testCase {
+		t.Run(fmt.Sprintf("%v", tc.when), func(t *testing.T) {
+			zero := reflect.New(reflect.ValueOf(tc.expect).Type())
+
+			err := Unmarshal(tc.when, zero.Interface())
+			assert.NoError(t, err)
+			assert.Equal(t, tc.expect, zero.Elem().Interface())
+		})
+	}
+}
diff --git a/pkg/primitive/float.go b/pkg/primitive/float.go
new file mode 100644
index 00000000..93f7585a
--- /dev/null
+++ b/pkg/primitive/float.go
@@ -0,0 +1,131 @@
+package primitive
+
+import (
+	"encoding/binary"
+	"hash/fnv"
+	"math"
+	"reflect"
+
+	"github.com/pkg/errors"
+	"github.com/siyul-park/uniflow/internal/encoding"
+)
+
+type (
+	Float interface {
+		Object
+		Float() float64
+	}
+	// Float32 is a representation of a float32.
+	Float32 float32
+	// Float64 is a representation of a float64.
+	Float64 float64
+)
+
+var _ Float = (Float32)(0)
+var _ Float = (Float64)(0)
+
+// NewFloat32 returns a new Float32.
+func NewFloat32(value float32) Float32 {
+	return Float32(value)
+}
+
+// Float returns a raw representation.
+func (o Float32) Float() float64 {
+	return float64(o)
+}
+
+func (o Float32) Kind() Kind {
+	return KindFloat32
+}
+
+func (o Float32) Hash() uint32 {
+	var buf [4]byte
+	binary.BigEndian.PutUint32(buf[:], math.Float32bits(float32(o)))
+
+	h := fnv.New32()
+	h.Write([]byte{byte(KindFloat32), 0})
+	h.Write(buf[:])
+
+	return h.Sum32()
+}
+
+func (o Float32) Interface() any {
+	return float32(o)
+}
+
+// NewFloat64 returns a new Float64.
+func NewFloat64(value float64) Float64 {
+	return Float64(value)
+}
+
+// Float returns a raw representation.
+func (o Float64) Float() float64 { + return float64(o) +} + +func (o Float64) Kind() Kind { + return KindFloat64 +} + +func (o Float64) Hash() uint32 { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], math.Float64bits(float64(o))) + + h := fnv.New32() + h.Write([]byte{byte(KindFloat64), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Float64) Interface() any { + return float64(o) +} + +// NewFloatEncoder is encode float to Float. +func NewFloatEncoder() encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s := reflect.ValueOf(source); s.Kind() == reflect.Float32 { + return NewFloat32(float32(s.Float())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Float64 { + return NewFloat64(float64(s.Float())), nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewFloatDecoder is decode Float to float. +func NewFloatDecoder() encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(Float); ok { + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.Float32 { + t.Elem().Set(reflect.ValueOf(float32(s.Float()))) + return nil + } else if t.Elem().Kind() == reflect.Float64 { + t.Elem().Set(reflect.ValueOf(float64(s.Float()))) + return nil + } else if t.Elem().Kind() == reflect.Int { + t.Elem().Set(reflect.ValueOf(int(s.Float()))) + return nil + } else if t.Elem().Kind() == reflect.Int8 { + t.Elem().Set(reflect.ValueOf(int8(s.Float()))) + return nil + } else if t.Elem().Kind() == reflect.Int16 { + t.Elem().Set(reflect.ValueOf(int16(s.Float()))) + return nil + } else if t.Elem().Kind() == reflect.Int32 { + t.Elem().Set(reflect.ValueOf(int32(s.Float()))) + return nil + } else if t.Elem().Kind() == reflect.Int64 { + t.Elem().Set(reflect.ValueOf(int64(s.Float()))) + return nil + } else if t.Elem().Type() == typeAny { + 
t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/float_test.go b/pkg/primitive/float_test.go new file mode 100644 index 00000000..6de5373d --- /dev/null +++ b/pkg/primitive/float_test.go @@ -0,0 +1,68 @@ +package primitive + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewFloat(t *testing.T) { + t.Run("32", func(t *testing.T) { + v := NewFloat32(0) + + assert.Equal(t, KindFloat32, v.Kind()) + assert.Equal(t, float32(0), v.Interface()) + }) + t.Run("64", func(t *testing.T) { + v := NewFloat64(0) + + assert.Equal(t, KindFloat64, v.Kind()) + assert.Equal(t, float64(0), v.Interface()) + }) +} + +func TestFloat_Hash(t *testing.T) { + t.Run("32", func(t *testing.T) { + assert.NotEqual(t, NewFloat32(0).Hash(), NewFloat32(1).Hash()) + assert.Equal(t, NewFloat32(0).Hash(), NewFloat32(0).Hash()) + assert.Equal(t, NewFloat32(1).Hash(), NewFloat32(1).Hash()) + }) + + t.Run("64", func(t *testing.T) { + assert.NotEqual(t, NewFloat64(0).Hash(), NewFloat64(1).Hash()) + assert.Equal(t, NewFloat64(0).Hash(), NewFloat64(0).Hash()) + assert.Equal(t, NewFloat64(1).Hash(), NewFloat64(1).Hash()) + }) +} + +func TestFloat_Encode(t *testing.T) { + e := NewFloatEncoder() + + t.Run("32", func(t *testing.T) { + v, err := e.Encode(float32(1)) + assert.NoError(t, err) + assert.Equal(t, NewFloat32(1), v) + }) + t.Run("64", func(t *testing.T) { + v, err := e.Encode(float64(1)) + assert.NoError(t, err) + assert.Equal(t, NewFloat64(1), v) + }) +} + +func TestFloat_Decode(t *testing.T) { + d := NewFloatDecoder() + + t.Run("32", func(t *testing.T) { + var v float32 + err := d.Decode(NewFloat32(1), &v) + assert.NoError(t, err) + assert.Equal(t, float32(1), v) + }) + t.Run("64", func(t *testing.T) { + var v float64 + err := d.Decode(NewFloat64(1), &v) + assert.NoError(t, err) + assert.Equal(t, float64(1), v) + }) +} diff --git a/pkg/primitive/getter.go 
b/pkg/primitive/getter.go
new file mode 100644
index 00000000..b0ed5ff7
--- /dev/null
+++ b/pkg/primitive/getter.go
@@ -0,0 +1,61 @@
+package primitive
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	numberSubPath = regexp.MustCompile(`\[([0-9]+)\]`)
+)
+
+// Get resolves a dot/bracket path (e.g. "a.b[0]") against v and returns
+// the value converted to T. The boolean reports whether the path resolved
+// and the resulting value was assignable to T.
+func Get[T any](v Object, path string) (T, bool) {
+	paths := parsePath(path)
+
+	var zero T
+
+	cur := v
+	for _, path := range paths {
+		switch v := cur.(type) {
+		case *Map:
+			child, ok := v.Get(NewString(path))
+			if !ok {
+				return zero, false
+			}
+			cur = child
+
+		case *Slice:
+			index, err := strconv.Atoi(path)
+			// Reject negative indexes too: they never come from the "[n]"
+			// bracket form, but a raw path segment like "-1" parses fine
+			// and would otherwise reach Slice.Get out of range.
+			if err != nil || index < 0 || index >= v.Len() {
+				return zero, false
+			}
+			cur = v.Get(index)
+		default:
+			return zero, false
+		}
+	}
+
+	if v, ok := cur.(T); ok {
+		return v, true
+	} else if cur == nil {
+		return zero, false
+	} else if v, ok := cur.Interface().(T); ok {
+		return v, true
+	} else {
+		return zero, false
+	}
+}
+
+// parsePath normalizes bracket indexes ("[0]" -> ".0") and splits the
+// path on dots.
+func parsePath(key string) []string {
+	key = numberSubPath.ReplaceAllString(key, ".$1")
+	return strings.Split(key, ".")
+}
diff --git a/pkg/primitive/int.go b/pkg/primitive/int.go
new file mode 100644
index 00000000..804271a9
--- /dev/null
+++ b/pkg/primitive/int.go
@@ -0,0 +1,227 @@
+package primitive
+
+import (
+	"hash/fnv"
+	"reflect"
+	"unsafe"
+
+	"github.com/pkg/errors"
+	"github.com/siyul-park/uniflow/internal/encoding"
+)
+
+type (
+	Integer interface {
+		Object
+		Int() int64
+	}
+	// Int is a representation of a int.
+	Int int
+	// Int8 is a representation of a int8.
+	Int8 int8
+	// Int16 is a representation of a int16.
+	Int16 int16
+	// Int32 is a representation of a int32.
+	Int32 int32
+	// Int64 is a representation of a int64.
+	Int64 int64
+)
+
+var _ Integer = (Int)(0)
+var _ Integer = (Int8)(0)
+var _ Integer = (Int16)(0)
+var _ Integer = (Int32)(0)
+var _ Integer = (Int64)(0)
+
+// NewInt returns a new Int.
+func NewInt(value int) Int {
+	return Int(value)
+}
+
+// Int returns a raw representation.
+func (o Int) Int() int64 { + return int64(o) +} + +func (o Int) Kind() Kind { + return KindInt +} + +func (o Int) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindInt), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Int) Interface() any { + return int(o) +} + +// NewInt8 returns a new Int8. +func NewInt8(value int8) Int8 { + return Int8(value) +} + +// Int returns a raw representation. +func (o Int8) Int() int64 { + return int64(o) +} + +func (o Int8) Kind() Kind { + return KindInt8 +} + +func (o Int8) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindInt8), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Int8) Interface() any { + return int8(o) +} + +// NewInt16 returns a new Int16. +func NewInt16(value int16) Int16 { + return Int16(value) +} + +// Int returns a raw representation. +func (o Int16) Int() int64 { + return int64(o) +} + +func (o Int16) Kind() Kind { + return KindInt16 +} + +func (o Int16) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindInt16), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Int16) Interface() any { + return int16(o) +} + +// NewInt32 returns a new Int32. +func NewInt32(value int32) Int32 { + return Int32(value) +} + +// Int returns a raw representation. +func (o Int32) Int() int64 { + return int64(o) +} + +func (o Int32) Kind() Kind { + return KindInt32 +} + +func (o Int32) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindInt32), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Int32) Interface() any { + return int32(o) +} + +// NewInt64 returns a new Int64. +func NewInt64(value int64) Int64 { + return Int64(value) +} + +// Int returns a raw representation. 
+func (o Int64) Int() int64 { + return int64(o) +} + +func (o Int64) Kind() Kind { + return KindInt64 +} + +func (o Int64) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindInt64), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Int64) Interface() any { + return int64(o) +} + +// NewIntEncoder is encode int to Int. +func NewIntEncoder() encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s := reflect.ValueOf(source); s.Kind() == reflect.Int { + return NewInt(int(s.Int())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Int8 { + return NewInt8(int8(s.Int())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Int16 { + return NewInt16(int16(s.Int())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Int32 { + return NewInt32(int32(s.Int())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Int64 { + return NewInt64(int64(s.Int())), nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewIntDecoder is decode Int to int. 
+func NewIntDecoder() encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(Integer); ok { + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.Int { + t.Elem().Set(reflect.ValueOf(int(s.Int()))) + return nil + } else if t.Elem().Kind() == reflect.Int8 { + t.Elem().Set(reflect.ValueOf(int8(s.Int()))) + return nil + } else if t.Elem().Kind() == reflect.Int16 { + t.Elem().Set(reflect.ValueOf(int16(s.Int()))) + return nil + } else if t.Elem().Kind() == reflect.Int32 { + t.Elem().Set(reflect.ValueOf(int32(s.Int()))) + return nil + } else if t.Elem().Kind() == reflect.Int64 { + t.Elem().Set(reflect.ValueOf(int64(s.Int()))) + return nil + } else if t.Elem().Kind() == reflect.Float32 { + t.Elem().Set(reflect.ValueOf(float32(s.Int()))) + return nil + } else if t.Elem().Kind() == reflect.Float64 { + t.Elem().Set(reflect.ValueOf(float64(s.Int()))) + return nil + } else if t.Elem().Type() == typeAny { + t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/int_test.go b/pkg/primitive/int_test.go new file mode 100644 index 00000000..7bfc7b87 --- /dev/null +++ b/pkg/primitive/int_test.go @@ -0,0 +1,133 @@ +package primitive + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewInt(t *testing.T) { + t.Run("", func(t *testing.T) { + v := NewInt(0) + + assert.Equal(t, KindInt, v.Kind()) + assert.Equal(t, int(0), v.Interface()) + }) + t.Run("8", func(t *testing.T) { + v := NewInt8(0) + + assert.Equal(t, KindInt8, v.Kind()) + assert.Equal(t, int8(0), v.Interface()) + }) + t.Run("16", func(t *testing.T) { + v := NewInt16(0) + + assert.Equal(t, KindInt16, v.Kind()) + assert.Equal(t, int16(0), v.Interface()) + }) + t.Run("32", func(t *testing.T) { + v := NewInt32(0) + + assert.Equal(t, KindInt32, v.Kind()) + assert.Equal(t, 
int32(0), v.Interface()) + }) + t.Run("64", func(t *testing.T) { + v := NewInt64(0) + + assert.Equal(t, KindInt64, v.Kind()) + assert.Equal(t, int64(0), v.Interface()) + }) +} + +func TestInt_Hash(t *testing.T) { + t.Run("", func(t *testing.T) { + assert.NotEqual(t, NewInt(0).Hash(), NewInt(1).Hash()) + assert.Equal(t, NewInt(0).Hash(), NewInt(0).Hash()) + assert.Equal(t, NewInt(1).Hash(), NewInt(1).Hash()) + }) + t.Run("8", func(t *testing.T) { + assert.NotEqual(t, NewInt8(0).Hash(), NewInt8(1).Hash()) + assert.Equal(t, NewInt8(0).Hash(), NewInt8(0).Hash()) + assert.Equal(t, NewInt8(1).Hash(), NewInt8(1).Hash()) + }) + t.Run("16", func(t *testing.T) { + assert.NotEqual(t, NewInt16(0).Hash(), NewInt16(1).Hash()) + assert.Equal(t, NewInt16(0).Hash(), NewInt16(0).Hash()) + assert.Equal(t, NewInt16(1).Hash(), NewInt16(1).Hash()) + }) + t.Run("32", func(t *testing.T) { + assert.NotEqual(t, NewInt32(0).Hash(), NewInt32(1).Hash()) + assert.Equal(t, NewInt32(0).Hash(), NewInt32(0).Hash()) + assert.Equal(t, NewInt32(1).Hash(), NewInt32(1).Hash()) + }) + t.Run("64", func(t *testing.T) { + assert.NotEqual(t, NewInt64(0).Hash(), NewInt64(1).Hash()) + assert.Equal(t, NewInt64(0).Hash(), NewInt64(0).Hash()) + assert.Equal(t, NewInt64(1).Hash(), NewInt64(1).Hash()) + }) +} + +func TestInt_Encode(t *testing.T) { + e := NewIntEncoder() + + t.Run("", func(t *testing.T) { + v, err := e.Encode(int(1)) + assert.NoError(t, err) + assert.Equal(t, NewInt(1), v) + }) + t.Run("8", func(t *testing.T) { + v, err := e.Encode(int8(1)) + assert.NoError(t, err) + assert.Equal(t, NewInt8(1), v) + }) + t.Run("16", func(t *testing.T) { + v, err := e.Encode(int16(1)) + assert.NoError(t, err) + assert.Equal(t, NewInt16(1), v) + }) + t.Run("32", func(t *testing.T) { + v, err := e.Encode(int32(1)) + assert.NoError(t, err) + assert.Equal(t, NewInt32(1), v) + }) + t.Run("64", func(t *testing.T) { + v, err := e.Encode(int64(1)) + assert.NoError(t, err) + assert.Equal(t, NewInt64(1), v) + }) +} + +func 
TestInt_Decode(t *testing.T) { + d := NewIntDecoder() + + t.Run("", func(t *testing.T) { + var v int + err := d.Decode(NewInt(1), &v) + assert.NoError(t, err) + assert.Equal(t, int(1), v) + }) + t.Run("8", func(t *testing.T) { + var v int8 + err := d.Decode(NewInt8(1), &v) + assert.NoError(t, err) + assert.Equal(t, int8(1), v) + }) + t.Run("16", func(t *testing.T) { + var v int16 + err := d.Decode(NewInt16(1), &v) + assert.NoError(t, err) + assert.Equal(t, int16(1), v) + }) + t.Run("32", func(t *testing.T) { + var v int32 + err := d.Decode(NewInt32(1), &v) + assert.NoError(t, err) + assert.Equal(t, int32(1), v) + }) + t.Run("64", func(t *testing.T) { + var v int64 + err := d.Decode(NewInt64(1), &v) + assert.NoError(t, err) + assert.Equal(t, int64(1), v) + }) +} diff --git a/pkg/primitive/map.go b/pkg/primitive/map.go new file mode 100644 index 00000000..8009f84a --- /dev/null +++ b/pkg/primitive/map.go @@ -0,0 +1,370 @@ +package primitive + +import ( + "fmt" + "hash/fnv" + "reflect" + "strings" + "unsafe" + + "github.com/benbjohnson/immutable" + "github.com/iancoleman/strcase" + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" +) + +type ( + // Map is a representation of a map. + Map struct { + value *immutable.SortedMap[Object, Object] + } + + mapTag struct { + alias string + ignore bool + omitempty bool + inline bool + } + + comparer struct{} +) + +const ( + tagMap = "map" +) + +var _ Object = (*Map)(nil) +var _ immutable.Comparer[Object] = (*comparer)(nil) + +// NewMap returns a new Map. 
+func NewMap(pairs ...Object) *Map { + b := immutable.NewSortedMapBuilder[Object, Object](&comparer{}) + for i := 0; i < len(pairs)/2; i++ { + k := pairs[i*2] + v := pairs[i*2+1] + + b.Set(k, v) + } + return &Map{value: b.Map()} +} + +func (o *Map) Get(key Object) (Object, bool) { + return o.value.Get(key) +} + +func (o *Map) GetOr(key, value Object) Object { + if v, ok := o.Get(key); ok { + return v + } + return value +} + +func (o *Map) Set(key, value Object) *Map { + return &Map{value: o.value.Set(key, value)} +} + +func (o *Map) Delete(key Object) *Map { + return &Map{value: o.value.Delete(key)} +} + +func (o *Map) Keys() []Object { + var keys []Object + + itr := o.value.Iterator() + for !itr.Done() { + k, _, _ := itr.Next() + keys = append(keys, k) + } + return keys +} + +func (o *Map) Len() int { + return o.value.Len() +} + +// Map returns a raw representation. +func (o *Map) Map() map[any]any { + m := make(map[any]any, o.value.Len()) + + itr := o.value.Iterator() + for !itr.Done() { + k, v, _ := itr.Next() + + // FIXME: check interface is can't be map key. + if k != nil { + if v != nil { + m[k.Interface()] = v.Interface() + } else { + m[k.Interface()] = nil + } + } + } + + return m +} + +func (o *Map) Kind() Kind { + return KindMap +} + +func (o *Map) Hash() uint32 { + h := fnv.New32() + h.Write([]byte{byte(KindMap), 0}) + + itr := o.value.Iterator() + for !itr.Done() { + k, v, _ := itr.Next() + + if k != nil { + hash := k.Hash() + buf := *(*[unsafe.Sizeof(hash)]byte)(unsafe.Pointer(&hash)) + h.Write(buf[:]) + } else { + h.Write([]byte{0}) + } + if v != nil { + hash := v.Hash() + buf := *(*[unsafe.Sizeof(hash)]byte)(unsafe.Pointer(&hash)) + h.Write(buf[:]) + } else { + h.Write([]byte{0}) + } + } + + return h.Sum32() +} + +func (o *Map) Interface() any { + var keys []any + var values []any + + itr := o.value.Iterator() + for !itr.Done() { + k, v, _ := itr.Next() + + // FIXME: check interface is can't be map key. 
+ if k != nil { + keys = append(keys, k.Interface()) + if v != nil { + values = append(values, v.Interface()) + } else { + values = append(values, nil) + } + } + } + + keyType := typeAny + valueType := typeAny + + for i, key := range keys { + typ := reflect.TypeOf(key) + if i == 0 { + keyType = typ + } else if keyType != typ { + keyType = typeAny + } + } + for i, value := range values { + typ := reflect.TypeOf(value) + if i == 0 { + valueType = typ + } else if valueType != typ { + valueType = typeAny + } + } + + t := reflect.MakeMapWithSize(reflect.MapOf(keyType, valueType), len(keys)) + for i, key := range keys { + value := values[i] + t.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + } + return t.Interface() +} + +func (*comparer) Compare(a Object, b Object) int { + if a == nil { + return -1 + } else if b == nil { + return 1 + } else if a.Kind() > b.Kind() { + return 1 + } else if a.Kind() < b.Kind() { + return -1 + } + + hashA := a.Hash() + hashB := b.Hash() + + if hashA > hashB { + return 1 + } else if hashA < hashB { + return -1 + } + + // FIXME: hash conflict. + return 0 +} + +// NewMapEncoder is encode map or struct to Map. 
+func NewMapEncoder(encoder encoding.Encoder[any, Object]) encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s := reflect.ValueOf(source); s.Kind() == reflect.Map { + pairs := make([]Object, len(s.MapKeys())*2) + for i, k := range s.MapKeys() { + if k, err := encoder.Encode(k.Interface()); err != nil { + return nil, errors.WithMessage(err, fmt.Sprintf("key(%v) can't encode", k.Interface())) + } else { + pairs[i*2] = k + } + if v, err := encoder.Encode(s.MapIndex(k).Interface()); err != nil { + return nil, errors.WithMessage(err, fmt.Sprintf("value(%v) can't encode", s.MapIndex(k).Interface())) + } else { + pairs[i*2+1] = v + } + } + return NewMap(pairs...), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Struct { + pairs := make([]Object, 0, s.NumField()*2) + for i := 0; i < s.NumField(); i++ { + field := s.Type().Field(i) + if !field.IsExported() { + continue + } + + v := s.FieldByName(field.Name) + tag := getMapTag(s.Type(), field) + + if tag.ignore || (tag.omitempty && v.IsZero()) { + continue + } + + if v, err := encoder.Encode(v.Interface()); err != nil { + return nil, errors.WithMessage(err, fmt.Sprintf("field(%s) can't encode", field.Name)) + } else { + if tag.inline { + if v, ok := v.(*Map); ok { + for _, k := range v.Keys() { + pairs = append(pairs, k) + pairs = append(pairs, v.GetOr(k, nil)) + } + } else { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } + } else { + pairs = append(pairs, NewString(tag.alias)) + pairs = append(pairs, v) + } + } + } + return NewMap(pairs...), nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewMapDecoder is decode Map to map or struct. 
+func NewMapDecoder(decoder encoding.Decoder[Object, any]) encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(*Map); ok { + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.Map { + if t.Elem().IsNil() { + t.Elem().Set(reflect.MakeMapWithSize(t.Type().Elem(), s.Len())) + } + + keyType := t.Elem().Type().Key() + valueType := t.Elem().Type().Elem() + + for _, key := range s.Keys() { + value, _ := s.Get(key) + + k := reflect.New(keyType) + v := reflect.New(valueType) + + if err := decoder.Decode(key, k.Interface()); err != nil { + return errors.WithMessage(err, fmt.Sprintf("key(%v) cannot be decoded", key.Interface())) + } else if err := decoder.Decode(value, v.Interface()); err != nil { + return errors.WithMessage(err, fmt.Sprintf("value(%v) corresponding to the key(%v) cannot be decoded", value.Interface(), key.Interface())) + } + + t.Elem().SetMapIndex(k.Elem(), v.Elem()) + } + return nil + } else if t.Elem().Kind() == reflect.Struct { + for i := 0; i < t.Elem().NumField(); i++ { + field := t.Elem().Type().Field(i) + if !field.IsExported() { + continue + } + + v := t.Elem().FieldByName(field.Name) + tag := getMapTag(t.Type().Elem(), field) + + if tag.ignore { + continue + } else if tag.inline { + if err := decoder.Decode(s, v.Addr().Interface()); err != nil { + return err + } else { + continue + } + } + + value, ok := s.Get(NewString(tag.alias)) + if !ok || reflect.ValueOf(value.Interface()).IsZero() { + if tag.omitempty { + continue + } else { + return errors.WithMessage(encoding.ErrUnsupportedValue, fmt.Sprintf("key(%v) is zero value", field.Name)) + } + } else if err := decoder.Decode(value, v.Addr().Interface()); err != nil { + return errors.WithMessage(err, fmt.Sprintf("value(%v) corresponding to the key(%v) cannot be decoded", value.Interface(), field.Name)) + } + } + return nil + } else if t.Elem().Type() == typeAny { + 
t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +func getMapTag(t reflect.Type, f reflect.StructField) mapTag { + k := strcase.ToSnake(f.Name) + tag := f.Tag.Get(tagMap) + + if tag != "" { + if tag == "-" { + return mapTag{ + ignore: true, + } + } + + if index := strings.Index(tag, ","); index != -1 { + mtag := mapTag{} + mtag.alias = k + if tag[:index] != "" { + mtag.alias = tag[:index] + } + + if tag[index+1:] == "omitempty" { + mtag.omitempty = true + } else if tag[index+1:] == "inline" { + mtag.alias = "" + mtag.inline = true + } + return mtag + } else { + return mapTag{ + alias: tag, + } + } + } + + return mapTag{ + alias: k, + } +} diff --git a/pkg/primitive/map_test.go b/pkg/primitive/map_test.go new file mode 100644 index 00000000..fd599552 --- /dev/null +++ b/pkg/primitive/map_test.go @@ -0,0 +1,116 @@ +package primitive + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" +) + +func TestNewMap(t *testing.T) { + k1 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + + o := NewMap(k1, v1) + + assert.Equal(t, KindMap, o.Kind()) + assert.Equal(t, map[string]string{k1.String(): v1.String()}, o.Interface()) +} + +func TestMap_Hash(t *testing.T) { + k1 := NewString(faker.Word()) + k2 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + v2 := NewString(faker.Word()) + + assert.NotEqual(t, NewMap(k1, v1).Hash(), NewMap(k2, v2).Hash()) + assert.Equal(t, NewMap().Hash(), NewMap().Hash()) + assert.Equal(t, NewMap(k1, v1).Hash(), NewMap(k1, v1).Hash()) + assert.Equal(t, NewMap(k1, v1, k2, v2).Hash(), NewMap(k2, v2, k1, v1).Hash()) +} + +func TestMap_GetAndSetAndDelete(t *testing.T) { + k1 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + + o := NewMap() + o = o.Set(k1, v1) + + r1, ok := o.Get(k1) + assert.True(t, ok) + assert.Equal(t, v1, r1) + + o = o.Delete(k1) + + r2, ok := o.Get(k1) + assert.False(t, ok) + 
assert.Nil(t, r2) +} + +func TestMap_Keys(t *testing.T) { + k1 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + + o := NewMap(k1, v1) + + assert.Len(t, o.Keys(), 1) + assert.Contains(t, o.Keys(), k1) +} + +func TestMap_Len(t *testing.T) { + k1 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + + o1 := NewMap() + o2 := NewMap(k1, v1) + + assert.Equal(t, 0, o1.Len()) + assert.Equal(t, 1, o2.Len()) +} + +func TestMap_Encode(t *testing.T) { + e := NewMapEncoder(NewStringEncoder()) + + t.Run("map", func(t *testing.T) { + k1 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + + v, err := e.Encode(map[string]string{k1.String(): v1.String()}) + assert.NoError(t, err) + assert.Equal(t, NewMap(k1, v1), v) + }) + + t.Run("struct", func(t *testing.T) { + v1 := NewString(faker.Word()) + + v, err := e.Encode(struct { + K1 string + }{ + K1: v1.String(), + }) + assert.NoError(t, err) + assert.Equal(t, NewMap(NewString("k_1"), v1).Hash(), v.Hash()) + }) +} + +func TestMap_Decode(t *testing.T) { + d := NewMapDecoder(NewStringDecoder()) + + t.Run("map", func(t *testing.T) { + k1 := NewString(faker.Word()) + v1 := NewString(faker.Word()) + + var v map[string]string + err := d.Decode(NewMap(k1, v1), &v) + assert.NoError(t, err) + assert.Equal(t, map[string]string{k1.String(): v1.String()}, v) + }) + t.Run("struct", func(t *testing.T) { + v1 := NewString(faker.Word()) + + var v struct{ K1 string } + err := d.Decode(NewMap(NewString("k_1"), v1), &v) + assert.NoError(t, err) + assert.Equal(t, v1.String(), v.K1) + }) +} diff --git a/pkg/primitive/object.go b/pkg/primitive/object.go new file mode 100644 index 00000000..3939177c --- /dev/null +++ b/pkg/primitive/object.go @@ -0,0 +1,45 @@ +package primitive + +import "github.com/siyul-park/uniflow/internal/util" + +type ( + // Object is an atomic type. 
+ Object interface { + Kind() Kind + Hash() uint32 + Interface() any + } + + Kind uint +) + +const ( + KindInvalid Kind = iota + KindBinary + KindBool + KindInt + KindInt8 + KindInt16 + KindInt32 + KindInt64 + KindUint + KindUint8 + KindUint16 + KindUint32 + KindUint64 + KindFloat32 + KindFloat64 + KindMap + KindSlice + KindString +) + +func Interface(v any) any { + if util.IsNil(v) { + return nil + } else if v, ok := v.(Object); !ok { + return nil + } else { + return v.Interface() + } +} diff --git a/pkg/primitive/pointer.go b/pkg/primitive/pointer.go new file mode 100644 index 00000000..df4cfd34 --- /dev/null +++ b/pkg/primitive/pointer.go @@ -0,0 +1,39 @@ +package primitive + +import ( + "reflect" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" + "github.com/siyul-park/uniflow/internal/util" +) + +// NewPointerEncoder is encode *T to T. +func NewPointerEncoder(encoder encoding.Encoder[any, Object]) encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if util.IsNil(source) { + return nil, nil + } + if s := reflect.ValueOf(source); s.Kind() == reflect.Pointer { + return encoder.Encode(s.Elem().Interface()) + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewPointerDecoder is decode T to *T. 
+func NewPointerDecoder(decoder encoding.Decoder[Object, any]) encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if util.IsNil(source) { + return nil + } + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer && t.Elem().Kind() == reflect.Pointer { + if t.Elem().IsNil() { + zero := reflect.New(t.Type().Elem().Elem()) + t.Elem().Set(zero) + } + return decoder.Decode(source, t.Elem().Interface()) + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/pointer_test.go b/pkg/primitive/pointer_test.go new file mode 100644 index 00000000..0ab8e31b --- /dev/null +++ b/pkg/primitive/pointer_test.go @@ -0,0 +1,30 @@ +package primitive + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" +) + +func TestPointer_Encode(t *testing.T) { + e := NewPointerEncoder(NewStringEncoder()) + + r1 := faker.Word() + v1 := NewString(r1) + + v, err := e.Encode(&r1) + assert.NoError(t, err) + assert.Equal(t, v1, v) +} + +func TestPointer_Decode(t *testing.T) { + d := NewPointerDecoder(NewStringDecoder()) + + v1 := NewString(faker.Word()) + + var v *string + err := d.Decode(v1, &v) + assert.NoError(t, err) + assert.Equal(t, v1.String(), *v) +} diff --git a/pkg/primitive/shortcut.go b/pkg/primitive/shortcut.go new file mode 100644 index 00000000..2af7e91b --- /dev/null +++ b/pkg/primitive/shortcut.go @@ -0,0 +1,27 @@ +package primitive + +import ( + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" +) + +// NewPointerEncoder is encode Object to Object. +func NewShortcutEncoder() encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s, ok := source.(Object); ok { + return s, nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewShortcutDecoder is decode Object to Object. 
+func NewShortcutDecoder() encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if t, ok := target.(*Object); ok { + *t = source + return nil + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/slice.go b/pkg/primitive/slice.go new file mode 100644 index 00000000..7faa9ba6 --- /dev/null +++ b/pkg/primitive/slice.go @@ -0,0 +1,182 @@ +package primitive + +import ( + "fmt" + "hash/fnv" + "reflect" + "unsafe" + + "github.com/benbjohnson/immutable" + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" +) + +type ( + // Slice is a representation of a slice. + Slice struct { + value *immutable.List[Object] + } +) + +var _ Object = (*Slice)(nil) + +// NewSlice returns a new Slice. +func NewSlice(values ...Object) *Slice { + b := immutable.NewListBuilder[Object]() + for _, v := range values { + b.Append(v) + } + return &Slice{value: b.List()} +} + +func (o *Slice) Prepend(value Object) *Slice { + return &Slice{value: o.value.Prepend(value)} +} + +func (o *Slice) Append(value Object) *Slice { + return &Slice{value: o.value.Append(value)} +} + +func (o *Slice) Sub(start, end int) *Slice { + return &Slice{value: o.value.Slice(start, end)} +} + +func (o *Slice) Get(index int) Object { + if index >= o.value.Len() { + return nil + } + return o.value.Get(index) +} + +func (o *Slice) Set(index int, value Object) *Slice { + if index < 0 && index >= o.value.Len() { + return o + } + return &Slice{value: o.value.Set(index, value)} +} + +func (o *Slice) Len() int { + return o.value.Len() +} + +// Slice returns a raw representation. +func (o *Slice) Slice() []any { + // TODO: support more type defined slice. 
+ s := make([]any, o.value.Len()) + + itr := o.value.Iterator() + for !itr.Done() { + i, v := itr.Next() + + if v != nil { + s[i] = v.Interface() + } + } + + return s +} + +func (o *Slice) Kind() Kind { + return KindSlice +} + +func (o *Slice) Hash() uint32 { + h := fnv.New32() + h.Write([]byte{byte(KindSlice), 0}) + + itr := o.value.Iterator() + for !itr.Done() { + _, v := itr.Next() + + if v != nil { + hash := v.Hash() + buf := *(*[unsafe.Sizeof(hash)]byte)(unsafe.Pointer(&hash)) + h.Write(buf[:]) + } else { + h.Write([]byte{0}) + } + } + + return h.Sum32() +} + +func (o *Slice) Interface() any { + var values []any + itr := o.value.Iterator() + for !itr.Done() { + _, v := itr.Next() + if v != nil { + values = append(values, v.Interface()) + } else { + values = append(values, nil) + } + } + + valueType := typeAny + + for i, value := range values { + typ := reflect.TypeOf(value) + if i == 0 { + valueType = typ + } else if valueType != typ { + valueType = typeAny + } + } + + t := reflect.MakeSlice(reflect.SliceOf(valueType), o.value.Len(), o.value.Len()) + for i, value := range values { + t.Index(i).Set(reflect.ValueOf(value)) + } + return t.Interface() +} + +// NewSliceEncoder is encode slice or array to Slice. +func NewSliceEncoder(encoder encoding.Encoder[any, Object]) encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s := reflect.ValueOf(source); s.Kind() == reflect.Slice || s.Kind() == reflect.Array { + values := make([]Object, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, err := encoder.Encode(s.Index(i).Interface()); err != nil { + return nil, err + } else { + values[i] = v + } + } + return NewSlice(values...), nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewSliceDecoder is decode Slice to slice or array. 
+func NewSliceDecoder(decoder encoding.Decoder[Object, any]) encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(*Slice); ok { + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.Slice || t.Elem().Kind() == reflect.Array { + for i := 0; i < s.Len(); i++ { + value := s.Get(i) + v := reflect.New(t.Elem().Type().Elem()) + if err := decoder.Decode(value, v.Interface()); err != nil { + return errors.WithMessage(err, fmt.Sprintf("value(%v) corresponding to the index(%v) cannot be decoded", value.Interface(), i)) + } + if t.Elem().Len() < i+1 { + if t.Elem().Kind() == reflect.Slice { + t.Elem().Set(reflect.Append(t.Elem(), v.Elem())) + } else { + return errors.WithMessage(encoding.ErrUnsupportedValue, fmt.Sprintf("index(%d) is exceeded len(%d)", i, t.Elem().Len())) + } + } else { + t.Elem().Index(i).Set(v.Elem()) + } + } + return nil + } else if t.Elem().Type() == typeAny { + t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/slice_test.go b/pkg/primitive/slice_test.go new file mode 100644 index 00000000..a2f2bd0b --- /dev/null +++ b/pkg/primitive/slice_test.go @@ -0,0 +1,95 @@ +package primitive + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" +) + +func TestNewSlice(t *testing.T) { + v1 := NewString(faker.Word()) + + o := NewSlice(v1) + + assert.Equal(t, KindSlice, o.Kind()) + assert.Equal(t, []string{v1.String()}, o.Interface()) +} + +func TestSlice_Hash(t *testing.T) { + v1 := NewString(faker.Word()) + v2 := NewString(faker.Word()) + + assert.NotEqual(t, NewSlice(v1, v2).Hash(), NewSlice(v2, v1).Hash()) + assert.Equal(t, NewSlice().Hash(), NewSlice().Hash()) + assert.Equal(t, NewSlice(v1, v2).Hash(), NewSlice(v1, v2).Hash()) +} + +func TestSlice_GetAndSet(t *testing.T) { + v1 := 
NewString(faker.Word()) + v2 := NewString(faker.Word()) + + o := NewSlice(v1) + + r1 := o.Get(0) + assert.Equal(t, v1, r1) + + r2 := o.Get(1) + assert.Nil(t, r2) + + o = o.Set(0, v2) + + r3 := o.Get(0) + assert.Equal(t, v2, r3) +} + +func TestSlice_Prepend(t *testing.T) { + v := NewString(faker.Word()) + + o := NewSlice() + o = o.Prepend(v) + + assert.Equal(t, 1, o.Len()) +} + +func TestSlice_Append(t *testing.T) { + v := NewString(faker.Word()) + + o := NewSlice() + o = o.Append(v) + + assert.Equal(t, 1, o.Len()) +} + +func TestSlice_Sub(t *testing.T) { + v1 := NewString(faker.Word()) + v2 := NewString(faker.Word()) + + o := NewSlice(v1, v2) + o = o.Sub(0, 1) + + assert.Equal(t, 1, o.Len()) +} + +func TestSlice_Encode(t *testing.T) { + e := NewSliceEncoder(NewStringEncoder()) + + v1 := NewString(faker.Word()) + v2 := NewString(faker.Word()) + + v, err := e.Encode([]string{v1.String(), v2.String()}) + assert.NoError(t, err) + assert.Equal(t, NewSlice(v1, v2), v) +} + +func TestSlice_Decode(t *testing.T) { + d := NewSliceDecoder(NewStringDecoder()) + + v1 := NewString(faker.Word()) + v2 := NewString(faker.Word()) + + var v []string + err := d.Decode(NewSlice(v1, v2), &v) + assert.NoError(t, err) + assert.Equal(t, []string{v1.String(), v2.String()}, v) +} diff --git a/pkg/primitive/string.go b/pkg/primitive/string.go new file mode 100644 index 00000000..eb700178 --- /dev/null +++ b/pkg/primitive/string.go @@ -0,0 +1,90 @@ +package primitive + +import ( + "encoding" + "hash/fnv" + "reflect" + + "github.com/pkg/errors" + encoding2 "github.com/siyul-park/uniflow/internal/encoding" +) + +type ( + // String is a representation of a string. + String string +) + +var _ Object = (String)("") + +// NewString returns a new String. 
+func NewString(value string) String { + return String(value) +} + +func (o String) Len() int { + return len([]rune(o)) +} + +func (o String) Get(index int) rune { + if index >= len([]rune(o)) { + return rune(0) + } + return []rune(o)[index] +} + +// String returns a raw representation. +func (o String) String() string { + return string(o) +} + +func (o String) Kind() Kind { + return KindString +} + +func (o String) Hash() uint32 { + h := fnv.New32() + h.Write([]byte{byte(KindString), 0}) + h.Write([]byte(o)) + + return h.Sum32() +} + +func (o String) Interface() any { + return string(o) +} + +// NewStringEncoder is encode string to String. +func NewStringEncoder() encoding2.Encoder[any, Object] { + return encoding2.EncoderFunc[any, Object](func(source any) (Object, error) { + if s, ok := source.(encoding.TextMarshaler); ok { + if text, err := s.MarshalText(); err != nil { + return nil, err + } else { + return NewString(string(text)), nil + } + } else if s := reflect.ValueOf(source); s.Kind() == reflect.String { + return NewString(s.String()), nil + } + return nil, errors.WithStack(encoding2.ErrUnsupportedValue) + }) +} + +// NewStringDecoder is decode String to string. 
+func NewStringDecoder() encoding2.Decoder[Object, any] { + return encoding2.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(String); ok { + if t, ok := target.(encoding.TextUnmarshaler); ok { + return t.UnmarshalText([]byte(s.String())) + } else if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.String { + t.Elem().Set(reflect.ValueOf(s.String())) + return nil + } else if t.Elem().Type() == typeAny { + t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding2.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/string_test.go b/pkg/primitive/string_test.go new file mode 100644 index 00000000..10998c28 --- /dev/null +++ b/pkg/primitive/string_test.go @@ -0,0 +1,46 @@ +package primitive + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/stretchr/testify/assert" +) + +func TestNewString(t *testing.T) { + raw := faker.Word() + v := NewString(raw) + + assert.Equal(t, KindString, v.Kind()) + assert.Equal(t, raw, v.Interface()) +} + +func TestString_Hash(t *testing.T) { + assert.NotEqual(t, NewString("A").Hash(), NewString("B").Hash()) + assert.Equal(t, NewString("").Hash(), NewString("").Hash()) + assert.Equal(t, NewString("A").Hash(), NewString("A").Hash()) +} + +func TestString_Get(t *testing.T) { + v := NewString("A") + + assert.Equal(t, 1, v.Len()) + assert.Equal(t, rune('A'), v.Get(0)) +} + +func TestString_Encode(t *testing.T) { + e := NewStringEncoder() + + v, err := e.Encode("A") + assert.NoError(t, err) + assert.Equal(t, NewString("A"), v) +} + +func TestString_Decode(t *testing.T) { + d := NewStringDecoder() + + var v string + err := d.Decode(NewString("A"), &v) + assert.NoError(t, err) + assert.Equal(t, "A", v) +} diff --git a/pkg/primitive/uint.go b/pkg/primitive/uint.go new file mode 100644 index 00000000..8a8b4517 --- /dev/null +++ b/pkg/primitive/uint.go @@ -0,0 +1,221 @@ +package primitive + +import ( 
+ "hash/fnv" + "reflect" + "unsafe" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" +) + +type ( + Uinteger interface { + Object + Uint() uint64 + } + // Uint is a representation of a uint. + Uint uint + // Uint8 is a representation of a uint8. + Uint8 uint8 + // Uint16 is a representation of a uint16. + Uint16 uint16 + // Uint32 is a representation of a uint32. + Uint32 uint32 + // Uint64 is a representation of a uint64. + Uint64 uint64 +) + +var _ Uinteger = (Uint)(0) +var _ Uinteger = (Uint8)(0) +var _ Uinteger = (Uint16)(0) +var _ Uinteger = (Uint32)(0) +var _ Uinteger = (Uint64)(0) + +// NewUint returns a new Uint. +func NewUint(value uint) Uint { + return Uint(value) +} + +// Uint returns a raw representation. +func (o Uint) Uint() uint64 { + return uint64(o) +} + +func (o Uint) Kind() Kind { + return KindUint +} + +func (o Uint) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindUint), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Uint) Interface() any { + return uint(o) +} + +// NewUint8 returns a new Uint8. +func NewUint8(value uint8) Uint8 { + return Uint8(value) +} + +// Uint returns a raw representation. +func (o Uint8) Uint() uint64 { + return uint64(o) +} + +func (o Uint8) Kind() Kind { + return KindUint8 +} + +func (o Uint8) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindUint8), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Uint8) Interface() any { + return uint8(o) +} + +// NewUint16 returns a new Uint16. +func NewUint16(value uint16) Uint16 { + return Uint16(value) +} + +// Uint returns a raw representation. 
+func (o Uint16) Uint() uint64 { + return uint64(o) +} + +func (o Uint16) Kind() Kind { + return KindUint16 +} + +func (o Uint16) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindUint16), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Uint16) Interface() any { + return uint16(o) +} + +// NewUint32 returns a new Uint32. +func NewUint32(value uint32) Uint32 { + return Uint32(value) +} + +// Uint returns a raw representation. +func (o Uint32) Uint() uint64 { + return uint64(o) +} + +func (o Uint32) Kind() Kind { + return KindUint32 +} + +func (o Uint32) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindUint32), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Uint32) Interface() any { + return uint32(o) +} + +// NewUint64 returns a new Uint64. +func NewUint64(value uint64) Uint64 { + return Uint64(value) +} + +// Uint returns a raw representation. +func (o Uint64) Uint() uint64 { + return uint64(o) +} + +func (o Uint64) Kind() Kind { + return KindUint64 +} + +func (o Uint64) Hash() uint32 { + buf := *(*[unsafe.Sizeof(o)]byte)(unsafe.Pointer(&o)) + + h := fnv.New32() + h.Write([]byte{byte(KindUint64), 0}) + h.Write(buf[:]) + + return h.Sum32() +} + +func (o Uint64) Interface() any { + return uint64(o) +} + +// NewUintEncoder is encode uint to Uint. 
+func NewUintEncoder() encoding.Encoder[any, Object] { + return encoding.EncoderFunc[any, Object](func(source any) (Object, error) { + if s := reflect.ValueOf(source); s.Kind() == reflect.Uint { + return NewUint(uint(s.Uint())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Uint8 { + return NewUint8(uint8(s.Uint())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Uint16 { + return NewUint16(uint16(s.Uint())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Uint32 { + return NewUint32(uint32(s.Uint())), nil + } else if s := reflect.ValueOf(source); s.Kind() == reflect.Uint64 { + return NewUint64(uint64(s.Uint())), nil + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + }) +} + +// NewUintDecoder is decode Uint to uint. +func NewUintDecoder() encoding.Decoder[Object, any] { + return encoding.DecoderFunc[Object, any](func(source Object, target any) error { + if s, ok := source.(Uinteger); ok { + if t := reflect.ValueOf(target); t.Kind() == reflect.Pointer { + if t.Elem().Kind() == reflect.Uint { + t.Elem().Set(reflect.ValueOf(uint(s.Uint()))) + return nil + } else if t.Elem().Kind() == reflect.Uint8 { + t.Elem().Set(reflect.ValueOf(uint8(s.Uint()))) + return nil + } else if t.Elem().Kind() == reflect.Uint16 { + t.Elem().Set(reflect.ValueOf(uint16(s.Uint()))) + return nil + } else if t.Elem().Kind() == reflect.Uint32 { + t.Elem().Set(reflect.ValueOf(uint32(s.Uint()))) + return nil + } else if t.Elem().Kind() == reflect.Uint64 { + t.Elem().Set(reflect.ValueOf(uint64(s.Uint()))) + return nil + } else if t.Elem().Type() == typeAny { + t.Elem().Set(reflect.ValueOf(s.Interface())) + return nil + } + } + } + return errors.WithStack(encoding.ErrUnsupportedValue) + }) +} diff --git a/pkg/primitive/uint_test.go b/pkg/primitive/uint_test.go new file mode 100644 index 00000000..163b4372 --- /dev/null +++ b/pkg/primitive/uint_test.go @@ -0,0 +1,133 @@ +package primitive + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" +) + +func TestNewUint(t *testing.T) { + t.Run("", func(t *testing.T) { + v := NewUint(0) + + assert.Equal(t, KindUint, v.Kind()) + assert.Equal(t, uint(0), v.Interface()) + }) + t.Run("8", func(t *testing.T) { + v := NewUint8(0) + + assert.Equal(t, KindUint8, v.Kind()) + assert.Equal(t, uint8(0), v.Interface()) + }) + t.Run("16", func(t *testing.T) { + v := NewUint16(0) + + assert.Equal(t, KindUint16, v.Kind()) + assert.Equal(t, uint16(0), v.Interface()) + }) + t.Run("32", func(t *testing.T) { + v := NewUint32(0) + + assert.Equal(t, KindUint32, v.Kind()) + assert.Equal(t, uint32(0), v.Interface()) + }) + t.Run("64", func(t *testing.T) { + v := NewUint64(0) + + assert.Equal(t, KindUint64, v.Kind()) + assert.Equal(t, uint64(0), v.Interface()) + }) +} + +func TestUint_Hash(t *testing.T) { + t.Run("", func(t *testing.T) { + assert.NotEqual(t, NewUint(0).Hash(), NewUint(1).Hash()) + assert.Equal(t, NewUint(0).Hash(), NewUint(0).Hash()) + assert.Equal(t, NewUint(1).Hash(), NewUint(1).Hash()) + }) + t.Run("8", func(t *testing.T) { + assert.NotEqual(t, NewUint8(0).Hash(), NewUint8(1).Hash()) + assert.Equal(t, NewUint8(0).Hash(), NewUint8(0).Hash()) + assert.Equal(t, NewUint8(1).Hash(), NewUint8(1).Hash()) + }) + t.Run("16", func(t *testing.T) { + assert.NotEqual(t, NewUint16(0).Hash(), NewUint16(1).Hash()) + assert.Equal(t, NewUint16(0).Hash(), NewUint16(0).Hash()) + assert.Equal(t, NewUint16(1).Hash(), NewUint16(1).Hash()) + }) + t.Run("32", func(t *testing.T) { + assert.NotEqual(t, NewUint32(0).Hash(), NewUint32(1).Hash()) + assert.Equal(t, NewUint32(0).Hash(), NewUint32(0).Hash()) + assert.Equal(t, NewUint32(1).Hash(), NewUint32(1).Hash()) + }) + t.Run("64", func(t *testing.T) { + assert.NotEqual(t, NewUint64(0).Hash(), NewUint64(1).Hash()) + assert.Equal(t, NewUint64(0).Hash(), NewUint64(0).Hash()) + assert.Equal(t, NewUint64(1).Hash(), NewUint64(1).Hash()) + }) +} + +func TestUint_Encode(t *testing.T) { + e := NewUintEncoder() + + 
t.Run("", func(t *testing.T) { + v, err := e.Encode(uint(1)) + assert.NoError(t, err) + assert.Equal(t, NewUint(1), v) + }) + t.Run("8", func(t *testing.T) { + v, err := e.Encode(uint8(1)) + assert.NoError(t, err) + assert.Equal(t, NewUint8(1), v) + }) + t.Run("16", func(t *testing.T) { + v, err := e.Encode(uint16(1)) + assert.NoError(t, err) + assert.Equal(t, NewUint16(1), v) + }) + t.Run("32", func(t *testing.T) { + v, err := e.Encode(uint32(1)) + assert.NoError(t, err) + assert.Equal(t, NewUint32(1), v) + }) + t.Run("64", func(t *testing.T) { + v, err := e.Encode(uint64(1)) + assert.NoError(t, err) + assert.Equal(t, NewUint64(1), v) + }) +} + +func TestUint_Decode(t *testing.T) { + d := NewUintDecoder() + + t.Run("", func(t *testing.T) { + var v uint + err := d.Decode(NewUint(1), &v) + assert.NoError(t, err) + assert.Equal(t, uint(1), v) + }) + t.Run("8", func(t *testing.T) { + var v uint8 + err := d.Decode(NewUint8(1), &v) + assert.NoError(t, err) + assert.Equal(t, uint8(1), v) + }) + t.Run("16", func(t *testing.T) { + var v uint16 + err := d.Decode(NewUint16(1), &v) + assert.NoError(t, err) + assert.Equal(t, uint16(1), v) + }) + t.Run("32", func(t *testing.T) { + var v uint32 + err := d.Decode(NewUint32(1), &v) + assert.NoError(t, err) + assert.Equal(t, uint32(1), v) + }) + t.Run("64", func(t *testing.T) { + var v uint64 + err := d.Decode(NewUint64(1), &v) + assert.NoError(t, err) + assert.Equal(t, uint64(1), v) + }) +} diff --git a/pkg/process/process.go b/pkg/process/process.go new file mode 100644 index 00000000..2a2bee77 --- /dev/null +++ b/pkg/process/process.go @@ -0,0 +1,64 @@ +package process + +import ( + "sync" + + "github.com/oklog/ulid/v2" +) + +type ( + // Process is a processing unit that isolates data processing with others. + Process struct { + id ulid.ULID + stack *Stack + done chan struct{} + mu sync.RWMutex + } +) + +// New creates a new Process. 
+func New() *Process { + return &Process{ + id: ulid.Make(), + stack: NewStack(), + done: make(chan struct{}), + mu: sync.RWMutex{}, + } +} + +// ID returns the ID. +func (p *Process) ID() ulid.ULID { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.id +} + +// Stack returns a Stack +func (p *Process) Stack() *Stack { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.stack +} + +// Done returns a channel that is closed when is closed. +func (p *Process) Done() <-chan struct{} { + return p.done +} + +// Close closes the Process. +func (p *Process) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + select { + case <-p.done: + return + default: + } + + close(p.done) + + p.stack.Close() +} diff --git a/pkg/process/process_test.go b/pkg/process/process_test.go new file mode 100644 index 00000000..84e1e3aa --- /dev/null +++ b/pkg/process/process_test.go @@ -0,0 +1,47 @@ +package process + +import ( + "testing" + + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + proc := New() + defer proc.Close() + + assert.NotNil(t, proc) +} + +func TestProcess_ID(t *testing.T) { + proc := New() + defer proc.Close() + + assert.NotEqual(t, ulid.ULID{}, proc.ID()) +} + +func TestProcess_Stack(t *testing.T) { + proc := New() + defer proc.Close() + + assert.NotNil(t, proc.Stack()) +} + +func TestProcess_Close(t *testing.T) { + proc := New() + + select { + case <-proc.Done(): + assert.Fail(t, "proc.Done() is not empty.") + default: + } + + proc.Close() + + select { + case <-proc.Done(): + default: + assert.Fail(t, "proc.Done() is empty.") + } +} diff --git a/pkg/process/stack.go b/pkg/process/stack.go new file mode 100644 index 00000000..46b6a280 --- /dev/null +++ b/pkg/process/stack.go @@ -0,0 +1,307 @@ +package process + +import ( + "sync" + + "github.com/oklog/ulid/v2" +) + +type ( + // Stack is trace object. 
+ Stack struct { + stems map[ulid.ULID][]ulid.ULID + leaves map[ulid.ULID][]ulid.ULID + stacks map[ulid.ULID][]ulid.ULID + heads map[ulid.ULID][]ulid.ULID + wait sync.RWMutex + mu sync.RWMutex + } +) + +// NewStack returns a new Stack. +func NewStack() *Stack { + return &Stack{ + stems: make(map[ulid.ULID][]ulid.ULID), + leaves: make(map[ulid.ULID][]ulid.ULID), + stacks: make(map[ulid.ULID][]ulid.ULID), + heads: make(map[ulid.ULID][]ulid.ULID), + } +} + +// Link adds an relation. +func (s *Stack) Link(stem, leaf ulid.ULID) { + s.mu.Lock() + defer s.mu.Unlock() + + if stem == leaf { + return + } + + if s.stems == nil || s.leaves == nil { + return + } + + for _, cur := range s.stems[leaf] { + if cur == stem { + return + } + } + + s.stems[leaf] = append(s.stems[leaf], stem) + s.leaves[stem] = append(s.leaves[stem], leaf) +} + +// Unlink deletes an relation. +func (s *Stack) Unlink(stem, leaf ulid.ULID) { + s.mu.Lock() + defer s.mu.Unlock() + + if stem == leaf { + return + } + + if s.stems == nil || s.leaves == nil { + return + } + + for i, cur := range s.stems[leaf] { + if cur == stem { + s.stems[leaf] = append(s.stems[leaf][:i], s.stems[leaf][i+1:]...) + if len(s.stems[leaf]) == 0 { + delete(s.stems, leaf) + } + return + } + } + for i, cur := range s.leaves[leaf] { + if cur == leaf { + s.leaves[stem] = append(s.leaves[stem][:i], s.leaves[stem][i+1:]...) + if len(s.leaves[stem]) == 0 { + delete(s.leaves, stem) + } + return + } + } +} + +// Push pushes the value. +func (s *Stack) Push(key, value ulid.ULID) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stacks == nil { + return + } + s.stacks[key] = append(s.stacks[key], value) + s.wait.RLock() +} + +// Pop pops the value. 
+func (s *Stack) Pop(key, value ulid.ULID) (ulid.ULID, bool) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stems == nil || s.leaves == nil || s.stacks == nil || s.heads == nil { + return ulid.ULID{}, false + } + + heads, ok := s.heads[key] + if !ok { + heads = []ulid.ULID{key} + } + + visits := map[ulid.ULID]struct{}{} + for { + for i, head := range heads { + if _, ok := visits[head]; ok && len(s.leaves[head]) != 0 { + continue + } + visits[head] = struct{}{} + + if steams := s.clean(head); steams != nil { + delete(s.heads, head) + + heads = append(heads[:i], heads[i+1:]...) + heads = append(heads, steams...) + } + } + + next := false + for _, head := range heads { + if _, ok := visits[head]; !ok { + next = true + break + } + } + if !next { + break + } + } + if len(heads) > 0 { + s.heads[key] = heads + } + + for i, head := range heads { + stacks := s.stacks[head] + if len(stacks) > 0 && stacks[len(stacks)-1] == value { + stacks = stacks[:len(stacks)-1] + + s.stacks[head] = stacks + if len(s.stacks[head]) == 0 { + delete(s.stacks, head) + + heads = append(heads[:i], heads[i+1:]...) + heads = append(heads, s.stems[head]...) + + s.heads[key] = heads + if len(s.heads[key]) == 0 { + delete(s.heads, key) + } + + s.clean(head) + } + + s.wait.RUnlock() + return head, true + } + } + + return ulid.ULID{}, false +} + +// Clear removes a link from the child. +func (s *Stack) Clear(key ulid.ULID) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stems == nil || s.leaves == nil || s.stacks == nil || s.heads == nil { + return + } + + heads, ok := s.heads[key] + if !ok { + heads = []ulid.ULID{key} + } + + visits := map[ulid.ULID]struct{}{} + for { + for i, head := range heads { + if _, ok := visits[head]; ok && len(s.leaves[head]) != 0 { + continue + } + visits[head] = struct{}{} + + heads = append(heads[:i], heads[i+1:]...) + heads = append(heads, s.stems[head]...) 
+ + if len(s.leaves[head]) == 0 { + for range s.stacks[head] { + s.wait.RUnlock() + } + + delete(s.stacks, head) + delete(s.heads, head) + + s.clean(head) + } + } + + next := false + for _, head := range heads { + if _, ok := visits[head]; !ok { + next = true + break + } + } + if !next { + break + } + } +} + +// Len return the number of values. +func (s *Stack) Len(key ulid.ULID) int { + s.mu.RLock() + defer s.mu.RUnlock() + + if s.stems == nil || s.leaves == nil || s.stacks == nil || s.heads == nil { + return 0 + } + + heads, ok := s.heads[key] + if !ok { + heads = []ulid.ULID{key} + } + + visits := map[ulid.ULID]struct{}{} + count := 0 + for { + for i, head := range heads { + if _, ok := visits[head]; ok { + continue + } + visits[head] = struct{}{} + + heads = append(heads[:i], heads[i+1:]...) + heads = append(heads, s.stems[head]...) + + count += len(s.stacks[head]) + } + + next := false + for _, head := range heads { + if _, ok := visits[head]; !ok { + next = true + break + } + } + if !next { + break + } + } + + return count +} + +// Wait blocks until is empty. +func (s *Stack) Wait() { + s.wait.Lock() + defer s.wait.Unlock() +} + +// Close closes all resources. +func (s *Stack) Close() { + s.mu.Lock() + defer s.mu.Unlock() + + for _, stacks := range s.stacks { + for range stacks { + s.wait.RUnlock() + } + } + + s.stems = nil + s.stacks = nil + s.heads = nil +} + +func (s *Stack) clean(head ulid.ULID) []ulid.ULID { + if len(s.leaves[head]) > 0 || len(s.stacks[head]) > 0 { + return nil + } + + for _, stem := range s.stems[head] { + for j, cur := range s.leaves[stem] { + if cur == head { + s.leaves[stem] = append(s.leaves[stem][:j], s.leaves[stem][j+1:]...) 
+ if len(s.leaves[stem]) == 0 { + delete(s.leaves, stem) + } + } + } + } + stems := s.stems[head] + delete(s.stems, head) + + return stems +} diff --git a/pkg/process/stack_test.go b/pkg/process/stack_test.go new file mode 100644 index 00000000..e4a920b3 --- /dev/null +++ b/pkg/process/stack_test.go @@ -0,0 +1,153 @@ +package process + +import ( + "context" + "testing" + "time" + + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/assert" +) + +func TestStack_Pop(t *testing.T) { + st := NewStack() + defer st.Close() + + k1 := ulid.Make() + k2 := ulid.Make() + k3 := ulid.Make() + + v1 := ulid.Make() + v2 := ulid.Make() + v3 := ulid.Make() + + st.Link(k1, k2) + st.Link(k2, k3) + + st.Push(k1, v1) + st.Push(k2, v2) + st.Push(k2, v3) + + h1, ok := st.Pop(k3, v3) + assert.True(t, ok) + assert.Equal(t, k2, h1) + + h2, ok := st.Pop(k3, v2) + assert.True(t, ok) + assert.Equal(t, k2, h2) + + h3, ok := st.Pop(k3, v1) + assert.True(t, ok) + assert.Equal(t, k1, h3) + + assert.Equal(t, 0, st.Len(k3)) +} + +func TestStack_Len(t *testing.T) { + st := NewStack() + defer st.Close() + + k1 := ulid.Make() + k2 := ulid.Make() + + v1 := ulid.Make() + v2 := ulid.Make() + v3 := ulid.Make() + + st.Link(k1, k2) + + st.Push(k1, v1) + st.Push(k2, v2) + st.Push(k2, v3) + + assert.Equal(t, 1, st.Len(k1)) + assert.Equal(t, 3, st.Len(k2)) +} + +func TestStack_Wait(t *testing.T) { + t.Run("empty", func(t *testing.T) { + st := NewStack() + defer st.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + done := make(chan struct{}) + go func() { + st.Wait() + close(done) + }() + + select { + case <-ctx.Done(): + assert.Fail(t, "timeout") + case <-done: + } + }) + + t.Run("not empty", func(t *testing.T) { + st := NewStack() + defer st.Close() + + k1 := ulid.Make() + v1 := ulid.Make() + + st.Push(k1, v1) + + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + + done := make(chan struct{}) + go func() { + 
st.Wait() + close(done) + }() + + select { + case <-ctx.Done(): + case <-done: + assert.Fail(t, "timeout") + } + }) +} + +func TestStack_Clear(t *testing.T) { + st := NewStack() + defer st.Close() + + k1 := ulid.Make() + k2 := ulid.Make() + k3 := ulid.Make() + k4 := ulid.Make() + + v1 := ulid.Make() + v2 := ulid.Make() + v3 := ulid.Make() + + st.Link(k1, k2) + st.Link(k2, k3) + st.Link(k2, k4) + + st.Push(k1, v1) + st.Push(k2, v2) + st.Push(k2, v3) + + st.Clear(k4) + + _, ok := st.Pop(k4, v3) + assert.False(t, ok) + + _, ok = st.Pop(k4, v2) + assert.False(t, ok) + + _, ok = st.Pop(k3, v3) + assert.True(t, ok) + + _, ok = st.Pop(k3, v2) + assert.True(t, ok) + + _, ok = st.Pop(k3, v1) + assert.True(t, ok) + + assert.Equal(t, 0, st.Len(k3)) +} diff --git a/pkg/runtime/runtime.go b/pkg/runtime/runtime.go new file mode 100644 index 00000000..ff75175a --- /dev/null +++ b/pkg/runtime/runtime.go @@ -0,0 +1,144 @@ +package runtime + +import ( + "context" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/hook" + "github.com/siyul-park/uniflow/pkg/loader" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/siyul-park/uniflow/pkg/symbol" +) + +type ( + // Config is a config for for the Runtime. + Config struct { + Namespace string + Hooks *hook.Hook + Scheme *scheme.Scheme + Database database.Database + } + + // Runtime is an execution environment that runs Flows. + Runtime struct { + namespace string + hooks *hook.Hook + scheme *scheme.Scheme + storage *storage.Storage + table *symbol.Table + loader *loader.Loader + reconciler *loader.Reconciler + } +) + +// New returns a new Runtime. 
+func New(ctx context.Context, config Config) (*Runtime, error) { + ns := config.Namespace + hk := config.Hooks + sc := config.Scheme + db := config.Database + + if hk == nil { + hk = hook.New() + } + if sc == nil { + sc = scheme.New() + } + if db == nil { + db = memdb.New("") + } + + st, err := storage.New(ctx, storage.Config{ + Scheme: sc, + Database: db, + }) + if err != nil { + return nil, err + } + + tb := symbol.NewTable(symbol.TableOptions{ + PreLoadHooks: []symbol.PreLoadHook{hk}, + PostLoadHooks: []symbol.PostLoadHook{hk}, + PreUnloadHooks: []symbol.PreUnloadHook{hk}, + PostUnloadHooks: []symbol.PostUnloadHook{hk}, + }) + + ld, err := loader.New(ctx, loader.Config{ + Scheme: sc, + Storage: st, + Table: tb, + }) + if err != nil { + return nil, err + } + + var filter *storage.Filter + if ns != "" { + filter = storage.Where[string](scheme.KeyNamespace).EQ(ns) + } + rc := loader.NewReconciler(loader.ReconcilerConfig{ + Remote: st, + Loader: ld, + Filter: filter, + }) + + return &Runtime{ + namespace: ns, + hooks: hk, + scheme: sc, + storage: st, + table: tb, + loader: ld, + reconciler: rc, + }, nil +} + +// Lookup lookup node.Node in symbol.Table, and if it not exist load it from storage.Storage. +func (r *Runtime) Lookup(ctx context.Context, id ulid.ULID) (node.Node, error) { + filter := storage.Where[ulid.ULID](scheme.KeyID).EQ(id) + if r.namespace != "" { + filter = filter.And(storage.Where[string](scheme.KeyNamespace).EQ(r.namespace)) + } + if s, ok := r.table.Lookup(id); !ok { + return r.loader.LoadOne(ctx, filter) + } else { + return s, nil + } +} + +// Free unload node.Node from symbol.Table. +func (r *Runtime) Free(ctx context.Context, id ulid.ULID) (bool, error) { + return r.loader.UnloadOne(ctx, storage.Where[ulid.ULID](scheme.KeyID).EQ(id)) +} + +// Start starts the Runtime. +// Runtime load all scheme.Spec as node.Node from the database.Collection, +// and then keeps node.Node up-to-date and runs by continuously tracking scheme.Spec. 
+func (r *Runtime) Start(ctx context.Context) error { + if err := r.reconciler.Watch(ctx); err != nil { + return err + } + var filter *storage.Filter + if r.namespace != "" { + filter = filter.And(storage.Where[string](scheme.KeyNamespace).EQ(r.namespace)) + } + if _, err := r.loader.LoadMany(ctx, filter); err != nil { + return err + } + return r.reconciler.Reconcile(ctx) +} + +// Close is close the Runtime. +func (r *Runtime) Close(ctx context.Context) error { + if err := r.reconciler.Close(); err != nil { + return err + } + if _, err := r.loader.UnloadMany(ctx, nil); err != nil { + return err + } + return r.table.Close() +} diff --git a/pkg/runtime/runtime_test.go b/pkg/runtime/runtime_test.go new file mode 100644 index 00000000..20050f45 --- /dev/null +++ b/pkg/runtime/runtime_test.go @@ -0,0 +1,127 @@ +package runtime + +import ( + "context" + "testing" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/siyul-park/uniflow/pkg/storage" + "github.com/stretchr/testify/assert" +) + +func TestRuntime_Lookup(t *testing.T) { + kind := faker.Word() + + sb := scheme.NewBuilder(func(s *scheme.Scheme) error { + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + })) + return nil + }) + s, _ := sb.Build() + + db := memdb.New(faker.Word()) + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: db, + }) + + r, _ := New(context.Background(), Config{ + Scheme: s, + Database: db, + }) + defer func() { _ = r.Close(context.Background()) }() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + } + + _, _ = st.InsertOne(context.Background(), spec) + + n, err := r.Lookup(context.Background(), spec.ID) + assert.NoError(t, err) + 
assert.NotNil(t, n) +} + +func TestRuntime_Free(t *testing.T) { + kind := faker.Word() + + sb := scheme.NewBuilder(func(s *scheme.Scheme) error { + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + })) + return nil + }) + s, _ := sb.Build() + + db := memdb.New(faker.Word()) + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: db, + }) + + r, _ := New(context.Background(), Config{ + Scheme: s, + Database: db, + }) + defer func() { _ = r.Close(context.Background()) }() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + } + + _, _ = st.InsertOne(context.Background(), spec) + _, _ = r.Lookup(context.Background(), spec.ID) + + ok, err := r.Free(context.Background(), spec.ID) + assert.NoError(t, err) + assert.True(t, ok) +} + +func TestRuntime_Start(t *testing.T) { + kind := faker.Word() + + sb := scheme.NewBuilder(func(s *scheme.Scheme) error { + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + })) + return nil + }) + s, _ := sb.Build() + + db := memdb.New(faker.Word()) + + st, _ := storage.New(context.Background(), storage.Config{ + Scheme: s, + Database: db, + }) + + r, _ := New(context.Background(), Config{ + Scheme: s, + Database: db, + }) + defer func() { _ = r.Close(context.Background()) }() + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + } + + _, _ = st.InsertOne(context.Background(), spec) + + go func() { + err := r.Start(context.Background()) + assert.NoError(t, err) + }() +} diff --git a/pkg/scheme/builder.go b/pkg/scheme/builder.go new file mode 100644 index 00000000..e4a311fa --- /dev/null +++ b/pkg/scheme/builder.go @@ -0,0 +1,35 @@ +package scheme + +type ( + // Builder builds a new Scheme. 
+ Builder []func(*Scheme) error +) + +// NewBuilder returns a new SchemeBuilder. +func NewBuilder(funcs ...func(*Scheme) error) Builder { + return Builder(funcs) +} + +// AddToScheme adds all registered types to s. +func (b *Builder) AddToScheme(s *Scheme) error { + for _, f := range *b { + if err := f(s); err != nil { + return err + } + } + return nil +} + +// Register adds one or more Spec. +func (b *Builder) Register(funcs ...func(*Scheme) error) { + *b = append(*b, funcs...) +} + +// Build returns a new Scheme containing the registered types. +func (b *Builder) Build() (*Scheme, error) { + s := New() + if err := b.AddToScheme(s); err != nil { + return nil, err + } + return s, nil +} diff --git a/pkg/scheme/builder_test.go b/pkg/scheme/builder_test.go new file mode 100644 index 00000000..86614b40 --- /dev/null +++ b/pkg/scheme/builder_test.go @@ -0,0 +1,33 @@ +package scheme + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSchemeBuilder_Register(t *testing.T) { + b := NewBuilder() + + b.Register(func(_ *Scheme) error { return nil }) + assert.Len(t, b, 1) +} + +func TestSchemeBuilder_AddToScheme(t *testing.T) { + b := NewBuilder() + + b.Register(func(_ *Scheme) error { return nil }) + + err := b.AddToScheme(New()) + assert.NoError(t, err) +} + +func TestSchemeBuilder_Build(t *testing.T) { + b := NewBuilder() + + b.Register(func(_ *Scheme) error { return nil }) + + s, err := b.Build() + assert.NoError(t, err) + assert.NotNil(t, s) +} diff --git a/pkg/scheme/codec.go b/pkg/scheme/codec.go new file mode 100644 index 00000000..c3cce757 --- /dev/null +++ b/pkg/scheme/codec.go @@ -0,0 +1,30 @@ +package scheme + +import ( + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" + "github.com/siyul-park/uniflow/pkg/node" +) + +type ( + // Codec is the interface for decoding Spec to node.Node. 
+ Codec interface { + Decode(spec Spec) (node.Node, error) + } + + CodecFunc func(spec Spec) (node.Node, error) +) + +func CodecWithType[T Spec](decode func(spec T) (node.Node, error)) Codec { + return CodecFunc(func(spec Spec) (node.Node, error) { + if spec, ok := spec.(T); !ok { + return nil, errors.WithStack(encoding.ErrUnsupportedValue) + } else { + return decode(spec) + } + }) +} + +func (c CodecFunc) Decode(spec Spec) (node.Node, error) { + return c(spec) +} diff --git a/pkg/scheme/scheme.go b/pkg/scheme/scheme.go new file mode 100644 index 00000000..70caa97c --- /dev/null +++ b/pkg/scheme/scheme.go @@ -0,0 +1,125 @@ +package scheme + +import ( + "reflect" + "sync" + + "github.com/pkg/errors" + "github.com/siyul-park/uniflow/internal/encoding" + "github.com/siyul-park/uniflow/pkg/node" +) + +type ( + // Scheme defines methods for decode Spec. + Scheme struct { + types map[string]reflect.Type + codecs map[string]Codec + mu sync.RWMutex + } +) + +var _ Codec = &Scheme{} + +// New returns a new Scheme. +func New() *Scheme { + return &Scheme{ + types: make(map[string]reflect.Type), + codecs: make(map[string]Codec), + } +} + +// AddKnownType adds a new Type and Spec to the Scheme. +func (s *Scheme) AddKnownType(kind string, spec Spec) { + s.mu.Lock() + defer s.mu.Unlock() + + s.types[kind] = reflect.TypeOf(spec) +} + +// KnownType returns the reflect.Type of the Spec with the given kind. +func (s *Scheme) KnownType(kind string) (reflect.Type, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + t, ok := s.types[kind] + return t, ok +} + +// AddCodec adds a new Codec to the Scheme. +func (s *Scheme) AddCodec(kind string, codec Codec) { + s.mu.Lock() + defer s.mu.Unlock() + + s.codecs[kind] = codec +} + +// Codec returns Codec with the given kind. +func (s *Scheme) Codec(kind string) (Codec, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + c, ok := s.codecs[kind] + return c, ok +} + +// New returns a new Spec with the given kind. 
+func (s *Scheme) New(kind string) (Spec, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + if t, ok := s.types[kind]; !ok { + return nil, false + } else { + zero := reflect.New(t) + if zero.Elem().Kind() == reflect.Pointer { + zero.Elem().Set(reflect.New(t.Elem())) + } + v, ok := zero.Elem().Interface().(Spec) + return v, ok + } +} + +// Decode decodes the given Spec into a node.Node. +// Decode must not take s.mu itself: it calls Kinds, New, and Codec, which each +// acquire the read lock, and re-acquiring RLock while a writer is waiting on +// Lock can deadlock (see sync.RWMutex documentation). +func (s *Scheme) Decode(spec Spec) (node.Node, error) { + kind := spec.GetKind() + if kind == "" { + if kinds := s.Kinds(spec); len(kinds) > 0 { + kind = kinds[0] + } + } + + if unstructured, ok := spec.(*Unstructured); ok { + if structured, ok := s.New(kind); ok { + if err := unstructured.Unmarshal(structured); err != nil { + return nil, err + } else { + spec = structured + } + } + } + + if codec, ok := s.Codec(kind); ok { + return codec.Decode(spec) + } + return nil, errors.WithStack(encoding.ErrUnsupportedValue) +} + +// Kinds returns the kinds of the given Spec. 
+func (s *Scheme) Kinds(spec Spec) []string { + s.mu.RLock() + defer s.mu.RUnlock() + + typ := reflect.TypeOf(spec) + + var kinds []string + for kind, t := range s.types { + if t == typ { + kinds = append(kinds, kind) + } + } + + return kinds +} diff --git a/pkg/scheme/scheme_test.go b/pkg/scheme/scheme_test.go new file mode 100644 index 00000000..f956e555 --- /dev/null +++ b/pkg/scheme/scheme_test.go @@ -0,0 +1,71 @@ +package scheme + +import ( + "reflect" + "testing" + + "github.com/go-faker/faker/v4" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/stretchr/testify/assert" +) + +func TestScheme_KnownType(t *testing.T) { + s := New() + kind := faker.Word() + + s.AddKnownType(kind, &SpecMeta{}) + + typ, ok := s.KnownType(kind) + assert.True(t, ok) + assert.Equal(t, reflect.TypeOf(&SpecMeta{}), typ) +} + +func TestScheme_Codec(t *testing.T) { + s := New() + kind := faker.Word() + + c := CodecFunc(func(spec Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ID: spec.GetID()}), nil + }) + + s.AddCodec(kind, c) + + _, ok := s.Codec(kind) + assert.True(t, ok) +} + +func TestScheme_New(t *testing.T) { + s := New() + kind := faker.Word() + + s.AddKnownType(kind, &SpecMeta{}) + + spec, ok := s.New(kind) + assert.True(t, ok) + assert.IsType(t, spec, &SpecMeta{}) +} + +func TestScheme_Decode(t *testing.T) { + s := New() + kind := faker.Word() + + s.AddKnownType(kind, &SpecMeta{}) + s.AddCodec(kind, CodecFunc(func(spec Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{}), nil + })) + + n, err := s.Decode(&SpecMeta{}) + assert.NoError(t, err) + assert.NotNil(t, n) +} + +func TestScheme_Kinds(t *testing.T) { + s := New() + kind := faker.Word() + + s.AddKnownType(kind, &SpecMeta{}) + + kinds := s.Kinds(&SpecMeta{}) + assert.Len(t, kinds, 1) + assert.Equal(t, kind, kinds[0]) +} diff --git a/pkg/scheme/spec.go b/pkg/scheme/spec.go new file mode 100644 index 00000000..69fb59f6 --- /dev/null +++ 
b/pkg/scheme/spec.go @@ -0,0 +1,93 @@ +package scheme + +import ( + "github.com/oklog/ulid/v2" +) + +type ( + // Spec is a specification that defines how node.Node should be defined and linked. + Spec interface { + // GetID returns the ID. + GetID() ulid.ULID + // SetID set the ID. + SetID(val ulid.ULID) + // GetKind returns the Kind. + GetKind() string + // SetKind set the Kind. + SetKind(val string) + // GetNamespace returns the Namespace. + GetNamespace() string + // SetNamespace set the Namespace. + SetNamespace(val string) + // GetName returns the Name. + GetName() string + // SetName set the Name. + SetName(val string) + // GetLinks returns the Links. + GetLinks() map[string][]PortLocation + // SetLinks set the Links. + SetLinks(val map[string][]PortLocation) + } + + // SpecMeta is metadata that all persisted resources must have, which includes all objects users must create. + SpecMeta struct { + ID ulid.ULID `json:"id,omitempty" yaml:"id,omitempty" map:"id,omitempty"` + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" map:"kind,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty" map:"namespace,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty" map:"name,omitempty"` + Links map[string][]PortLocation `json:"links,omitempty" yaml:"links,omitempty" map:"links,omitempty"` + } + + // PortLocation is the location of a port in the network. 
+ PortLocation struct { + ID ulid.ULID `json:"id,omitempty" yaml:"id,omitempty" map:"id,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty" map:"name,omitempty"` + Port string `json:"port" yaml:"port" map:"port"` + } +) + +var _ Spec = &SpecMeta{} + +const ( + NamespaceDefault = "default" +) + +func (m *SpecMeta) GetID() ulid.ULID { + return m.ID +} + +func (m *SpecMeta) SetID(val ulid.ULID) { + m.ID = val +} + +func (m *SpecMeta) GetKind() string { + return m.Kind +} + +func (m *SpecMeta) SetKind(val string) { + m.Kind = val +} + +func (m *SpecMeta) GetNamespace() string { + return m.Namespace +} + +func (m *SpecMeta) SetNamespace(val string) { + m.Namespace = val +} + +func (m *SpecMeta) GetName() string { + return m.Name +} + +func (m *SpecMeta) SetName(val string) { + m.Name = val +} + +func (m *SpecMeta) GetLinks() map[string][]PortLocation { + return m.Links +} + +func (m *SpecMeta) SetLinks(val map[string][]PortLocation) { + m.Links = val +} diff --git a/pkg/scheme/unstructured.go b/pkg/scheme/unstructured.go new file mode 100644 index 00000000..ce438aba --- /dev/null +++ b/pkg/scheme/unstructured.go @@ -0,0 +1,189 @@ +package scheme + +import ( + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + + // Unstructured is an Spec that is not marshaled for structuring. + Unstructured struct { + doc *primitive.Map + mu sync.RWMutex + } +) + +var _ Spec = &Unstructured{} + +const ( + KeyID = "id" + KeyKind = "kind" + KeyNamespace = "namespace" + KeyName = "name" + KeyLinks = "links" +) + +// NewUnstructured returns a new Unstructured. 
+func NewUnstructured(doc *primitive.Map) *Unstructured { + if doc == nil { + doc = primitive.NewMap() + } + + u := &Unstructured{doc: doc} + + if v := u.GetID(); !util.IsZero(v) { + u.SetID(v) + } + if v := u.GetLinks(); !util.IsZero(v) { + u.SetLinks(v) + } + + return u +} + +// GetID returns the ID of the Unstructured. +func (u *Unstructured) GetID() ulid.ULID { + var val ulid.ULID + _ = u.Get(KeyID, &val) + return val +} + +// SetID sets the ID of the Unstructured. +func (u *Unstructured) SetID(val ulid.ULID) { + u.Set(KeyID, val) +} + +// GetKind returns the Kind of the Unstructured. +func (u *Unstructured) GetKind() string { + var val string + _ = u.Get(KeyKind, &val) + return val +} + +// SetKind sets the Kind of the Unstructured. +func (u *Unstructured) SetKind(val string) { + u.Set(KeyKind, val) +} + +// GetNamespace returns the Namespace of the Unstructured. +func (u *Unstructured) GetNamespace() string { + var val string + _ = u.Get(KeyNamespace, &val) + return val + +} + +// SetNamespace sets the Namespace of the Unstructured. +func (u *Unstructured) SetNamespace(val string) { + u.Set(KeyNamespace, val) +} + +// GetName returns the Name of the Unstructured. +func (u *Unstructured) GetName() string { + var val string + _ = u.Get(KeyName, &val) + return val + +} + +// SetName sets the Name of the Unstructured. +func (u *Unstructured) SetName(val string) { + u.Set(KeyName, val) +} + +// GetLinks returns the Links of the Unstructured. +func (u *Unstructured) GetLinks() map[string][]PortLocation { + var val map[string][]PortLocation + _ = u.Get(KeyLinks, &val) + return val +} + +// SetLinks sets the Links of the Unstructured. +func (u *Unstructured) SetLinks(val map[string][]PortLocation) { + u.Set(KeyLinks, val) +} + +// Get returns the value of the given key. 
+func (u *Unstructured) Get(key string, val any) error { + u.mu.RLock() + defer u.mu.RUnlock() + + if v, ok := u.doc.Get(primitive.NewString(key)); ok { + if err := primitive.Unmarshal(v, val); err != nil { + return err + } + } + return nil +} + +// Set sets the val of the given key. +func (u *Unstructured) Set(key string, val any) error { + u.mu.Lock() + defer u.mu.Unlock() + + if v, err := primitive.MarshalBinary(val); err != nil { + return err + } else { + u.doc = u.doc.Set(primitive.NewString(key), v) + } + return nil +} + +// GetOrSet returns the value of the given key. if the value is not exist, sets the val of the given key. +func (u *Unstructured) GetOrSet(key string, val any) error { + u.mu.Lock() + defer u.mu.Unlock() + + if v, ok := u.doc.Get(primitive.NewString(key)); ok { + if err := primitive.Unmarshal(v, val); err != nil { + return err + } + } else if v, err := primitive.MarshalBinary(val); err != nil { + return err + } else { + u.doc = u.doc.Set(primitive.NewString(key), v) + } + return nil +} + +// Doc returns the raw object of the Unstructured. +func (u *Unstructured) Doc() *primitive.Map { + u.mu.RLock() + defer u.mu.RUnlock() + + return u.doc +} + +// Marshall sets the spec as a marshal and raw object to use. +func (u *Unstructured) Marshal(spec Spec) error { + u.mu.RLock() + defer u.mu.RUnlock() + + if spec, ok := spec.(*Unstructured); ok { + u.doc = spec.doc + return nil + } + + if spec, err := primitive.MarshalBinary(spec); err != nil { + return err + } else { + u.doc = spec.(*primitive.Map) + } + return nil +} + +// Unmarshal unmarshal the stored raw object and stores it in spec. 
+func (u *Unstructured) Unmarshal(spec Spec) error { + u.mu.RLock() + defer u.mu.RUnlock() + + if spec, ok := spec.(*Unstructured); ok { + spec.doc = u.doc + return nil + } + return primitive.Unmarshal(u.doc, spec) +} diff --git a/pkg/scheme/unstructured_test.go b/pkg/scheme/unstructured_test.go new file mode 100644 index 00000000..55c5d29d --- /dev/null +++ b/pkg/scheme/unstructured_test.go @@ -0,0 +1,80 @@ +package scheme + +import ( + "testing" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/assert" +) + +func TestUnstructured_GetAndSetID(t *testing.T) { + id := ulid.Make() + + u := NewUnstructured(nil) + + u.SetID(id) + assert.Equal(t, id, u.GetID()) +} + +func TestUnstructured_GetAndSetKind(t *testing.T) { + kind := faker.Word() + + u := NewUnstructured(nil) + + u.SetKind(kind) + assert.Equal(t, kind, u.GetKind()) +} + +func TestUnstructured_GetAndNamespace(t *testing.T) { + namespace := faker.Word() + + u := NewUnstructured(nil) + + u.SetNamespace(namespace) + assert.Equal(t, namespace, u.GetNamespace()) +} + +func TestUnstructured_GetAndLinks(t *testing.T) { + links := map[string][]PortLocation{ + faker.Word(): { + { + ID: ulid.Make(), + Port: faker.Word(), + }, + }, + } + + u := NewUnstructured(nil) + + u.SetLinks(links) + assert.Equal(t, links, u.GetLinks()) +} + +func TestUnstructured_Marshal(t *testing.T) { + u := NewUnstructured(nil) + spec := &SpecMeta{ + ID: ulid.Make(), + Kind: faker.Word(), + } + + err := u.Marshal(spec) + assert.NoError(t, err) + assert.Equal(t, spec.GetID(), u.GetID()) + assert.Equal(t, spec.GetKind(), u.GetKind()) +} + +func TestUnstructured_Unmarshal(t *testing.T) { + u := NewUnstructured(nil) + spec := &SpecMeta{} + + _ = u.Marshal(&SpecMeta{ + ID: ulid.Make(), + Kind: faker.Word(), + }) + + err := u.Unmarshal(spec) + assert.NoError(t, err) + assert.Equal(t, u.GetID(), spec.GetID()) + assert.Equal(t, u.GetKind(), spec.GetKind()) +} diff --git a/pkg/storage/event.go 
b/pkg/storage/event.go new file mode 100644 index 00000000..37eda30a --- /dev/null +++ b/pkg/storage/event.go @@ -0,0 +1,18 @@ +package storage + +import "github.com/oklog/ulid/v2" + +type ( + // Event is an event that occurs when an scheme.Spec is changed. + Event struct { + OP eventOP + NodeID ulid.ULID + } + eventOP int +) + +const ( + EventInsert eventOP = iota + EventUpdate + EventDelete +) diff --git a/pkg/storage/filter.go b/pkg/storage/filter.go new file mode 100644 index 00000000..ecd0c972 --- /dev/null +++ b/pkg/storage/filter.go @@ -0,0 +1,167 @@ +package storage + +import ( + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + // Filter is a filter for find matched primitive. + Filter struct { + OP database.OP + Key string + Value any + } + + filterHelper[T any] struct { + key string + } +) + +func Where[T any](key string) *filterHelper[T] { + return &filterHelper[T]{ + key: key, + } +} + +func (fh *filterHelper[T]) EQ(value T) *Filter { + return &Filter{ + OP: database.EQ, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) NE(value T) *Filter { + return &Filter{ + OP: database.NE, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) LT(value T) *Filter { + return &Filter{ + OP: database.LT, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) LTE(value T) *Filter { + return &Filter{ + OP: database.LTE, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) GT(value T) *Filter { + return &Filter{ + OP: database.GT, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) GTE(value T) *Filter { + return &Filter{ + OP: database.GTE, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) IN(slice ...T) *Filter { + value := make([]any, len(slice)) + for i, e := range slice { + value[i] = e + } + return &Filter{ + OP: database.IN, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) NotIN(slice ...T) 
*Filter { + value := make([]any, len(slice)) + for i, e := range slice { + value[i] = e + } + return &Filter{ + OP: database.NIN, + Key: fh.key, + Value: value, + } +} + +func (fh *filterHelper[T]) IsNull() *Filter { + return &Filter{ + OP: database.NULL, + Key: fh.key, + } +} + +func (fh *filterHelper[T]) IsNotNull() *Filter { + return &Filter{ + OP: database.NNULL, + Key: fh.key, + } +} + +func (ft *Filter) And(x ...*Filter) *Filter { + var v []*Filter + for _, e := range append([]*Filter{ft}, x...) { + if e != nil { + v = append(v, e) + } + } + + return &Filter{ + OP: database.AND, + Value: v, + } +} + +func (ft *Filter) Or(x ...*Filter) *Filter { + var v []*Filter + for _, e := range append([]*Filter{ft}, x...) { + if e != nil { + v = append(v, e) + } + } + + return &Filter{ + OP: database.OR, + Value: v, + } +} + +func (ft *Filter) Encode() (*database.Filter, error) { + if ft == nil { + return nil, nil + } + if ft.OP == database.AND || ft.OP == database.OR { + var values []*database.Filter + if value, ok := ft.Value.([]*Filter); ok { + for _, v := range value { + if v, err := v.Encode(); err != nil { + return nil, err + } else { + values = append(values, v) + } + } + } + return &database.Filter{OP: ft.OP, Value: values}, nil // keep ft.OP: collapsing OR into AND changes query semantics + } + if ft.OP == database.NULL || ft.OP == database.NNULL { + return &database.Filter{OP: ft.OP, Key: ft.Key}, nil + } + + if v, err := primitive.MarshalBinary(ft.Value); err != nil { + return nil, err + } else { + return &database.Filter{OP: ft.OP, Key: ft.Key, Value: v}, nil + } +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 00000000..d1ee8cd3 --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,331 @@ +package storage + +import ( + "context" + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/internal/util" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/siyul-park/uniflow/pkg/scheme" +) + +type ( + // 
Config is a config for Storage. + Config struct { + Scheme *scheme.Scheme + Database database.Database + } + + // Storage is the storage that stores scheme.Spec. + Storage struct { + scheme *scheme.Scheme + collection database.Collection + mu sync.RWMutex + } +) + +const ( + CollectionNodes = "nodes" +) + +var ( + indexes = []database.IndexModel{ + { + Name: "namespace_name", + Keys: []string{scheme.KeyNamespace, scheme.KeyName}, + Unique: true, + Partial: database.Where(scheme.KeyName).NE(primitive.NewString("")).And(database.Where(scheme.KeyName).IsNotNull()), + }, + } +) + +// New returns a new Storage. +func New(ctx context.Context, config Config) (*Storage, error) { + scheme := config.Scheme + db := config.Database + + collection, err := db.Collection(ctx, CollectionNodes) + if err != nil { + return nil, err + } + + s := &Storage{ + scheme: scheme, + collection: collection, + } + + if exists, err := s.collection.Indexes().List(ctx); err != nil { + return nil, err + } else { + for _, index := range indexes { + index = database.IndexModel{ + Name: index.Name, + Keys: index.Keys, + Unique: index.Unique, + Partial: index.Partial, + } + + var ok bool + for _, i := range exists { + if i.Name == index.Name { + if ok = util.Equal(i, index); !ok { // assign outer ok (":=" would shadow it): equal index is kept, mismatch is dropped and recreated + s.collection.Indexes().Drop(ctx, i.Name) + } + break + } + } + if ok { + continue + } + s.collection.Indexes().Create(ctx, index) + } + } + + return s, nil +} + +// Watch returns Stream to track changes. +func (s *Storage) Watch(ctx context.Context, filter *Filter) (*Stream, error) { + f, err := filter.Encode() + if err != nil { + return nil, err + } + + stream, err := s.collection.Watch(ctx, f) + if err != nil { + return nil, err + } + return NewStream(stream), nil +} + +// InsertOne inserts a single scheme.Spec and return ID. 
+func (s *Storage) InsertOne(ctx context.Context, spec scheme.Spec) (ulid.ULID, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + unstructured := scheme.NewUnstructured(nil) + + if err := unstructured.Marshal(spec); err != nil { + return ulid.ULID{}, err + } + if unstructured.GetNamespace() == "" { + unstructured.SetNamespace(scheme.NamespaceDefault) + } + if util.IsZero(unstructured.GetID()) { + unstructured.SetID(ulid.Make()) + } + + if err := s.validate(unstructured); err != nil { + return ulid.ULID{}, err + } + + var id ulid.ULID + if pk, err := s.collection.InsertOne(ctx, unstructured.Doc()); err != nil { + return ulid.ULID{}, err + } else if err := primitive.Unmarshal(pk, &id); err != nil { + _, _ = s.collection.DeleteOne(ctx, database.Where(scheme.KeyID).EQ(pk)) + return ulid.ULID{}, err + } else { + return id, nil + } +} + +// InsertMany inserts multiple scheme.Spec and return IDs. +func (s *Storage) InsertMany(ctx context.Context, objs []scheme.Spec) ([]ulid.ULID, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + var docs []*primitive.Map + for _, spec := range objs { + unstructured := scheme.NewUnstructured(nil) + + if err := unstructured.Marshal(spec); err != nil { + return nil, err + } + if unstructured.GetNamespace() == "" { + unstructured.SetNamespace(scheme.NamespaceDefault) + } + if util.IsZero(unstructured.GetID()) { + unstructured.SetID(ulid.Make()) + } + + if err := s.validate(unstructured); err != nil { + return nil, err + } + + docs = append(docs, unstructured.Doc()) + } + + ids := make([]ulid.ULID, 0) + if pks, err := s.collection.InsertMany(ctx, docs); err != nil { + return nil, err + } else if err := primitive.Unmarshal(primitive.NewSlice(pks...), &ids); err != nil { + _, _ = s.collection.DeleteMany(ctx, database.Where(scheme.KeyID).IN(pks...)) + return nil, err + } else { + return ids, nil + } +} + +// UpdateOne updates a single scheme.Spec and returns success or failure. 
+func (s *Storage) UpdateOne(ctx context.Context, spec scheme.Spec) (bool, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + unstructured := scheme.NewUnstructured(nil) + + if err := unstructured.Marshal(spec); err != nil { + return false, err + } + if unstructured.GetNamespace() == "" { + unstructured.SetNamespace(scheme.NamespaceDefault) + } + if util.IsZero(unstructured.GetID()) { + return false, nil + } + + if err := s.validate(unstructured); err != nil { + return false, err + } + + filter, _ := Where[ulid.ULID](scheme.KeyID).EQ(unstructured.GetID()).Encode() + return s.collection.UpdateOne(ctx, filter, unstructured.Doc()) +} + +// UpdateMany multiple scheme.Spec and return the number of success. +func (s *Storage) UpdateMany(ctx context.Context, objs []scheme.Spec) (int, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + var unstructureds []*scheme.Unstructured + for _, spec := range objs { + unstructured := scheme.NewUnstructured(nil) + + if err := unstructured.Marshal(spec); err != nil { + return 0, err + } + if unstructured.GetNamespace() == "" { + unstructured.SetNamespace(scheme.NamespaceDefault) + } + if util.IsZero(unstructured.GetID()) { + continue + } + + if err := s.validate(unstructured); err != nil { + return 0, err + } + + unstructureds = append(unstructureds, unstructured) + } + + count := 0 + for _, unstructured := range unstructureds { + filter, _ := Where[ulid.ULID](scheme.KeyID).EQ(unstructured.GetID()).Encode() + if ok, err := s.collection.UpdateOne(ctx, filter, unstructured.Doc()); err != nil { + return count, err + } else if ok { + count += 1 + } + } + + return count, nil +} + +// DeleteOne deletes a single scheme.Spec and returns success or failure. 
+func (s *Storage) DeleteOne(ctx context.Context, filter *Filter) (bool, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + f, err := filter.Encode() + if err != nil { + return false, err + } + + return s.collection.DeleteOne(ctx, f) +} + +// DeleteMany deletes multiple scheme.Spec and returns the number of success. +func (s *Storage) DeleteMany(ctx context.Context, filter *Filter) (int, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + f, err := filter.Encode() + if err != nil { + return 0, err + } + + return s.collection.DeleteMany(ctx, f) +} + +// FindOne return the single scheme.Spec which is matched by the filter. +func (s *Storage) FindOne(ctx context.Context, filter *Filter, options ...*database.FindOptions) (scheme.Spec, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + f, err := filter.Encode() + if err != nil { + return nil, err + } + + if doc, err := s.collection.FindOne(ctx, f, options...); err != nil { + return nil, err + } else if doc != nil { + unstructured := scheme.NewUnstructured(doc) + if spec, ok := s.scheme.New(unstructured.GetKind()); !ok { + return unstructured, nil + } else if err := unstructured.Unmarshal(spec); err != nil { + return nil, err + } else { + return spec, nil + } + } + + return nil, nil +} + +// FindMany returns multiple scheme.Spec which is matched by the filter. 
+func (s *Storage) FindMany(ctx context.Context, filter *Filter, options ...*database.FindOptions) ([]scheme.Spec, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + f, err := filter.Encode() + if err != nil { + return nil, err + } + + var specs []scheme.Spec + if docs, err := s.collection.FindMany(ctx, f, options...); err != nil { + return nil, err + } else { + for _, doc := range docs { + if doc == nil { + continue + } + unstructured := scheme.NewUnstructured(doc) + if spec, ok := s.scheme.New(unstructured.GetKind()); !ok { + specs = append(specs, unstructured) + } else if err := unstructured.Unmarshal(spec); err != nil { + return nil, err + } else { + specs = append(specs, spec) + } + } + + return specs, nil + } +} + +func (s *Storage) validate(unstructured *scheme.Unstructured) error { + if spec, ok := s.scheme.New(unstructured.GetKind()); ok { + if err := unstructured.Unmarshal(spec); err != nil { + return err + } else if n, err := s.scheme.Decode(spec); err != nil { + return err + } else { + _ = n.Close() + } + } + return nil +} diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go new file mode 100644 index 00000000..0cc5a508 --- /dev/null +++ b/pkg/storage/storage_test.go @@ -0,0 +1,349 @@ +package storage + +import ( + "context" + "testing" + + "github.com/go-faker/faker/v4" + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/siyul-park/uniflow/pkg/scheme" + "github.com/stretchr/testify/assert" +) + +func TestStorage_Watch(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, 
+ Namespace: scheme.NamespaceDefault, + } + + stream, err := st.Watch(context.Background(), nil) + assert.NoError(t, err) + defer func() { _ = stream.Close() }() + + go func() { + for { + event, ok := <-stream.Next() + if ok { + assert.NotNil(t, event.NodeID) + } else { + return + } + } + }() + + _, _ = st.InsertOne(context.Background(), spec) + _, _ = st.UpdateOne(context.Background(), spec) + _, _ = st.DeleteOne(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) +} + +func TestStorage_InsertOne(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + } + + id, err := st.InsertOne(context.Background(), spec) + assert.NoError(t, err) + assert.Equal(t, spec.ID, id) +} + +func TestStorage_InsertMany(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := []scheme.Spec{ + &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + }, + &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + }, + } + + ids, err := st.InsertMany(context.Background(), spec) + assert.NoError(t, err) + assert.Len(t, ids, len(spec)) + for i, spec := range spec { + assert.Equal(t, spec.GetID(), ids[i]) + } +} + +func TestStorage_UpdateOne(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + 
s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + } + + ok, err := st.UpdateOne(context.Background(), spec) + assert.NoError(t, err) + assert.False(t, ok) + + _, _ = st.InsertOne(context.Background(), spec) + + ok, err = st.UpdateOne(context.Background(), spec) + assert.NoError(t, err) + assert.True(t, ok) +} + +func TestStorage_UpdateMany(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := []scheme.Spec{ + &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + }, + &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + }, + } + + count, err := st.UpdateMany(context.Background(), spec) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + _, _ = st.InsertMany(context.Background(), spec) + + count, err = st.UpdateMany(context.Background(), spec) + assert.NoError(t, err) + assert.Equal(t, len(spec), count) +} + +func TestStorage_DeleteOne(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + 
ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + ok, err := st.DeleteOne(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.False(t, ok) + + _, _ = st.InsertOne(context.Background(), spec) + + ok, err = st.DeleteOne(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.True(t, ok) +} + +func TestStorage_DeleteMany(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + count, err := st.DeleteMany(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + _, _ = st.InsertOne(context.Background(), spec) + + count, err = st.DeleteMany(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.Equal(t, 1, count) +} + +func TestStorage_FindOne(t *testing.T) { + t.Run("id", func(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + _, _ = st.InsertOne(context.Background(), spec) + + def, err := st.FindOne(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + 
assert.NoError(t, err) + assert.NotNil(t, def) + assert.Equal(t, spec.GetID(), def.GetID()) + }) + + t.Run("namespace, name", func(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + Name: faker.Word(), + } + + _, _ = st.InsertOne(context.Background(), spec) + + def, err := st.FindOne(context.Background(), Where[string](scheme.KeyNamespace).EQ(spec.GetNamespace()).And(Where[string](scheme.KeyName).EQ(spec.GetName()))) + assert.NoError(t, err) + assert.NotNil(t, def) + assert.Equal(t, spec.GetID(), def.GetID()) + }) +} + +func TestStorage_FindMany(t *testing.T) { + kind := faker.Word() + + s := scheme.New() + s.AddKnownType(kind, &scheme.SpecMeta{}) + s.AddCodec(kind, scheme.CodecFunc(func(spec scheme.Spec) (node.Node, error) { + return node.NewOneToOneNode(node.OneToOneNodeConfig{ + ID: spec.GetID(), + }), nil + })) + + st, _ := New(context.Background(), Config{ + Scheme: s, + Database: memdb.New(faker.Word()), + }) + + spec := &scheme.SpecMeta{ + ID: ulid.Make(), + Kind: kind, + Namespace: scheme.NamespaceDefault, + } + + _, _ = st.InsertOne(context.Background(), spec) + + defs, err := st.FindMany(context.Background(), Where[ulid.ULID](scheme.KeyID).EQ(spec.GetID())) + assert.NoError(t, err) + assert.Len(t, defs, 1) + assert.Equal(t, spec.GetID(), defs[0].GetID()) +} diff --git a/pkg/storage/stream.go b/pkg/storage/stream.go new file mode 100644 index 00000000..75dcf480 --- /dev/null +++ b/pkg/storage/stream.go @@ -0,0 +1,83 @@ +package storage + +import ( + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database" + 
"github.com/siyul-park/uniflow/pkg/primitive" +) + +type ( + // Stream is a stream to track scheme.Spec is changed. + Stream struct { + stream database.Stream + channel chan Event + done chan struct{} + } +) + +// NewStream returns a new Stream. +func NewStream(stream database.Stream) *Stream { + s := &Stream{ + stream: stream, + channel: make(chan Event), + done: make(chan struct{}), + } + + go func() { + defer func() { close(s.channel) }() + + for { + select { + case <-s.done: + return + case <-s.stream.Done(): + _ = s.Close() + return + case e := <-s.stream.Next(): + var id ulid.ULID + if err := primitive.Unmarshal(e.DocumentID, &id); err != nil { + continue + } + var op eventOP + if e.OP == database.EventInsert { + op = EventInsert + } else if e.OP == database.EventUpdate { + op = EventUpdate + } else if e.OP == database.EventDelete { + op = EventDelete + } + + select { + case <-s.done: + return + case s.channel <- Event{OP: op, NodeID: id}: + } + } + } + }() + + return s +} + +// Next returns a channel that is received Event. +func (s *Stream) Next() <-chan Event { + return s.channel +} + +// Done returns a channel that is closed when the Stream is closed. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Close closes the Stream. 
+func (s *Stream) Close() error { + select { + case <-s.done: + return nil + default: + } + + close(s.done) + + return s.stream.Close() +} diff --git a/pkg/storage/stream_test.go b/pkg/storage/stream_test.go new file mode 100644 index 00000000..4003890a --- /dev/null +++ b/pkg/storage/stream_test.go @@ -0,0 +1,34 @@ +package storage + +import ( + "context" + "testing" + "time" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/database" + "github.com/siyul-park/uniflow/pkg/database/memdb" + "github.com/siyul-park/uniflow/pkg/primitive" + "github.com/stretchr/testify/assert" +) + +func TestStream_Next(t *testing.T) { + rawStream := memdb.NewStream() + + stream := NewStream(rawStream) + defer func() { _ = stream.Close() }() + event := database.Event{OP: database.EventInsert, DocumentID: primitive.NewBinary(ulid.Make().Bytes())} + + rawStream.Emit(event) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case evt, ok := <-stream.Next(): + assert.True(t, ok) + assert.NotZero(t, evt.NodeID) + case <-ctx.Done(): + assert.Fail(t, "timeout") + } +} diff --git a/pkg/symbol/postloadhook.go b/pkg/symbol/postloadhook.go new file mode 100644 index 00000000..707be9f9 --- /dev/null +++ b/pkg/symbol/postloadhook.go @@ -0,0 +1,18 @@ +package symbol + +import "github.com/siyul-park/uniflow/pkg/node" + +type ( + // PostLoadHook is a hook that is called after a node is loaded. 
+ PostLoadHook interface { + PostLoad(n node.Node) error + } + + PostLoadHookFunc func(n node.Node) error +) + +var _ PostLoadHook = PostLoadHookFunc(func(n node.Node) error { return nil }) + +func (f PostLoadHookFunc) PostLoad(n node.Node) error { + return f(n) +} diff --git a/pkg/symbol/postunloadhook.go b/pkg/symbol/postunloadhook.go new file mode 100644 index 00000000..bf5ccc23 --- /dev/null +++ b/pkg/symbol/postunloadhook.go @@ -0,0 +1,18 @@ +package symbol + +import "github.com/siyul-park/uniflow/pkg/node" + +type ( + // PostUnloadHook is a hook that is called after a node is unloaded. + PostUnloadHook interface { + PostUnload(n node.Node) error + } + + PostUnloadHookFunc func(n node.Node) error +) + +var _ PostUnloadHook = PostUnloadHookFunc(func(n node.Node) error { return nil }) + +func (f PostUnloadHookFunc) PostUnload(n node.Node) error { + return f(n) +} diff --git a/pkg/symbol/preloadhook.go b/pkg/symbol/preloadhook.go new file mode 100644 index 00000000..015e2500 --- /dev/null +++ b/pkg/symbol/preloadhook.go @@ -0,0 +1,18 @@ +package symbol + +import "github.com/siyul-park/uniflow/pkg/node" + +type ( + // PreLoadHook is a hook that is called before a node.Node is loaded. + PreLoadHook interface { + PreLoad(n node.Node) error + } + + PreLoadHookFunc func(n node.Node) error +) + +var _ PreLoadHook = PreLoadHookFunc(func(n node.Node) error { return nil }) + +func (f PreLoadHookFunc) PreLoad(n node.Node) error { + return f(n) +} diff --git a/pkg/symbol/preunloadhook.go b/pkg/symbol/preunloadhook.go new file mode 100644 index 00000000..df07a152 --- /dev/null +++ b/pkg/symbol/preunloadhook.go @@ -0,0 +1,18 @@ +package symbol + +import "github.com/siyul-park/uniflow/pkg/node" + +type ( + // PreUnloadHook is a hook that is called before a node.Node is unloaded. 
+ PreUnloadHook interface { + PreUnload(n node.Node) error + } + + PreUnloadHookFunc func(n node.Node) error +) + +var _ PreUnloadHook = PreUnloadHookFunc(func(n node.Node) error { return nil }) + +func (f PreUnloadHookFunc) PreUnload(n node.Node) error { + return f(n) +} diff --git a/pkg/symbol/table.go b/pkg/symbol/table.go new file mode 100644 index 00000000..2888a9ec --- /dev/null +++ b/pkg/symbol/table.go @@ -0,0 +1,155 @@ +package symbol + +import ( + "sync" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/node" +) + +type ( + // TableOptions is a options for Table. + TableOptions struct { + PreLoadHooks []PreLoadHook + PostLoadHooks []PostLoadHook + PreUnloadHooks []PreUnloadHook + PostUnloadHooks []PostUnloadHook + } + + // Table is the storage that manages Symbol. + Table struct { + data map[ulid.ULID]node.Node + preLoadHooks []PreLoadHook + postLoadHooks []PostLoadHook + preUnloadHooks []PreUnloadHook + postUnloadHooks []PostUnloadHook + mu sync.RWMutex + } +) + +// NewTable returns a new SymbolTable +func NewTable(opts ...TableOptions) *Table { + var preLoadHooks []PreLoadHook + var postLoadHooks []PostLoadHook + var preUnloadHooks []PreUnloadHook + var postUnloadHooks []PostUnloadHook + + for _, opt := range opts { + preLoadHooks = append(preLoadHooks, opt.PreLoadHooks...) + postLoadHooks = append(postLoadHooks, opt.PostLoadHooks...) + preUnloadHooks = append(preUnloadHooks, opt.PreUnloadHooks...) + postUnloadHooks = append(postUnloadHooks, opt.PostUnloadHooks...) + } + + return &Table{ + data: make(map[ulid.ULID]node.Node), + preLoadHooks: preLoadHooks, + postLoadHooks: postLoadHooks, + preUnloadHooks: preUnloadHooks, + postUnloadHooks: postUnloadHooks, + } +} + +// Insert inserts a node.Node. 
+func (t *Table) Insert(n node.Node) (node.Node, error) { + t.mu.Lock() + defer t.mu.Unlock() + + if origin, ok := t.data[n.ID()]; ok { + if err := t.preUnload(origin); err != nil { + return nil, err + } + if err := origin.Close(); err != nil { + return nil, err + } + if err := t.postUnload(origin); err != nil { + return nil, err + } + } + + if err := t.preLoad(n); err != nil { + return nil, err + } + t.data[n.ID()] = n + if err := t.postLoad(n); err != nil { + return nil, err + } + + return n, nil +} + +// Free removes a Symbol. +func (t *Table) Free(id ulid.ULID) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + + if n, ok := t.data[id]; ok { + if err := n.Close(); err != nil { + return false, err + } + delete(t.data, id) + return true, nil + } + + return false, nil +} + +// Lookup returns a node.Node. +func (t *Table) Lookup(id ulid.ULID) (node.Node, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + + n, ok := t.data[id] + return n, ok +} + +// Close closes the SymbolTable. +func (t *Table) Close() error { + t.mu.Lock() + defer t.mu.Unlock() + + for id, n := range t.data { + if err := n.Close(); err != nil { + return err + } + delete(t.data, id) + } + + return nil +} + +func (t *Table) preLoad(n node.Node) error { + for _, hook := range t.preLoadHooks { + if err := hook.PreLoad(n); err != nil { + return err + } + } + return nil +} + +func (t *Table) postLoad(n node.Node) error { + for _, hook := range t.postLoadHooks { + if err := hook.PostLoad(n); err != nil { + return err + } + } + return nil +} + +func (t *Table) preUnload(n node.Node) error { + for _, hook := range t.preUnloadHooks { + if err := hook.PreUnload(n); err != nil { + return err + } + } + return nil +} + +func (t *Table) postUnload(n node.Node) error { + for _, hook := range t.postUnloadHooks { + if err := hook.PostUnload(n); err != nil { + return err + } + } + return nil +} diff --git a/pkg/symbol/table_test.go b/pkg/symbol/table_test.go new file mode 100644 index 00000000..13da21d6 --- 
/dev/null +++ b/pkg/symbol/table_test.go @@ -0,0 +1,96 @@ +package symbol + +import ( + "testing" + + "github.com/oklog/ulid/v2" + "github.com/siyul-park/uniflow/pkg/node" + "github.com/stretchr/testify/assert" +) + +func TestTable_Insert(t *testing.T) { + t.Run("not exists", func(t *testing.T) { + tb := NewTable() + defer func() { _ = tb.Close() }() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + s, err := tb.Insert(n) + assert.NoError(t, err) + assert.NotNil(t, s) + assert.Equal(t, n.ID(), s.ID()) + }) + + t.Run("exists", func(t *testing.T) { + tb := NewTable() + defer func() { _ = tb.Close() }() + + id := ulid.Make() + + n1 := node.NewOneToOneNode(node.OneToOneNodeConfig{ID: id}) + n2 := node.NewOneToOneNode(node.OneToOneNodeConfig{ID: id}) + + s1, err := tb.Insert(n1) + assert.NoError(t, err) + assert.NotNil(t, s1) + assert.Equal(t, n1.ID(), s1.ID()) + + s2, err := tb.Insert(n1) + assert.NoError(t, err) + assert.NotNil(t, s2) + assert.Equal(t, n2.ID(), s2.ID()) + }) +} + +func TestTable_Free(t *testing.T) { + t.Run("not exists", func(t *testing.T) { + tb := NewTable() + defer func() { _ = tb.Close() }() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + ok, err := tb.Free(n.ID()) + assert.NoError(t, err) + assert.False(t, ok) + }) + + t.Run("exists", func(t *testing.T) { + tb := NewTable() + defer func() { _ = tb.Close() }() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + tb.Insert(n) + + ok, err := tb.Free(n.ID()) + assert.NoError(t, err) + assert.True(t, ok) + }) +} + +func TestTable_Lookup(t *testing.T) { + t.Run("not exists", func(t *testing.T) { + tb := NewTable() + defer func() { _ = tb.Close() }() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + s, ok := tb.Lookup(n.ID()) + assert.False(t, ok) + assert.Nil(t, s) + }) + + t.Run("exists", func(t *testing.T) { + tb := NewTable() + defer func() { _ = tb.Close() }() + + n := node.NewOneToOneNode(node.OneToOneNodeConfig{}) + + tb.Insert(n) + + s, ok := 
tb.Lookup(n.ID()) + assert.True(t, ok) + assert.NotNil(t, s) + assert.Equal(t, n.ID(), s.ID()) + }) +}