diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..4b86d7197f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Getting Help on IPFS
+    url: https://ipfs.io/help
+    about: All information about how and where to get help on IPFS.
+  - name: IPFS Official Forum
+    url: https://discuss.ipfs.io
+    about: Please post general questions, support requests, and discussions here.
diff --git a/blockservice/.github/ISSUE_TEMPLATE/open_an_issue.md b/.github/ISSUE_TEMPLATE/open_an_issue.md
similarity index 100%
rename from blockservice/.github/ISSUE_TEMPLATE/open_an_issue.md
rename to .github/ISSUE_TEMPLATE/open_an_issue.md
diff --git a/.github/config.yml b/.github/config.yml
new file mode 100644
index 0000000000..ed26646a0f
--- /dev/null
+++ b/.github/config.yml
@@ -0,0 +1,68 @@
+# Configuration for welcome - https://github.com/behaviorbot/welcome
+
+# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
+# Comment to be posted on first-time issues
+newIssueWelcomeComment: >
+  Thank you for submitting your first issue to this repository! A maintainer
+  will be here shortly to triage and review.
+
+  In the meantime, please double-check that you have provided all the
+  necessary information to make this process easy! Any information that can
+  help save additional round trips is useful! We currently aim to give
+  initial feedback within **two business days**. If this does not happen, feel
+  free to leave a comment.
+
+  Please keep an eye on how this issue will be labeled, as labels give an
+  overview of priorities, assignments, and additional actions requested by the
+  maintainers:
+
+  - "Priority" labels will show how urgent this is for the team.
+  - "Status" labels will show if this is ready to be worked on, blocked, or in progress.
+  - "Need" labels will indicate if additional input or analysis is required.
+
+  Finally, remember to use https://discuss.ipfs.io if you just need general
+  support.
+
+# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
+# Comment to be posted on PRs from first-time contributors in your repository
+newPRWelcomeComment: >
+  Thank you for submitting this PR!
+
+  A maintainer will be here shortly to review it.
+
+  We are super grateful, but we are also overloaded! Help us by making sure
+  that:
+
+  * The context for this PR is clear, with relevant discussion, decisions
+  and stakeholders linked/mentioned.
+
+  * Your contribution itself is clear (code comments, self-review for the
+  rest) and in its best form. Follow the [code contribution
+  guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines)
+  if they apply.
+
+  Getting other community members to review would also be a great help on
+  complex PRs (you can ask in the chats/forums). If you are unsure about
+  something, just leave us a comment.
+
+  Next steps:
+
+  * A maintainer will triage and assign priority to this PR, commenting on
+  any missing things and potentially assigning a reviewer for high
+  priority items.
+
+  * The PR gets reviewed, discussed, and approved as needed.
+
+  * The PR is merged by maintainers when it has been approved and comments addressed.
+
+  We currently aim to provide initial feedback/triaging within **two business
+  days**. Please keep an eye on any labelling actions, as these will indicate
+  priorities and status of your contribution.
+ + We are very grateful for your contribution! + + +# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge +# Comment to be posted to on pull requests merged by a first time user +# Currently disabled +#firstPRMergeComment: "" diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml new file mode 100644 index 0000000000..3833fc2291 --- /dev/null +++ b/.github/workflows/automerge.yml @@ -0,0 +1,11 @@ +# File managed by web3-bot. DO NOT EDIT. +# See https://github.com/protocol/.github/ for details. + +name: Automerge +on: [ pull_request ] + +jobs: + automerge: + uses: protocol/.github/.github/workflows/automerge.yml@master + with: + job: 'automerge' diff --git a/.github/workflows/gateway-sharness.yml b/.github/workflows/gateway-sharness.yml new file mode 100644 index 0000000000..88901fc172 --- /dev/null +++ b/.github/workflows/gateway-sharness.yml @@ -0,0 +1,51 @@ +name: Gateway Sharness + +on: + workflow_dispatch: + pull_request: + paths: ['gateway/**'] + push: + branches: ['main'] + paths: ['gateway/**'] + +jobs: + sharness: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: 1.19.1 + - name: Checkout go-libipfs + uses: actions/checkout@v3 + with: + path: go-libipfs + - name: Checkout Kubo + uses: actions/checkout@v3 + with: + repository: ipfs/kubo + path: kubo + - name: Install Missing Tools + run: sudo apt install -y socat net-tools fish libxml2-utils + - name: Restore Go Cache + uses: protocol/cache-go-action@v1 + with: + name: ${{ github.job }} + - name: Replace go-libipfs in Kubo go.mod + run: | + go mod edit -replace=github.com/ipfs/go-libipfs=../go-libipfs + go mod tidy + cat go.mod + working-directory: kubo + - name: Install sharness dependencies + run: make test_sharness_deps + working-directory: kubo + - name: Run Kubo Sharness Tests + run: find . -maxdepth 1 -name "*gateway*.sh" -print0 | xargs -0 -I {} bash -c "echo {}; {}" + working-directory: kubo/test/sharness + - name: Run Kubo CLI Tests + run: go test -v -run=Gateway . + working-directory: kubo/test/cli diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml new file mode 100644 index 0000000000..cc65ce68a9 --- /dev/null +++ b/.github/workflows/go-check.yml @@ -0,0 +1,67 @@ +# File managed by web3-bot. DO NOT EDIT. +# See https://github.com/protocol/.github/ for details. + +on: [push, pull_request] +name: Go Checks + +jobs: + unit: + runs-on: ubuntu-latest + name: All + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - id: config + uses: protocol/.github/.github/actions/read-config@master + - uses: actions/setup-go@v3 + with: + go-version: 1.20.x + - name: Run repo-specific setup + uses: ./.github/actions/go-check-setup + if: hashFiles('./.github/actions/go-check-setup') != '' + - name: Install staticcheck + run: go install honnef.co/go/tools/cmd/staticcheck@4970552d932f48b71485287748246cf3237cebdf # 2023.1 (v0.4.0) + - name: Check that go.mod is tidy + uses: protocol/multiple-go-modules@v1.2 + with: + run: | + go mod tidy + if [[ -n $(git ls-files --other --exclude-standard --directory -- go.sum) ]]; then + echo "go.sum was added by go mod tidy" + exit 1 + fi + git diff --exit-code -- go.sum go.mod + - name: gofmt + if: success() || failure() # run this step even if the previous one failed + run: | + out=$(gofmt -s -l .) 
+ if [[ -n "$out" ]]; then + echo $out | awk '{print "::error file=" $0 ",line=0,col=0::File is not gofmt-ed."}' + exit 1 + fi + - name: go vet + if: success() || failure() # run this step even if the previous one failed + uses: protocol/multiple-go-modules@v1.2 + with: + run: go vet ./... + - name: staticcheck + if: success() || failure() # run this step even if the previous one failed + uses: protocol/multiple-go-modules@v1.2 + with: + run: | + set -o pipefail + staticcheck ./... | sed -e 's@\(.*\)\.go@./\1.go@g' + - name: go generate + uses: protocol/multiple-go-modules@v1.2 + if: (success() || failure()) && fromJSON(steps.config.outputs.json).gogenerate == true + with: + run: | + git clean -fd # make sure there aren't untracked files / directories + go generate -x ./... + # check if go generate modified or added any files + if ! $(git add . && git diff-index HEAD --exit-code --quiet); then + echo "go generated caused changes to the repository:" + git status --short + exit 1 + fi diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml new file mode 100644 index 0000000000..c5cb3efc7a --- /dev/null +++ b/.github/workflows/go-test.yml @@ -0,0 +1,76 @@ +# File managed by web3-bot. DO NOT EDIT. +# See https://github.com/protocol/.github/ for details. + +on: [push, pull_request] +name: Go Test + +jobs: + unit: + strategy: + fail-fast: false + matrix: + os: [ "ubuntu", "windows", "macos" ] + go: ["1.19.x","1.20.x"] + env: + COVERAGES: "" + runs-on: ${{ fromJSON(vars[format('UCI_GO_TEST_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }} + name: ${{ matrix.os }} (go ${{ matrix.go }}) + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - id: config + uses: protocol/.github/.github/actions/read-config@master + - uses: actions/setup-go@v3 + with: + go-version: ${{ matrix.go }} + - name: Go information + run: | + go version + go env + - name: Use msys2 on windows + if: matrix.os == 'windows' + shell: bash + # The executable for msys2 is also called bash.cmd + # https://github.com/actions/virtual-environments/blob/main/images/win/Windows2019-Readme.md#shells + # If we prepend its location to the PATH + # subsequent 'shell: bash' steps will use msys2 instead of gitbash + run: echo "C:/msys64/usr/bin" >> $GITHUB_PATH + - name: Run repo-specific setup + uses: ./.github/actions/go-test-setup + if: hashFiles('./.github/actions/go-test-setup') != '' + - name: Run tests + if: contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false + uses: protocol/multiple-go-modules@v1.2 + with: + # Use -coverpkg=./..., so that we include cross-package coverage. + # If package ./A imports ./B, and ./A's tests also cover ./B, + # this means ./B's coverage will be significantly higher than 0%. + run: go test -v -shuffle=on -coverprofile=module-coverage.txt -coverpkg=./... ./... + - name: Run tests (32 bit) + # can't run 32 bit tests on OSX. + if: matrix.os != 'macos' && + fromJSON(steps.config.outputs.json).skip32bit != true && + contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false + uses: protocol/multiple-go-modules@v1.2 + env: + GOARCH: 386 + with: + run: | + export "PATH=$PATH_386:$PATH" + go test -v -shuffle=on ./... + - name: Run tests with race detector + # speed things up. Windows and OSX VMs are slow + if: matrix.os == 'ubuntu' && + contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false + uses: protocol/multiple-go-modules@v1.2 + with: + run: go test -v -race ./... 
+ - name: Collect coverage files + shell: bash + run: echo "COVERAGES=$(find . -type f -name 'module-coverage.txt' | tr -s '\n' ',' | sed 's/,$//')" >> $GITHUB_ENV + - name: Upload coverage to Codecov + uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1 + with: + files: '${{ env.COVERAGES }}' + env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }} diff --git a/.github/workflows/release-check.yml b/.github/workflows/release-check.yml new file mode 100644 index 0000000000..e2408e37c4 --- /dev/null +++ b/.github/workflows/release-check.yml @@ -0,0 +1,13 @@ +# File managed by web3-bot. DO NOT EDIT. +# See https://github.com/protocol/.github/ for details. + +name: Release Checker +on: + pull_request_target: + paths: [ 'version.json' ] + +jobs: + release-check: + uses: protocol/.github/.github/workflows/release-check.yml@master + with: + go-version: 1.20.x diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml new file mode 100644 index 0000000000..cdccbf873d --- /dev/null +++ b/.github/workflows/releaser.yml @@ -0,0 +1,11 @@ +# File managed by web3-bot. DO NOT EDIT. +# See https://github.com/protocol/.github/ for details. + +name: Releaser +on: + push: + paths: [ 'version.json' ] + +jobs: + releaser: + uses: protocol/.github/.github/workflows/releaser.yml@master diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..6f6d895d19 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,26 @@ +name: Close and mark stale issue + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 7 days.' + close-issue-message: 'This issue was closed because it is missing author input.' + stale-issue-label: 'kind/stale' + any-of-labels: 'need/author-input' + exempt-issue-labels: 'need/triage,need/community-input,need/maintainer-input,need/maintainers-input,need/analysis,status/blocked,status/in-progress,status/ready,status/deferred,status/inactive' + days-before-issue-stale: 6 + days-before-issue-close: 7 + enable-statistics: true diff --git a/.github/workflows/tagpush.yml b/.github/workflows/tagpush.yml new file mode 100644 index 0000000000..d84996187a --- /dev/null +++ b/.github/workflows/tagpush.yml @@ -0,0 +1,12 @@ +# File managed by web3-bot. DO NOT EDIT. +# See https://github.com/protocol/.github/ for details. 
+ +name: Tag Push Checker +on: + push: + tags: + - v* + +jobs: + releaser: + uses: protocol/.github/.github/workflows/tagpush.yml@master diff --git a/.github/workflows/test-examples.yml b/.github/workflows/test-examples.yml new file mode 100644 index 0000000000..bf91f726b1 --- /dev/null +++ b/.github/workflows/test-examples.yml @@ -0,0 +1,54 @@ +on: [push, pull_request] +name: Go Test Examples + +jobs: + unit: + defaults: + run: + working-directory: examples + strategy: + fail-fast: false + matrix: + os: [ "ubuntu", "windows", "macos" ] + go: [ "1.19.x", "1.20.x" ] + env: + COVERAGES: "" + runs-on: ${{ format('{0}-latest', matrix.os) }} + name: ${{ matrix.os }} (go ${{ matrix.go }}) + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - uses: actions/setup-go@v3 + with: + go-version: ${{ matrix.go }} + - name: Go information + run: | + go version + go env + - name: Use msys2 on windows + if: ${{ matrix.os == 'windows' }} + shell: bash + # The executable for msys2 is also called bash.cmd + # https://github.com/actions/virtual-environments/blob/main/images/win/Windows2019-Readme.md#shells + # If we prepend its location to the PATH + # subsequent 'shell: bash' steps will use msys2 instead of gitbash + run: echo "C:/msys64/usr/bin" >> $GITHUB_PATH + - name: Run tests + uses: protocol/multiple-go-modules@v1.2 + with: + run: go test -v -shuffle=on ./... + - name: Run tests (32 bit) + if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX. + uses: protocol/multiple-go-modules@v1.2 + env: + GOARCH: 386 + with: + run: | + export "PATH=${{ env.PATH_386 }}:$PATH" + go test -v -shuffle=on ./... + - name: Run tests with race detector + if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow + uses: protocol/multiple-go-modules@v1.2 + with: + run: go test -v -race ./... diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000000..2fa16a1537 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,229 @@ +The contents of this repository are Copyright (c) corresponding authors and +contributors, licensed under the `Permissive License Stack` meaning either of: + +- Apache-2.0 Software License: https://www.apache.org/licenses/LICENSE-2.0 + ([...4tr2kfsq](https://dweb.link/ipfs/bafkreiankqxazcae4onkp436wag2lj3ccso4nawxqkkfckd6cg4tr2kfsq)) + +- MIT Software License: https://opensource.org/licenses/MIT + ([...vljevcba](https://dweb.link/ipfs/bafkreiepofszg4gfe2gzuhojmksgemsub2h4uy2gewdnr35kswvljevcba)) + +You may not use the contents of this repository except in compliance +with one of the listed Licenses. For an extended clarification of the +intent behind the choice of Licensing please refer to +https://protocol.ai/blog/announcing-the-permissive-license-stack/ + +Unless required by applicable law or agreed to in writing, software +distributed under the terms listed in this notice is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See each License for the specific language +governing permissions and limitations under that License. + + +`SPDX-License-Identifier: Apache-2.0 OR MIT` + +Verbatim copies of both licenses are included below: + +
Apache-2.0 Software License + +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS +``` +
+ +
MIT Software License + +``` +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +``` +
diff --git a/README.md b/README.md new file mode 100644 index 0000000000..b5554ebc7f --- /dev/null +++ b/README.md @@ -0,0 +1,109 @@ +

+# Boxo 🍌
+
+<!-- Boxo logo -->
+
+> A library for building IPFS applications and implementations.
+
+
+[![Go Test](https://github.com/ipfs/boxo/actions/workflows/go-test.yml/badge.svg)](https://github.com/ipfs/boxo/actions/workflows/go-test.yml)
+[![Go Docs](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/ipfs/boxo)
+[![codecov](https://codecov.io/gh/ipfs/boxo/branch/main/graph/badge.svg?token=9eG7d8fbCB)](https://codecov.io/gh/ipfs/boxo)
+
+- [About](#about)
+  - [Motivation](#motivation)
+- [What kind of components does Boxo have?](#what-kind-of-components-does-boxo-have)
+  - [Does Boxo == IPFS?](#does-boxo--ipfs)
+  - [Is everything related to IPFS in the Go ecosystem in this repo?](#is-everything-related-to-ipfs-in-the-go-ecosystem-in-this-repo)
+- [Getting started](#getting-started)
+- [Should I add my IPFS component to Boxo?](#should-i-add-my-ipfs-component-to-boxo)
+- [Help](#help)
+- [Governance and Access](#governance-and-access)
+- [Quick example](#quick-example)
+- [Release Process](#release-process)
+- [Related Items](#related-items)
+- [License](#license)
+
+## About
+
+Boxo is a component library for building IPFS applications and implementations in Go.
+
+Some scenarios in which you may find Boxo helpful:
+
+* You are building an application that interacts with the IPFS network
+* You are building an IPFS implementation
+* You want to reuse some components of IPFS such as its Kademlia DHT, Bitswap, data encoding, etc.
+* You want to experiment with IPFS
+
+Boxo powers [Kubo](https://github.com/ipfs/kubo), which is [the most popular IPFS implementation](https://github.com/protocol/network-measurements/tree/master/reports),
+so its code has been battle-tested on the IPFS network for years and is well understood by the community.
+
+### Motivation
+
+**TL;DR** The goal of this repo is to help people build things. Previously, users struggled to find existing useful code or to figure out how to use what they did find. We observed that many people were running Kubo and using its HTTP RPC API instead. This repo aims to do better: we're taking the libraries that many were already effectively relying on in production and making them more easily discoverable and usable.
+
+The maintainers primarily aim to help people trying to build with IPFS in Go who were previously either giving up or relying on the [Kubo HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/). Some of these people will end up being better served by IPFS tooling in other languages (e.g., JavaScript, Rust, Java, Python), but for those who are either looking to write in Go or to leverage the set of IPFS tooling we already have in Go, we'd like to make their lives easier.
+
+We'd also like to make life easier on ourselves as the maintainers by reducing the maintenance burden that comes from being the owners of [many repos](https://github.com/ipfs/kubo/issues/8543), and then use that time to contribute more to the community in the form of easier-to-use libraries, better implementations, improved protocols, and new protocols.
+
+Boxo is neither exhaustive nor comprehensive; there are plenty of useful IPFS protocols, specs, and libraries that are not in Boxo. The goal of Boxo is to provide cohesive and well-maintained components for common IPFS use cases.
+
+## What kind of components does Boxo have?
+
+Boxo includes high-quality components useful for interacting with IPFS protocols, public and private IPFS networks, and content-addressed data, such as:
+
+- Content routing (DHT, delegated content routing, providing)
+- Data transfer (gateways, Bitswap, incremental verification)
+- Naming and mutability (name resolution, IPNS)
+- Interacting with public and private IPFS networks
+- Working with content-addressed data
+
+Boxo aims to provide a cohesive interface into these components. Note that not all of the underlying components necessarily reside in this repository.
+
+### Does Boxo == IPFS?
+
+No. This repo houses some IPFS functionality written in Go that has been useful in practice, and is maintained by a group that has long-term commitments to the IPFS project.
+
+### Is everything related to IPFS in the Go ecosystem in this repo?
+
+No. Not everything related to IPFS is intended to be in Boxo. View it as a starter toolbox (potentially among multiple). If you'd like to build an IPFS implementation with Go, here are some tools you might want that are maintained by a group that has long-term commitments to the IPFS project. There are certainly repos that others maintain that aren't included here (e.g., ipfs/go-car) which are still useful to IPFS implementations. It's expected and fine for new IPFS functionality to be developed that won't be part of Boxo.
+
+## Getting started
+See [examples](./examples/README.md), as well as the [quick example](#quick-example) below.
+
+## Should I add my IPFS component to Boxo?
+We happily accept external contributions! However, Boxo maintains a high quality bar, so code accepted into Boxo must meet some minimum maintenance criteria:
+
+* Actively maintained
+  * Must be actively used by, or will be included in software that is actively used by, a significant number of users or production systems. Code that is not actively used cannot be properly maintained.
+  * Must have multiple engineers who are willing and able to maintain the relevant code in Boxo for a long period of time.
+  * If either of these changes, Boxo maintainers will consider removing the component from Boxo.
+* Adequately tested
+  * At least with unit tests
+  * Ideally also including integration tests with other components
+* Adequately documented
+  * Godocs at minimum
+  * Complex components should have their own doc.go or README.md describing the component, its use cases, tradeoffs, design rationale, etc.
+* If the maintainers are not Boxo maintainers, then the component must include a CODEOWNERS file with at least two code owners who can commit to reviewing PRs
+
+If you have some experimental component that you think would benefit the IPFS community, we suggest you build the component in your own repository until it's clear that there's community demand for it, and then open an issue/PR in this repository to discuss including it in Boxo.
+
+## Help
+
+If you have questions, feel free to open an issue. You can also find the Boxo maintainers in [Filecoin Slack](https://filecoin.io/slack/) at #Boxo-maintainers. (If you would like to engage via IPFS Discord or ipfs.io Matrix, please drop into the #ipfs-implementers channel/room or file an issue, and we'll get bridging from #Boxo-maintainers to these other chat platforms.)
+
+## Governance and Access
+See [CODEOWNERS](./docs/CODEOWNERS) for the current maintainers list. Governance for graduating additional maintainers hasn't been established. Repo permissions are all managed through [ipfs/github-mgmt](https://github.com/ipfs/github-mgmt).
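+
+## Quick example
+
+A minimal sketch of wiring a few Boxo components together: an in-memory datastore backing a blockstore, an offline exchange, and a block service. This is an illustration rather than canonical usage; the go-datastore helpers and the sample payload are assumptions of this sketch, and the [examples](./examples/README.md) remain the maintained walkthroughs.
+
+```golang
+package main
+
+import (
+	"context"
+	"fmt"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	"github.com/ipfs/boxo/blockservice"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	offline "github.com/ipfs/boxo/exchange/offline"
+	ds "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// A thread-safe, in-memory datastore backing a blockstore.
+	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+
+	// The offline exchange serves only locally stored blocks.
+	bserv := blockservice.New(bstore, offline.Exchange(bstore))
+
+	blk := blocks.NewBlock([]byte("hello from boxo"))
+	if err := bserv.AddBlock(ctx, blk); err != nil {
+		panic(err)
+	}
+
+	got, err := bserv.GetBlock(ctx, blk.Cid())
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(got.Cid(), string(got.RawData()))
+}
+```
+
+Because the block service takes an `exchange.Interface`, the offline exchange can be swapped for Bitswap (see `bitswap/README.md` below) when blocks should be fetched from other peers.
+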
+## Release Process
+To be documented: https://github.com/ipfs/boxo/issues/170
+
+## Related Items
+* [Initial proposal for "Consolidate IPFS Repositories" that spawned this project](https://github.com/ipfs/kubo/issues/8543)
+
+## License
+
+[SPDX-License-Identifier: Apache-2.0 OR MIT](LICENSE.md)
diff --git a/bitswap/.gitignore b/bitswap/.gitignore
new file mode 100644
index 0000000000..a9a5aecf42
--- /dev/null
+++ b/bitswap/.gitignore
@@ -0,0 +1 @@
+tmp
diff --git a/bitswap/README.md b/bitswap/README.md
new file mode 100644
index 0000000000..c8fd819e86
--- /dev/null
+++ b/bitswap/README.md
@@ -0,0 +1,120 @@
+go-bitswap
+==================
+
+> An implementation of the Bitswap protocol in Go!
+
+## Table of Contents
+
+- [Background](#background)
+- [Usage](#usage)
+- [Implementation](#implementation)
+- [Contribute](#contribute)
+- [License](#license)
+
+## Background
+
+Bitswap is the data trading module for IPFS. It manages requesting and sending
+blocks to and from other peers in the network. Bitswap has two main jobs:
+- to acquire blocks requested by the client from the network
+- to judiciously send blocks in its possession to other peers who want them
+
+Bitswap is a message-based protocol, as opposed to request-response. All messages
+contain wantlists or blocks.
+
+A node sends a wantlist to tell peers which blocks it wants. When a node receives
+a wantlist, it should check which blocks it has from the wantlist, and consider
+sending the matching blocks to the requestor.
+
+When a node receives blocks that it asked for, the node should send out a
+notification called a 'Cancel' to tell its peers that the node no longer
+wants those blocks.
+
+`go-bitswap` provides an implementation of the Bitswap protocol in Go.
+
+[Learn more about how Bitswap works](./client/docs/how-bitswap-works.md)
+
+## Usage
+
+### Initializing a Bitswap Exchange
+
+```golang
+import (
+	"context"
+
+	bitswap "github.com/ipfs/boxo/bitswap"
+	bsnet "github.com/ipfs/boxo/bitswap/network"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/routing"
+)
+
+var ctx context.Context
+var host host.Host
+var router routing.ContentRouting
+var bstore blockstore.Blockstore
+
+network := bsnet.NewFromIpfsHost(host, router)
+exchange := bitswap.New(ctx, network, bstore)
+```
+
+Parameter Notes:
+
+1. `ctx` is just the parent context for all of Bitswap.
+2. `network` is a network abstraction provided to Bitswap on top of libp2p and content routing.
+3. `bstore` is an IPFS blockstore.
+
+### Get A Block Synchronously
+
+```golang
+var c cid.Cid
+var ctx context.Context
+var exchange bitswap.Bitswap
+
+block, err := exchange.GetBlock(ctx, c)
+```
+
+Parameter Notes:
+
+1. `ctx` is the context for this request, which can be cancelled to cancel the request.
+2. `c` is the content ID of the block you're requesting.
+
+### Get Several Blocks Asynchronously
+
+```golang
+var cids []cid.Cid
+var ctx context.Context
+var exchange bitswap.Bitswap
+
+blockChannel, err := exchange.GetBlocks(ctx, cids)
+```
+
+Parameter Notes:
+
+1. `ctx` is the context for this request, which can be cancelled to cancel the request.
+2. `cids` is a slice of content IDs for the blocks you're requesting.
+
+### Get Related Blocks Faster With Sessions
+
+In IPFS, content blocks are often connected to each other through a MerkleDAG.
+If you know ahead of time that block requests are related, Bitswap can make
+several optimizations internally in how it requests those blocks in order to
+get them faster. Bitswap provides a mechanism called a Bitswap Session to
+manage a series of block requests as part of a single higher-level operation.
+You should initialize a Bitswap Session any time you intend to make a series
+of block requests that are related, and whose responses are likely to come
+from the same peers.
+
+```golang
+var ctx context.Context
+var cids []cid.Cid
+var exchange bitswap.Bitswap
+
+session := exchange.NewSession(ctx)
+blocksChannel, err := session.GetBlocks(ctx, cids)
+// later
+var relatedCids []cid.Cid
+relatedBlocksChannel, err := session.GetBlocks(ctx, relatedCids)
+```
+
+Note that `NewSession` returns an interface with `GetBlock` and `GetBlocks` methods that have the same signature as the overall Bitswap exchange.
+
+### Tell Bitswap a new block was added to the local datastore
+
+```golang
+var ctx context.Context
+var blk blocks.Block
+var exchange bitswap.Bitswap
+
+err := exchange.NotifyNewBlocks(ctx, blk)
+```
diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go
new file mode 100644
index 0000000000..5db94c2ca8
--- /dev/null
+++ b/bitswap/benchmarks_test.go
@@ -0,0 +1,679 @@
+package bitswap_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"math/rand"
+	"os"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ipfs/boxo/bitswap/internal/testutil"
+	blocks "github.com/ipfs/boxo/blocks"
+	protocol "github.com/libp2p/go-libp2p/core/protocol"
+
+	"github.com/ipfs/boxo/bitswap"
+	bsnet "github.com/ipfs/boxo/bitswap/network"
+	testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+	tn "github.com/ipfs/boxo/bitswap/testnet"
+	mockrouting "github.com/ipfs/boxo/routing/mock"
+	cid "github.com/ipfs/go-cid"
+	delay "github.com/ipfs/go-ipfs-delay"
+)
+
+type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid)
+
+type distFunc func(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block)
+
+type runStats struct {
+	DupsRcvd uint64
+	BlksRcvd uint64
+	MsgSent  uint64
+	MsgRecd  uint64
+	Time     time.Duration
+	Name     string
+}
+
+var benchmarkLog []runStats
+
+type bench struct {
+	name       string
+	nodeCount  int
+	blockCount int
+	distFn     distFunc
+	fetchFn    fetchFunc
+}
+
+var benches = []bench{
+	// Fetch from two seed nodes that both have all 100 blocks
+	// - request one at a time, in series
+	{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime},
+	// - request all 100 with a single GetBlocks() call
+	{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll},
+
+	// Fetch from two seed nodes, one at a time, where:
+	// - node A has blocks 0 - 74
+	// - node B has blocks 25 - 99
+	{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime},
+
+	// Fetch from two seed nodes, where:
+	// - node A has even blocks
+	// - node B has odd blocks
+	// - both nodes have every third block
+
+	// - request one at a time, in series
+	{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime},
+	// - request 10 at a time, in series
+	{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10},
+	// - request all 100 in parallel as individual GetBlock() calls
+	{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent},
+	// - request all 100 with a single GetBlocks() call
+	{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll},
+	// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
+	{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2,
unixfsFileFetch}, + + // Fetch from nine seed nodes, all nodes have all blocks + // - request one at a time, in series + {"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, + // - request 10 at a time, in series + {"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, + // - request all 100 with a single GetBlocks() call + {"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, + // - request all 100 in parallel as individual GetBlock() calls + {"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + {"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, + // - follow a typical IPFS request pattern for 1000 blocks + {"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge}, + + // Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups) + // - request one at a time, in series + {"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, + // - request all 100 with a single GetBlocks() call + {"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + {"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, + + // Fetch from 199 seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call + {"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, +} + +func BenchmarkFixedDelay(b *testing.B) { + benchmarkLog = nil + fixedDelay := delay.Fixed(10 * time.Millisecond) + bstoreLatency := time.Duration(0) + + for _, bch := range benches { + b.Run(bch.name, func(b *testing.B) { + subtestDistributeAndFetch(b, bch.nodeCount, bch.blockCount, fixedDelay, bstoreLatency, bch.distFn, bch.fetchFn) + }) + } + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = os.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +type mixedBench struct { + bench + fetcherCount int // number of nodes that fetch data + oldSeedCount int // number of seed nodes running old version of Bitswap +} + +var mixedBenches = []mixedBench{ + {bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, + {bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, + {bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, + // mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, +} + +func BenchmarkFetchFromOldBitswap(b *testing.B) { + benchmarkLog = nil + fixedDelay := delay.Fixed(10 * time.Millisecond) + bstoreLatency := time.Duration(0) + + for _, bch := range mixedBenches { + b.Run(bch.name, func(b *testing.B) { + fetcherCount := bch.fetcherCount + oldSeedCount := bch.oldSeedCount + newSeedCount := bch.nodeCount - (fetcherCount + oldSeedCount) + + net := tn.VirtualNetwork(mockrouting.NewServer(), fixedDelay) + + // Simulate an older Bitswap node (old protocol ID) that doesn't + // send DONT_HAVE responses + oldProtocol := []protocol.ID{bsnet.ProtocolBitswapOneOne} + oldNetOpts := []bsnet.NetOpt{bsnet.SupportedProtocols(oldProtocol)} + oldBsOpts := []bitswap.Option{bitswap.SetSendDontHaves(false)} + oldNodeGenerator := testinstance.NewTestInstanceGenerator(net, oldNetOpts, oldBsOpts) + + // Regular new Bitswap node + newNodeGenerator := testinstance.NewTestInstanceGenerator(net, nil, nil) + var instances 
[]testinstance.Instance + + // Create new nodes (fetchers + seeds) + for i := 0; i < fetcherCount+newSeedCount; i++ { + inst := newNodeGenerator.Next() + instances = append(instances, inst) + } + // Create old nodes (just seeds) + for i := 0; i < oldSeedCount; i++ { + inst := oldNodeGenerator.Next() + instances = append(instances, inst) + } + // Connect all the nodes together + testinstance.ConnectInstances(instances) + + // Generate blocks, with a smaller root block + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(bch.blockCount, stdBlockSize) + blocks[0] = rootBlock[0] + + // Run the distribution + runDistributionMulti(b, instances[:fetcherCount], instances[fetcherCount:], blocks, bstoreLatency, bch.distFn, bch.fetchFn) + + newNodeGenerator.Close() + oldNodeGenerator.Close() + }) + } + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = os.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +const datacenterSpeed = 5 * time.Millisecond +const fastSpeed = 60 * time.Millisecond +const mediumSpeed = 200 * time.Millisecond +const slowSpeed = 800 * time.Millisecond +const superSlowSpeed = 4000 * time.Millisecond +const datacenterDistribution = 3 * time.Millisecond +const distribution = 20 * time.Millisecond +const datacenterBandwidth = 125000000.0 +const datacenterBandwidthDeviation = 3000000.0 +const fastBandwidth = 1250000.0 +const fastBandwidthDeviation = 300000.0 +const mediumBandwidth = 500000.0 +const mediumBandwidthDeviation = 80000.0 +const slowBandwidth = 100000.0 +const slowBandwidthDeviation = 16500.0 +const rootBlockSize = 800 +const stdBlockSize = 8000 +const largeBlockSize = int64(256 * 1024) + +func BenchmarkRealWorld(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + fastNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, slowSpeed-fastSpeed, + 0.0, 0.0, distribution, randomGen) + fastNetworkDelay := delay.Delay(fastSpeed, fastNetworkDelayGenerator) + fastBandwidthGenerator := tn.VariableRateLimitGenerator(fastBandwidth, fastBandwidthDeviation, randomGen) + averageNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, slowSpeed-fastSpeed, + 0.3, 0.3, distribution, randomGen) + averageNetworkDelay := delay.Delay(fastSpeed, averageNetworkDelayGenerator) + averageBandwidthGenerator := tn.VariableRateLimitGenerator(mediumBandwidth, mediumBandwidthDeviation, randomGen) + slowNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, superSlowSpeed-fastSpeed, + 0.3, 0.3, distribution, randomGen) + slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) + slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, randomGen) + bstoreLatency := time.Duration(0) + + b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) + }) + b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) + }) + b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", 
func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) + }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = os.WriteFile("tmp/rw-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenter(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Nodes-Overlap3-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 3, 100, datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) + }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Leech3Seed-AllToAll-UnixfsFetch", func(b *testing.B) { + d := datacenterNetworkDelay + rateLimitGenerator := datacenterBandwidthGenerator + blockSize := largeBlockSize + df := allToAll + ff := unixfsFileFetchLarge + numnodes := 6 + numblks := 1000 + + for i := 0; i < b.N; i++ { + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + instances := ig.Instances(numnodes) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + runDistributionMulti(b, instances[:3], instances[3:], blocks, bstoreLatency, df, ff) + } + }) + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + for i := 0; i < b.N; i++ { + net := tn.VirtualNetwork(mockrouting.NewServer(), d) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + + instances := ig.Instances(numnodes) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) + ig.Close() + } +} + +func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, 
rateLimitGenerator tn.RateLimitGenerator, blockSize int64, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + for i := 0; i < b.N; i++ { + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + instances := ig.Instances(numnodes) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) + } +} + +func runDistributionMulti(b *testing.B, fetchers []testinstance.Instance, seeds []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + // Distribute blocks to seed nodes + df(b, seeds, blocks) + + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + start := time.Now() + var wg sync.WaitGroup + for _, fetcher := range fetchers { + wg.Add(1) + + go func(ftchr testinstance.Instance) { + defer wg.Done() + + ff(b, ftchr.Exchange, ks) + }(fetcher) + } + wg.Wait() + + // Collect statistics + fetcher := fetchers[0] + st, err := fetcher.Exchange.Stat() + if err != nil { + b.Fatal(err) + } + + for _, fetcher := range fetchers { + nst := fetcher.Adapter.Stats() + stats := runStats{ + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + } + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) +} + +func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + numnodes := len(instances) + fetcher := instances[numnodes-1] + + // Distribute blocks to seed nodes + seeds := instances[:numnodes-1] + df(b, seeds, blocks) + + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + start := time.Now() + ff(b, fetcher.Exchange, ks) + + // Collect statistics + st, err := fetcher.Exchange.Stat() + if err != nil { + b.Fatal(err) + } + + nst := fetcher.Adapter.Stats() + stats := runStats{ + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) +} + +func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { + for _, p := range provs { + if err := p.Blockstore().PutMany(context.Background(), blocks); err != nil { + b.Fatal(err) + } + } +} + +// overlap1 gives the first 75 blocks to the first peer, and the last 75 blocks +// to the second peer. 
This means both peers have the middle 50 blocks
+func overlap1(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) {
+	if len(provs) != 2 {
+		b.Fatal("overlap1 only works with 2 provs")
+	}
+	bill := provs[0]
+	jeff := provs[1]
+
+	if err := bill.Blockstore().PutMany(context.Background(), blks[:75]); err != nil {
+		b.Fatal(err)
+	}
+	if err := jeff.Blockstore().PutMany(context.Background(), blks[25:]); err != nil {
+		b.Fatal(err)
+	}
+}
+
+// overlap2 gives every even-numbered block to the first peer and every
+// odd-numbered block to the second. It also gives every third block to both peers.
+func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) {
+	if len(provs) != 2 {
+		b.Fatal("overlap2 only works with 2 provs")
+	}
+	bill := provs[0]
+	jeff := provs[1]
+
+	for i, blk := range blks {
+		even := i%2 == 0
+		third := i%3 == 0
+		if third || even {
+			if err := bill.Blockstore().Put(context.Background(), blk); err != nil {
+				b.Fatal(err)
+			}
+		}
+		if third || !even {
+			if err := jeff.Blockstore().Put(context.Background(), blk); err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
+
+// onePeerPerBlock picks a random peer to hold each block. With this layout we
+// shouldn't ever see duplicate blocks, but we're mostly just testing the
+// performance of the sync algorithm.
+func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) {
+	for _, blk := range blks {
+		err := provs[rand.Intn(len(provs))].Blockstore().Put(context.Background(), blk)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+// oneAtATime fetches blocks one by one with a single session.
+func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) {
+	ses := bs.NewSession(context.Background())
+	for _, c := range ks {
+		_, err := ses.GetBlock(context.Background(), c)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+	// b.Logf("Session fetch latency: %s", ses.GetAverageLatency())
+}
+
+// batchFetchBy10 fetches data in batches, 10 blocks at a time.
+func batchFetchBy10(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) {
+	ses := bs.NewSession(context.Background())
+	for i := 0; i < len(ks); i += 10 {
+		out, err := ses.GetBlocks(context.Background(), ks[i:i+10])
+		if err != nil {
+			b.Fatal(err)
+		}
+		for range out {
+		}
+	}
+}
+
+// fetchAllConcurrent fetches every block concurrently.
+func fetchAllConcurrent(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) {
+	ses := bs.NewSession(context.Background())
+
+	var wg sync.WaitGroup
+	for _, c := range ks {
+		wg.Add(1)
+		go func(c cid.Cid) {
+			defer wg.Done()
+			_, err := ses.GetBlock(context.Background(), c)
+			if err != nil {
+				b.Error(err)
+			}
+		}(c)
+	}
+	wg.Wait()
+}
+
+func batchFetchAll(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) {
+	ses := bs.NewSession(context.Background())
+	out, err := ses.GetBlocks(context.Background(), ks)
+	if err != nil {
+		b.Fatal(err)
+	}
+	for range out {
+	}
+}
+
+// unixfsFileFetch simulates the fetch pattern of syncing a unixfs file graph
+// as fast as possible.
+func unixfsFileFetch(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) {
+	ses := bs.NewSession(context.Background())
+	_, err := ses.GetBlock(context.Background(), ks[0])
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	out, err := ses.GetBlocks(context.Background(), ks[1:11])
+	if err != nil {
+		b.Fatal(err)
+	}
+	for range out {
+	}
+
+	out, err = ses.GetBlocks(context.Background(), ks[11:])
+	if err != nil {
+		b.Fatal(err)
+	}
+	for range out {
+	}
+}
+
+func unixfsFileFetchLarge(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) {
+	ses := bs.NewSession(context.Background())
+	_, err := ses.GetBlock(context.Background(), ks[0])
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	out, err := ses.GetBlocks(context.Background(), ks[1:11])
+	if err != nil {
+		b.Fatal(err)
+	}
+	for range out {
+	}
+
+	out, err = ses.GetBlocks(context.Background(), ks[11:100])
+	if err != nil {
+		b.Fatal(err)
+	}
+	for range out {
+	}
+
+	rest := ks[100:]
+	for len(rest) > 0 {
+		var batch [][]cid.Cid
+		for i := 0; i < 5 && len(rest) > 0; i++ {
+			cnt := 10
+			if len(rest) < 10 {
+				cnt = len(rest)
+			}
+			group := rest[:cnt]
+			rest = rest[cnt:]
+			batch = append(batch, group)
+		}
+
+		var errLk sync.Mutex
+		var anyErr error
+		var wg sync.WaitGroup
+		for _, group := range batch {
+			wg.Add(1)
+			go func(grp []cid.Cid) {
+				defer wg.Done()
+
+				// Shadow out/err locally: assigning to the outer variables
+				// from multiple goroutines would be a data race.
+				out, err := ses.GetBlocks(context.Background(), grp)
+				if err != nil {
+					errLk.Lock()
+					anyErr = err
+					errLk.Unlock()
+				}
+				for range out {
+				}
+			}(group)
+		}
+		wg.Wait()
+
+		// Note: b.Fatal() cannot be called from within a goroutine
+		if anyErr != nil {
+			b.Fatal(anyErr)
+		}
+	}
+}
+
+func printResults(rs []runStats) {
+	nameOrder := make([]string, 0)
+	names := make(map[string]struct{})
+	for i := 0; i < len(rs); i++ {
+		if _, ok := names[rs[i].Name]; !ok {
+			nameOrder = append(nameOrder, rs[i].Name)
+			names[rs[i].Name] = struct{}{}
+		}
+	}
+
+	for i := 0; i < len(names); i++ {
+		name := nameOrder[i]
+		count := 0
+		sent := 0.0
+		rcvd := 0.0
+		dups := 0.0
+		blks := 0.0
+		elpd := 0.0
+		for j := 0; j < len(rs); j++ {
+			if rs[j].Name == name {
+				count++
+				sent += float64(rs[j].MsgSent)
+				rcvd += float64(rs[j].MsgRecd)
+				dups += float64(rs[j].DupsRcvd)
+				blks += float64(rs[j].BlksRcvd)
+				elpd += float64(rs[j].Time)
+			}
+		}
+		sent /= float64(count)
+		rcvd /= float64(count)
+		dups /= float64(count)
+		blks /= float64(count)
+
+		label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd/1000000000.0)
+		fmt.Printf("%-75s %s: sent %d, recv %d, dups %d / %d\n",
+			label,
+			fmtDuration(time.Duration(int64(math.Round(elpd/float64(count))))),
+			int64(math.Round(sent)), int64(math.Round(rcvd)),
+			int64(math.Round(dups)), int64(math.Round(blks)))
+	}
+}
+
+func fmtDuration(d time.Duration) string {
+	d = d.Round(time.Millisecond)
+	s := d / time.Second
+	d -= s * time.Second
+	ms := d / time.Millisecond
+	return fmt.Sprintf("%d.%03ds", s, ms)
+}
diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go
new file mode 100644
index 0000000000..79b3d12d7a
--- /dev/null
+++ b/bitswap/bitswap.go
@@ -0,0 +1,181 @@
+package bitswap
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ipfs/boxo/bitswap/client"
+	"github.com/ipfs/boxo/bitswap/internal/defaults"
+	"github.com/ipfs/boxo/bitswap/message"
+	"github.com/ipfs/boxo/bitswap/network"
+	"github.com/ipfs/boxo/bitswap/server"
+	"github.com/ipfs/boxo/bitswap/tracer"
+	"github.com/ipfs/go-metrics-interface"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	exchange "github.com/ipfs/boxo/exchange"
+	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log"
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	"go.uber.org/multierr"
+)
+
+var log = logging.Logger("bitswap")
+
+// bitswap is the old interface we are targeting.
+type bitswap interface {
+	Close() error
+	GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error)
+	GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error)
+	GetWantBlocks() []cid.Cid
+	GetWantHaves() []cid.Cid
+	GetWantlist() []cid.Cid
+	IsOnline() bool
+	LedgerForPeer(p peer.ID) *server.Receipt
+	NewSession(ctx context.Context) exchange.Fetcher
+	NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error
+	PeerConnected(p peer.ID)
+	PeerDisconnected(p peer.ID)
+	ReceiveError(err error)
+
ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) + Stat() (*Stat, error) + WantlistForPeer(p peer.ID) []cid.Cid +} + +var _ exchange.SessionExchange = (*Bitswap)(nil) +var _ bitswap = (*Bitswap)(nil) +var HasBlockBufferSize = defaults.HasBlockBufferSize + +type Bitswap struct { + *client.Client + *server.Server + + tracer tracer.Tracer + net network.BitSwapNetwork +} + +func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Bitswap { + bs := &Bitswap{ + net: net, + } + + var serverOptions []server.Option + var clientOptions []client.Option + + for _, o := range options { + switch typedOption := o.v.(type) { + case server.Option: + serverOptions = append(serverOptions, typedOption) + case client.Option: + clientOptions = append(clientOptions, typedOption) + case option: + typedOption(bs) + default: + panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), option(nil))) + } + } + + if bs.tracer != nil { + var tracer tracer.Tracer = nopReceiveTracer{bs.tracer} + clientOptions = append(clientOptions, client.WithTracer(tracer)) + serverOptions = append(serverOptions, server.WithTracer(tracer)) + } + + if HasBlockBufferSize != defaults.HasBlockBufferSize { + serverOptions = append(serverOptions, server.HasBlockBufferSize(HasBlockBufferSize)) + } + + ctx = metrics.CtxSubScope(ctx, "bitswap") + + bs.Server = server.New(ctx, net, bstore, serverOptions...) + bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) + net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once + + return bs +} + +func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { + return multierr.Combine( + bs.Client.NotifyNewBlocks(ctx, blks...), + bs.Server.NotifyNewBlocks(ctx, blks...), + ) +} + +type Stat struct { + Wantlist []cid.Cid + Peers []string + BlocksReceived uint64 + DataReceived uint64 + DupBlksReceived uint64 + DupDataReceived uint64 + MessagesReceived uint64 + BlocksSent uint64 + DataSent uint64 + ProvideBufLen int +} + +func (bs *Bitswap) Stat() (*Stat, error) { + cs, err := bs.Client.Stat() + if err != nil { + return nil, err + } + ss, err := bs.Server.Stat() + if err != nil { + return nil, err + } + + return &Stat{ + Wantlist: cs.Wantlist, + BlocksReceived: cs.BlocksReceived, + DataReceived: cs.DataReceived, + DupBlksReceived: cs.DupBlksReceived, + DupDataReceived: cs.DupDataReceived, + MessagesReceived: cs.MessagesReceived, + Peers: ss.Peers, + BlocksSent: ss.BlocksSent, + DataSent: ss.DataSent, + ProvideBufLen: ss.ProvideBufLen, + }, nil +} + +func (bs *Bitswap) Close() error { + bs.net.Stop() + return multierr.Combine( + bs.Client.Close(), + bs.Server.Close(), + ) +} + +func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { + if p == bs.net.Self() { + return bs.Client.GetWantlist() + } + return bs.Server.WantlistForPeer(p) +} + +func (bs *Bitswap) PeerConnected(p peer.ID) { + bs.Client.PeerConnected(p) + bs.Server.PeerConnected(p) +} + +func (bs *Bitswap) PeerDisconnected(p peer.ID) { + bs.Client.PeerDisconnected(p) + bs.Server.PeerDisconnected(p) +} + +func (bs *Bitswap) ReceiveError(err error) { + log.Infof("Bitswap Client ReceiveError: %s", err) + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger +} + +func (bs *Bitswap) ReceiveMessage(ctx 
context.Context, p peer.ID, incoming message.BitSwapMessage) { + if bs.tracer != nil { + bs.tracer.MessageReceived(p, incoming) + } + + bs.Client.ReceiveMessage(ctx, p, incoming) + bs.Server.ReceiveMessage(ctx, p, incoming) +} diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go new file mode 100644 index 0000000000..adbd81935f --- /dev/null +++ b/bitswap/bitswap_test.go @@ -0,0 +1,862 @@ +package bitswap_test + +import ( + "bytes" + "context" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap" + bsmsg "github.com/ipfs/boxo/bitswap/message" + "github.com/ipfs/boxo/bitswap/server" + testinstance "github.com/ipfs/boxo/bitswap/testinstance" + tn "github.com/ipfs/boxo/bitswap/testnet" + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/internal/test" + mockrouting "github.com/ipfs/boxo/routing/mock" + cid "github.com/ipfs/go-cid" + detectrace "github.com/ipfs/go-detect-race" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" + ipld "github.com/ipfs/go-ipld-format" + tu "github.com/libp2p/go-libp2p-testing/etc" + p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +func isCI() bool { + // https://github.blog/changelog/2020-04-15-github-actions-sets-the-ci-environment-variable-to-true/ + return os.Getenv("CI") != "" +} + +func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { + t.Helper() + err := inst.Blockstore().Put(ctx, blk) + if err != nil { + t.Fatal(err) + } + err = inst.Exchange.NotifyNewBlocks(ctx, blk) + if err != nil { + t.Fatal(err) + } +} + +// FIXME the tests are really sensitive to the network delay. fix them to work +// well under varying conditions +const kNetworkDelay = 0 * time.Millisecond + +func TestClose(t *testing.T) { + test.Flaky(t) + + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + block := bgen.Next() + bitswap := ig.Next() + + bitswap.Exchange.Close() + _, err := bitswap.Exchange.GetBlock(context.Background(), block.Cid()) + if err == nil { + t.Fatal("expected GetBlock to fail") + } +} + +func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this + test.Flaky(t) + + rs := mockrouting.NewServer() + net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + block := blocks.NewBlock([]byte("block")) + pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) + err := rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network + if err != nil { + t.Fatal(err) + } + + solo := ig.Next() + defer solo.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + defer cancel() + _, err = solo.Exchange.GetBlock(ctx, block.Cid()) + + if err != context.DeadlineExceeded { + t.Fatal("Expected DeadlineExceeded error") + } +} + +func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { + test.Flaky(t) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + addBlock(t, context.Background(), hasBlock, block) + + wantsBlock := peers[1] + defer 
wantsBlock.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + received, err := wantsBlock.Exchange.GetBlock(ctx, block.Cid()) + if err != nil { + t.Log(err) + t.Fatal("Expected to succeed") + } + + if !bytes.Equal(block.RawData(), received.RawData()) { + t.Fatal("Data doesn't match") + } +} + +func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { + test.Flaky(t) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + bsOpts := []bitswap.Option{bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50 * time.Millisecond)} + ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + defer ig.Close() + + hasBlock := ig.Next() + defer hasBlock.Exchange.Close() + + wantsBlock := ig.Next() + defer wantsBlock.Exchange.Close() + + addBlock(t, context.Background(), hasBlock, block) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond) + defer cancel() + + ns := wantsBlock.Exchange.NewSession(ctx) + + received, err := ns.GetBlock(ctx, block.Cid()) + if received != nil { + t.Fatalf("Expected to find nothing, found %s", received) + } + + if err != context.DeadlineExceeded { + t.Fatal("Expected deadline exceeded") + } +} + +// Tests that a received block is not stored in the blockstore if the block was +// not requested by the client +func TestUnwantedBlockNotAdded(t *testing.T) { + test.Flaky(t) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + bsMessage := bsmsg.New(true) + bsMessage.AddBlock(block) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + addBlock(t, context.Background(), hasBlock, block) + + doesNotWantBlock := peers[1] + defer doesNotWantBlock.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) + + blockInStore, err := doesNotWantBlock.Blockstore().Has(ctx, block.Cid()) + if err != nil || blockInStore { + t.Fatal("Unwanted block added to block store") + } +} + +// Tests that a received block is returned to the client and stored in the +// blockstore in the following scenario: +// - the want for the block has been requested by the client +// - the want for the block has not yet been sent out to a peer +// +// (because the live request queue is full) +func TestPendingBlockAdded(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + bg := blocksutil.NewBlockGenerator() + sessionBroadcastWantCapacity := 4 + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + instance := ig.Instances(1)[0] + defer instance.Exchange.Close() + + oneSecCtx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // Request enough blocks to exceed the session's broadcast want list + // capacity (by one block). 
The session will put the remaining block + // into the "tofetch" queue + blks := bg.Blocks(sessionBroadcastWantCapacity + 1) + ks := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + ks = append(ks, b.Cid()) + } + outch, err := instance.Exchange.GetBlocks(ctx, ks) + if err != nil { + t.Fatal(err) + } + + // Wait a little while to make sure the session has time to process the wants + time.Sleep(time.Millisecond * 20) + + // Simulate receiving a message which contains the block in the "tofetch" queue + lastBlock := blks[len(blks)-1] + bsMessage := bsmsg.New(true) + bsMessage.AddBlock(lastBlock) + unknownPeer := peer.ID("QmUHfvCQrzyR6vFXmeyCptfCWedfcmfa12V6UuziDtrw23") + instance.Exchange.ReceiveMessage(oneSecCtx, unknownPeer, bsMessage) + + // Make sure Bitswap adds the block to the output channel + blkrecvd, ok := <-outch + if !ok { + t.Fatal("timed out waiting for block") + } + if !blkrecvd.Cid().Equals(lastBlock.Cid()) { + t.Fatal("received wrong block") + } +} + +func TestLargeSwarm(t *testing.T) { + test.Flaky(t) + + if testing.Short() { + t.SkipNow() + } + numInstances := 100 + numBlocks := 2 + if detectrace.WithRace() { + // when running with the race detector, 500 instances launches + // well over 8k goroutines. This hits a race detector limit. + numInstances = 20 + } else if isCI() { + numInstances = 200 + } else { + t.Parallel() + } + PerformDistributionTest(t, numInstances, numBlocks) +} + +func TestLargeFile(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + if !isCI() { + t.Parallel() + } + + numInstances := 10 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) +} + +func TestLargeFileTwoPeers(t *testing.T) { + test.Flaky(t) + + if testing.Short() { + t.SkipNow() + } + numInstances := 2 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) +} + +func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { + test.Flaky(t) + + ctx := context.Background() + if testing.Short() { + t.SkipNow() + } + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, []bitswap.Option{ + bitswap.TaskWorkerCount(5), + bitswap.EngineTaskWorkerCount(5), + bitswap.MaxOutstandingBytesPerPeer(1 << 20), + }) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + instances := ig.Instances(numInstances) + blocks := bg.Blocks(numBlocks) + + t.Log("Give the blocks to the first instance") + + var blkeys []cid.Cid + first := instances[0] + for _, b := range blocks { + blkeys = append(blkeys, b.Cid()) + addBlock(t, ctx, first, b) + } + + t.Log("Distribute!") + + wg := sync.WaitGroup{} + errs := make(chan error) + + for _, inst := range instances[1:] { + wg.Add(1) + go func(inst testinstance.Instance) { + defer wg.Done() + outch, err := inst.Exchange.GetBlocks(ctx, blkeys) + if err != nil { + errs <- err + } + for range outch { + } + }(inst) + } + + go func() { + wg.Wait() + close(errs) + }() + + for err := range errs { + if err != nil { + t.Fatal(err) + } + } +} + +// TODO simplify this test. get to the _essence_! 
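+// The scenario below: peerA requests block alpha before any peer has it,
+// peerB then adds and announces alpha, and peerA should eventually receive
+// it over its open want.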
+func TestSendToWantingPeer(t *testing.T) {
+	test.Flaky(t)
+
+	if testing.Short() {
+		t.SkipNow()
+	}
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+	bg := blocksutil.NewBlockGenerator()
+
+	peers := ig.Instances(2)
+	peerA := peers[0]
+	peerB := peers[1]
+
+	t.Logf("Session %v\n", peerA.Peer)
+	t.Logf("Session %v\n", peerB.Peer)
+
+	waitTime := time.Second * 5
+
+	alpha := bg.Next()
+	// peerA requests and waits for block alpha
+	ctx, cancel := context.WithTimeout(context.Background(), waitTime)
+	defer cancel()
+	alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []cid.Cid{alpha.Cid()})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// peerB announces to the network that it has block alpha
+	addBlock(t, ctx, peerB, alpha)
+
+	// At some point, peerA should get alpha (or timeout)
+	blkrecvd, ok := <-alphaPromise
+	if !ok {
+		t.Fatal("context timed out and broke promise channel!")
+	}
+
+	if !blkrecvd.Cid().Equals(alpha.Cid()) {
+		t.Fatal("Wrong block!")
+	}
+}
+
+func TestEmptyKey(t *testing.T) {
+	test.Flaky(t)
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+	bs := ig.Instances(1)[0].Exchange
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+
+	_, err := bs.GetBlock(ctx, cid.Cid{})
+	if !ipld.IsNotFound(err) {
+		t.Error("empty key should return a NotFound error")
+	}
+}
+
+func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint64) {
+	if sblks != st.BlocksSent {
+		t.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent)
+	}
+
+	if rblks != st.BlocksReceived {
+		t.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived)
+	}
+
+	if sdata != st.DataSent {
+		t.Errorf("mismatch in data sent: %d vs %d", sdata, st.DataSent)
+	}
+
+	if rdata != st.DataReceived {
+		t.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived)
+	}
+}
+
+func TestBasicBitswap(t *testing.T) {
+	test.Flaky(t)
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+	bg := blocksutil.NewBlockGenerator()
+
+	t.Log("Test one node trying to get one block from another")
+
+	instances := ig.Instances(3)
+	blocks := bg.Blocks(1)
+
+	// First peer has block
+	addBlock(t, context.Background(), instances[0], blocks[0])
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+
+	// Second peer broadcasts want for block CID
+	// (Received by first and third peers)
+	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// When second peer receives block, it should send out a cancel, so third
+	// peer should no longer keep second peer's want
+	if err = tu.WaitFor(ctx, func() error {
+		if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 {
+			return fmt.Errorf("should have no items in the other peer's wantlist")
+		}
+		if len(instances[1].Exchange.GetWantlist()) != 0 {
+			return fmt.Errorf("shouldn't have anything in the wantlist")
+		}
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	st0, err := instances[0].Exchange.Stat()
+	if err != nil {
+		t.Fatal(err)
+	}
+	st1, err := instances[1].Exchange.Stat()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	st2, err := instances[2].Exchange.Stat()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Log("stat node 0")
+	assertStat(t, st0, 1, 0, uint64(len(blk.RawData())), 0)
+	t.Log("stat node 1")
+	assertStat(t, st1, 0, 1, 0, uint64(len(blk.RawData())))
+	t.Log("stat node 2")
+	assertStat(t, st2, 0, 0, 0, 0)
+
+	if !bytes.Equal(blk.RawData(), blocks[0].RawData()) {
+		t.Errorf("blocks aren't equal: expected %v, actual %v", blocks[0].RawData(), blk.RawData())
+	}
+
+	t.Log(blk)
+	for _, inst := range instances {
+		err := inst.Exchange.Close()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestDoubleGet(t *testing.T) {
+	test.Flaky(t)
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+	bg := blocksutil.NewBlockGenerator()
+
+	t.Log("Test one node trying to get one block from another")
+
+	instances := ig.Instances(2)
+	blocks := bg.Blocks(1)
+
+	// NOTE: A race condition can happen here where these GetBlocks requests go
+	// through before the peers even get connected. This is okay, bitswap
+	// *should* be able to handle this.
+	ctx1, cancel1 := context.WithCancel(context.Background())
+	blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []cid.Cid{blocks[0].Cid()})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx2, cancel2 := context.WithCancel(context.Background())
+	defer cancel2()
+
+	blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []cid.Cid{blocks[0].Cid()})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure both requests make it into the wantlist at the same time
+	time.Sleep(time.Millisecond * 20)
+	cancel1()
+
+	_, ok := <-blkch1
+	if ok {
+		t.Fatal("expected channel to be closed")
+	}
+
+	addBlock(t, context.Background(), instances[0], blocks[0])
+
+	select {
+	case blk, ok := <-blkch2:
+		if !ok {
+			t.Fatal("expected to get the block here")
+		}
+		t.Log(blk)
+	case <-time.After(time.Second * 5):
+		p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Peer)
+		if len(p1wl) != 1 {
+			t.Logf("wantlist view didn't have 1 item (had %d)", len(p1wl))
+		} else if !p1wl[0].Equals(blocks[0].Cid()) {
+			t.Logf("had 1 item, it was wrong: %s %s", blocks[0].Cid(), p1wl[0])
+		} else {
+			t.Log("had correct wantlist, somehow")
+		}
+		t.Fatal("timed out waiting on block")
+	}
+
+	for _, inst := range instances {
+		err := inst.Exchange.Close()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestWantlistCleanup(t *testing.T) {
+	test.Flaky(t)
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+	bg := blocksutil.NewBlockGenerator()
+
+	instances := ig.Instances(2)
+	instance := instances[0]
+	bswap := instance.Exchange
+	blocks := bg.Blocks(20)
+
+	var keys []cid.Cid
+	for _, b := range blocks {
+		keys = append(keys, b.Cid())
+	}
+
+	// Once context times out, key should be removed from wantlist
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
+	defer cancel()
+	_, err := bswap.GetBlock(ctx, keys[0])
+	if err != context.DeadlineExceeded {
+		t.Fatal("shouldn't have fetched any blocks")
+	}
+
+	time.Sleep(time.Millisecond * 50)
+
+	if len(bswap.GetWantHaves()) > 0 {
+		t.Fatal("should not have anything in the wantlist")
+	}
+
+	// Once context times out, keys should be removed from wantlist
+	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)
+	defer cancel()
+	_, err = bswap.GetBlocks(ctx, keys[:10])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	<-ctx.Done()
+	time.Sleep(time.Millisecond * 50)
+
+	if len(bswap.GetWantHaves()) > 0 {
+		t.Fatal("should not have anything in the wantlist")
+	}
+
+	// Send want for single block, with no timeout
+	_, err = bswap.GetBlocks(context.Background(), keys[:1])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Send want for 10 blocks
+	ctx, cancel = context.WithCancel(context.Background())
+	_, err = bswap.GetBlocks(ctx, keys[10:])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Even after 50 milliseconds we haven't explicitly cancelled anything and
+	// no timeouts have expired, so we should have 11 want-haves
+	time.Sleep(time.Millisecond * 50)
+	if len(bswap.GetWantHaves()) != 11 {
+		t.Fatal("should have 11 keys in wantlist")
+	}
+
+	// Cancel the timeout for the request for 10 blocks. This should remove
+	// the want-haves
+	cancel()
+
+	// Once the cancel is processed, we are left with the request for 1 block
+	time.Sleep(time.Millisecond * 50)
+	if !(len(bswap.GetWantHaves()) == 1 && bswap.GetWantHaves()[0] == keys[0]) {
+		t.Fatal("should only have keys[0] in wantlist")
+	}
+}
+
+func assertLedgerMatch(ra, rb *server.Receipt) error {
+	if ra.Sent != rb.Recv {
+		return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d sent vs %d recvd", ra.Sent, rb.Recv)
+	}
+
+	if ra.Recv != rb.Sent {
+		return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d recvd vs %d sent", ra.Recv, rb.Sent)
+	}
+
+	if ra.Exchanged != rb.Exchanged {
+		return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d", ra.Exchanged, rb.Exchanged)
+	}
+
+	return nil
+}
+
+func assertLedgerEqual(ra, rb *server.Receipt) error {
+	if ra.Value != rb.Value {
+		return fmt.Errorf("mismatch in ledgers (value/debt ratio): %f vs %f", ra.Value, rb.Value)
+	}
+
+	if ra.Sent != rb.Sent {
+		return fmt.Errorf("mismatch in ledgers (sent bytes): %d vs %d", ra.Sent, rb.Sent)
+	}
+
+	if ra.Recv != rb.Recv {
+		return fmt.Errorf("mismatch in ledgers (recvd bytes): %d vs %d", ra.Recv, rb.Recv)
+	}
+
+	if ra.Exchanged != rb.Exchanged {
+		return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d", ra.Exchanged, rb.Exchanged)
+	}
+
+	return nil
+}
+
+func newReceipt(sent, recv, exchanged uint64) *server.Receipt {
+	return &server.Receipt{
+		Peer:      "test",
+		Value:     float64(sent) / (1 + float64(recv)),
+		Sent:      sent,
+		Recv:      recv,
+		Exchanged: exchanged,
+	}
+}
+
+func TestBitswapLedgerOneWay(t *testing.T) {
+	test.Flaky(t)
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+	bg := blocksutil.NewBlockGenerator()
+
+	t.Log("Test ledgers match when one peer sends block to another")
+
+	instances := ig.Instances(2)
+	blocks := bg.Blocks(1)
+	addBlock(t, context.Background(), instances[0], blocks[0])
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer)
+	rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer)
+
+	// compare peer ledger receipts
+	err = assertLedgerMatch(ra, rb)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check that receipts have intended values
+	ratest := newReceipt(1, 0, 1)
+	err = assertLedgerEqual(ratest, ra)
+	if err != nil {
+		t.Fatal(err)
+	}
+	rbtest := newReceipt(0, 1, 1)
+	err = assertLedgerEqual(rbtest, rb)
+	if err != nil {
+
t.Fatal(err) + } + } +} + +func TestBitswapLedgerTwoWay(t *testing.T) { + test.Flaky(t) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test ledgers match when two peers send one block to each other") + + instances := ig.Instances(2) + blocks := bg.Blocks(2) + addBlock(t, context.Background(), instances[0], blocks[0]) + addBlock(t, context.Background(), instances[1], blocks[1]) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + _, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) + if err != nil { + t.Fatal(err) + } + + ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + + // compare peer ledger receipts + err = assertLedgerMatch(ra, rb) + if err != nil { + t.Fatal(err) + } + + // check that receipts have intended values + rtest := newReceipt(1, 1, 2) + err = assertLedgerEqual(rtest, ra) + if err != nil { + t.Fatal(err) + } + + err = assertLedgerEqual(rtest, rb) + if err != nil { + t.Fatal(err) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} + +type testingScoreLedger struct { + scorePeer server.ScorePeerFunc + started chan struct{} + closed chan struct{} +} + +func newTestingScoreLedger() *testingScoreLedger { + return &testingScoreLedger{ + nil, + make(chan struct{}), + make(chan struct{}), + } +} + +func (tsl *testingScoreLedger) GetReceipt(p peer.ID) *server.Receipt { + return nil +} +func (tsl *testingScoreLedger) AddToSentBytes(p peer.ID, n int) {} +func (tsl *testingScoreLedger) AddToReceivedBytes(p peer.ID, n int) {} +func (tsl *testingScoreLedger) PeerConnected(p peer.ID) {} +func (tsl *testingScoreLedger) PeerDisconnected(p peer.ID) {} +func (tsl *testingScoreLedger) Start(scorePeer server.ScorePeerFunc) { + tsl.scorePeer = scorePeer + close(tsl.started) +} +func (tsl *testingScoreLedger) Stop() { + close(tsl.closed) +} + +// Tests start and stop of a custom decision logic +func TestWithScoreLedger(t *testing.T) { + test.Flaky(t) + + tsl := newTestingScoreLedger() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + bsOpts := []bitswap.Option{bitswap.WithScoreLedger(tsl)} + ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + defer ig.Close() + i := ig.Next() + defer i.Exchange.Close() + + select { + case <-tsl.started: + if tsl.scorePeer == nil { + t.Fatal("Expected the score function to be initialized") + } + case <-time.After(time.Second * 5): + t.Fatal("Expected the score ledger to be started within 5s") + } + + i.Exchange.Close() + select { + case <-tsl.closed: + case <-time.After(time.Second * 5): + t.Fatal("Expected the score ledger to be closed within 5s") + } +} diff --git a/bitswap/client/bitswap_with_sessions_test.go b/bitswap/client/bitswap_with_sessions_test.go new file mode 100644 index 0000000000..d01b9ecd53 --- /dev/null +++ b/bitswap/client/bitswap_with_sessions_test.go @@ -0,0 +1,499 @@ +package client_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/client/internal/session" + testinstance 
"github.com/ipfs/boxo/bitswap/testinstance" + tn "github.com/ipfs/boxo/bitswap/testnet" + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/internal/test" + mockrouting "github.com/ipfs/boxo/routing/mock" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" + tu "github.com/libp2p/go-libp2p-testing/etc" +) + +func getVirtualNetwork() tn.Network { + // FIXME: the tests are really sensitive to the network delay. fix them to work + // well under varying conditions + return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) +} + +func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { + t.Helper() + err := inst.Blockstore().Put(ctx, blk) + if err != nil { + t.Fatal(err) + } + err = inst.Exchange.NotifyNewBlocks(ctx, blk) + if err != nil { + t.Fatal(err) + } +} + +func TestBasicSessions(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + block := bgen.Next() + inst := ig.Instances(2) + + a := inst[0] + b := inst[1] + + // Add a block to Peer B + if err := b.Blockstore().Put(ctx, block); err != nil { + t.Fatal(err) + } + + // Create a session on Peer A + sesa := a.Exchange.NewSession(ctx) + + // Get the block + blkout, err := sesa.GetBlock(ctx, block.Cid()) + if err != nil { + t.Fatal(err) + } + + if !blkout.Cid().Equals(block.Cid()) { + t.Fatal("got wrong block") + } +} + +func assertBlockLists(got, exp []blocks.Block) error { + if len(got) != len(exp) { + return fmt.Errorf("got wrong number of blocks, %d != %d", len(got), len(exp)) + } + + h := cid.NewSet() + for _, b := range got { + h.Add(b.Cid()) + } + for _, b := range exp { + if !h.Has(b.Cid()) { + return fmt.Errorf("didnt have: %s", b.Cid()) + } + } + return nil +} + +func TestSessionBetweenPeers(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.SetSimulateDontHavesOnTimeout(false)}) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(10) + + // Add 101 blocks to Peer A + blks := bgen.Blocks(101) + if err := inst[0].Blockstore().PutMany(ctx, blks); err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + // Create a session on Peer B + ses := inst[1].Exchange.NewSession(ctx) + if _, err := ses.GetBlock(ctx, cids[0]); err != nil { + t.Fatal(err) + } + blks = blks[1:] + cids = cids[1:] + + // Fetch blocks with the session, 10 at a time + for i := 0; i < 10; i++ { + ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } + + // Uninvolved nodes should receive + // - initial broadcast want-have of root block + // - CANCEL (when Peer A receives the root block from Peer B) + for _, is := range inst[2:] { + stat, err := is.Exchange.Stat() + if err != nil { + t.Fatal(err) + } + if stat.MessagesReceived > 2 { + t.Fatal("uninvolved nodes should only receive two messages", 
stat.MessagesReceived) + } + } +} + +func TestSessionSplitFetch(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(11) + + // Add 10 distinct blocks to each of 10 peers + blks := bgen.Blocks(100) + for i := 0; i < 10; i++ { + if err := inst[i].Blockstore().PutMany(ctx, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + // Create a session on the remaining peer and fetch all the blocks 10 at a time + ses := inst[10].Exchange.NewSession(ctx).(*session.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + for i := 0; i < 10; i++ { + ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } +} + +func TestFetchNotConnected(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + other := ig.Next() + + // Provide 10 blocks on Peer A + blks := bgen.Blocks(10) + for _, block := range blks { + addBlock(t, ctx, other, block) + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + // Request blocks with Peer B + // Note: Peer A and Peer B are not initially connected, so this tests + // that Peer B will search for and find Peer A + thisNode := ig.Next() + ses := thisNode.Exchange.NewSession(ctx).(*session.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} + +func TestFetchAfterDisconnect(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{ + bitswap.ProviderSearchDelay(10 * time.Millisecond), + bitswap.RebroadcastDelay(delay.Fixed(15 * time.Millisecond)), + }) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(2) + peerA := inst[0] + peerB := inst[1] + + // Provide 5 blocks on Peer A + blks := bgen.Blocks(10) + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + firstBlks := blks[:5] + for _, block := range firstBlks { + addBlock(t, ctx, peerA, block) + } + + // Request all blocks with Peer B + ses := peerB.Exchange.NewSession(ctx).(*session.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + // Should get first 5 blocks + var got []blocks.Block + for i := 0; i < 5; i++ { + b := <-ch + got = append(got, b) + } + + if err := assertBlockLists(got, blks[:5]); err != nil { + t.Fatal(err) + } + + // Break connection + err = peerA.Adapter.DisconnectFrom(ctx, peerB.Peer) + if err != nil { + 
t.Fatal(err) + } + + time.Sleep(20 * time.Millisecond) + + // Provide remaining blocks + lastBlks := blks[5:] + for _, block := range lastBlks { + addBlock(t, ctx, peerA, block) + } + + // Peer B should call FindProviders() and find Peer A + + // Should get last 5 blocks + for i := 0; i < 5; i++ { + select { + case b := <-ch: + got = append(got, b) + case <-ctx.Done(): + } + } + + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} + +func TestInterestCacheOverflow(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(2049) + inst := ig.Instances(2) + + a := inst[0] + b := inst[1] + + ses := a.Exchange.NewSession(ctx) + zeroch, err := ses.GetBlocks(ctx, []cid.Cid{blks[0].Cid()}) + if err != nil { + t.Fatal(err) + } + + var restcids []cid.Cid + for _, blk := range blks[1:] { + restcids = append(restcids, blk.Cid()) + } + + restch, err := ses.GetBlocks(ctx, restcids) + if err != nil { + t.Fatal(err) + } + + // wait to ensure that all the above cids were added to the sessions cache + time.Sleep(time.Millisecond * 50) + + addBlock(t, ctx, b, blks[0]) + + select { + case blk, ok := <-zeroch: + if ok && blk.Cid().Equals(blks[0].Cid()) { + // success! + } else { + t.Fatal("failed to get the block") + } + case <-restch: + t.Fatal("should not get anything on restch") + case <-time.After(time.Second * 5): + t.Fatal("timed out waiting for block") + } +} + +func TestPutAfterSessionCacheEvict(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(2500) + inst := ig.Instances(1) + + a := inst[0] + + ses := a.Exchange.NewSession(ctx) + + var allcids []cid.Cid + for _, blk := range blks[1:] { + allcids = append(allcids, blk.Cid()) + } + + blkch, err := ses.GetBlocks(ctx, allcids) + if err != nil { + t.Fatal(err) + } + + // wait to ensure that all the above cids were added to the sessions cache + time.Sleep(time.Millisecond * 50) + + addBlock(t, ctx, a, blks[17]) + + select { + case <-blkch: + case <-time.After(time.Millisecond * 50): + t.Fatal("timed out waiting for block") + } +} + +func TestMultipleSessions(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blk := bgen.Blocks(1)[0] + inst := ig.Instances(2) + + a := inst[0] + b := inst[1] + + ctx1, cancel1 := context.WithCancel(ctx) + ses := a.Exchange.NewSession(ctx1) + + blkch, err := ses.GetBlocks(ctx, []cid.Cid{blk.Cid()}) + if err != nil { + t.Fatal(err) + } + cancel1() + + ses2 := a.Exchange.NewSession(ctx) + blkch2, err := ses2.GetBlocks(ctx, []cid.Cid{blk.Cid()}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 10) + addBlock(t, ctx, b, blk) + + select { + case <-blkch2: + case <-time.After(time.Second * 20): + t.Fatal("bad juju") + } + _ = blkch +} + +func TestWantlistClearsOnCancel(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + vnet := 
getVirtualNetwork()
+	ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil)
+	defer ig.Close()
+	bgen := blocksutil.NewBlockGenerator()
+
+	blks := bgen.Blocks(10)
+	var cids []cid.Cid
+	for _, blk := range blks {
+		cids = append(cids, blk.Cid())
+	}
+
+	inst := ig.Instances(1)
+
+	a := inst[0]
+
+	ctx1, cancel1 := context.WithCancel(ctx)
+	ses := a.Exchange.NewSession(ctx1)
+
+	_, err := ses.GetBlocks(ctx, cids)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cancel1()
+
+	if err := tu.WaitFor(ctx, func() error {
+		if len(a.Exchange.GetWantlist()) > 0 {
+			return fmt.Errorf("expected empty wantlist")
+		}
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/bitswap/client/client.go b/bitswap/client/client.go
new file mode 100644
index 0000000000..21dece5ae6
--- /dev/null
+++ b/bitswap/client/client.go
@@ -0,0 +1,479 @@
+// Package client implements the IPFS exchange interface with the BitSwap
+// bilateral exchange protocol.
+package client
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	delay "github.com/ipfs/go-ipfs-delay"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+	bsgetter "github.com/ipfs/boxo/bitswap/client/internal/getter"
+	bsmq "github.com/ipfs/boxo/bitswap/client/internal/messagequeue"
+	"github.com/ipfs/boxo/bitswap/client/internal/notifications"
+	bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager"
+	bspqm "github.com/ipfs/boxo/bitswap/client/internal/providerquerymanager"
+	bssession "github.com/ipfs/boxo/bitswap/client/internal/session"
+	bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager"
+	bssm "github.com/ipfs/boxo/bitswap/client/internal/sessionmanager"
+	bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager"
+	"github.com/ipfs/boxo/bitswap/internal"
+	"github.com/ipfs/boxo/bitswap/internal/defaults"
+	bsmsg "github.com/ipfs/boxo/bitswap/message"
+	bmetrics "github.com/ipfs/boxo/bitswap/metrics"
+	bsnet "github.com/ipfs/boxo/bitswap/network"
+	"github.com/ipfs/boxo/bitswap/tracer"
+	blocks "github.com/ipfs/boxo/blocks"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	exchange "github.com/ipfs/boxo/exchange"
+	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log"
+	"github.com/ipfs/go-metrics-interface"
+	process "github.com/jbenet/goprocess"
+	procctx "github.com/jbenet/goprocess/context"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+var log = logging.Logger("bitswap-client")
+
+// Option defines the functional option type that can be used to configure
+// bitswap instances.
+type Option func(*Client)
+
+// ProviderSearchDelay overwrites the global provider search delay.
+func ProviderSearchDelay(newProvSearchDelay time.Duration) Option {
+	return func(bs *Client) {
+		bs.provSearchDelay = newProvSearchDelay
+	}
+}
+
+// RebroadcastDelay overwrites the global provider rebroadcast delay.
+func RebroadcastDelay(newRebroadcastDelay delay.D) Option {
+	return func(bs *Client) {
+		bs.rebroadcastDelay = newRebroadcastDelay
+	}
+}
+
+// SetSimulateDontHavesOnTimeout controls whether a want that has received no
+// response within the timeout is treated as an incoming DONT_HAVE.
+func SetSimulateDontHavesOnTimeout(send bool) Option {
+	return func(bs *Client) {
+		bs.simulateDontHavesOnTimeout = send
+	}
+}
+
+// WithTracer configures the Client to use the given tracer.
+// The tracer is given access to all messages sent and received by the Client.
+// This interface can be used to implement various statistics (its original intent).
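+//
+// A minimal sketch of a Tracer implementation (the two-method shape is
+// assumed from the tracer package; a real implementation would need its own
+// locking, since messages arrive concurrently):
+//
+//	type countingTracer struct{ sent, recvd int }
+//
+//	func (t *countingTracer) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { t.recvd++ }
+//	func (t *countingTracer) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage)     { t.sent++ }
+//
+// The tracer would then be passed to New via WithTracer(&countingTracer{}).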
+func WithTracer(tap tracer.Tracer) Option { + return func(bs *Client) { + bs.tracer = tap + } +} + +func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { + return func(bs *Client) { + bs.blockReceivedNotifier = brn + } +} + +type BlockReceivedNotifier interface { + // ReceivedBlocks notifies the decision engine that a peer is well-behaving + // and gave us useful data, potentially increasing its score and making us + // send them more data in exchange. + ReceivedBlocks(peer.ID, []blocks.Block) +} + +// New initializes a Bitswap client that runs until client.Close is called. +func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Client { + // important to use provided parent context (since it may include important + // loggable data). It's probably not a good idea to allow bitswap to be + // coupled to the concerns of the ipfs daemon in this way. + // + // FIXME(btc) Now that bitswap manages itself using a process, it probably + // shouldn't accept a context anymore. Clients should probably use Close() + // exclusively. We should probably find another way to share logging data + ctx, cancelFunc := context.WithCancel(parent) + + px := process.WithTeardown(func() error { + return nil + }) + + // onDontHaveTimeout is called when a want-block is sent to a peer that + // has an old version of Bitswap that doesn't support DONT_HAVE messages, + // or when no response is received within a timeout. + var sm *bssm.SessionManager + var bs *Client + onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { + // Simulate a message arriving with DONT_HAVEs + if bs.simulateDontHavesOnTimeout { + sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + } + } + peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { + return bsmq.New(ctx, p, network, onDontHaveTimeout) + } + + sim := bssim.New() + bpm := bsbpm.New() + pm := bspm.New(ctx, peerQueueFactory, network.Self()) + pqm := bspqm.New(ctx, network) + + sessionFactory := func( + sessctx context.Context, + sessmgr bssession.SessionManager, + id uint64, + spm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) bssm.Session { + return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + } + sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { + return bsspm.New(id, network.ConnectionManager()) + } + notif := notifications.New() + sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) + + bs = &Client{ + blockstore: bstore, + network: network, + process: px, + pm: pm, + pqm: pqm, + sm: sm, + sim: sim, + notif: notif, + counters: new(counters), + dupMetric: bmetrics.DupHist(ctx), + allMetric: bmetrics.AllHist(ctx), + provSearchDelay: defaults.ProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), + simulateDontHavesOnTimeout: true, + } + + // apply functional options before starting and running bitswap + for _, option := range options { + option(bs) + } + + bs.pqm.Startup() + + // bind the context and process. + // do it over here to avoid closing before all setup is done. 
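+	// Teardown happens in the reverse order of setup: the process closes
+	// first, then the session manager shuts down, the context is cancelled,
+	// and finally the notification pubsub stops.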
+	go func() {
+		<-px.Closing() // process closes first
+		sm.Shutdown()
+		cancelFunc()
+		notif.Shutdown()
+	}()
+	procctx.CloseAfterContext(px, ctx) // parent cancelled first
+
+	return bs
+}
+
+// Client instances implement the bitswap protocol.
+type Client struct {
+	pm *bspm.PeerManager
+
+	// the provider query manager manages requests to find providers
+	pqm *bspqm.ProviderQueryManager
+
+	// network delivers messages on behalf of the session
+	network bsnet.BitSwapNetwork
+
+	// blockstore is the local database
+	// NB: ensure threadsafety
+	blockstore blockstore.Blockstore
+
+	// manages channels of outgoing blocks for sessions
+	notif notifications.PubSub
+
+	process process.Process
+
+	// Counters for various statistics
+	counterLk sync.Mutex
+	counters  *counters
+
+	// Histograms from the metrics interface
+	dupMetric metrics.Histogram
+	allMetric metrics.Histogram
+
+	// External statistics interface
+	tracer tracer.Tracer
+
+	// the SessionManager routes requests to interested sessions
+	sm *bssm.SessionManager
+
+	// the SessionInterestManager keeps track of which sessions are interested
+	// in which CIDs
+	sim *bssim.SessionInterestManager
+
+	// how long to wait before looking for providers in a session
+	provSearchDelay time.Duration
+
+	// how often to rebroadcast providing requests to find more optimized providers
+	rebroadcastDelay delay.D
+
+	blockReceivedNotifier BlockReceivedNotifier
+
+	// whether we should actually simulate dont haves on request timeout
+	simulateDontHavesOnTimeout bool
+}
+
+type counters struct {
+	blocksRecvd    uint64
+	dupBlocksRecvd uint64
+	dupDataRecvd   uint64
+	dataRecvd      uint64
+	messagesRecvd  uint64
+}
+
+// GetBlock attempts to retrieve a particular block from peers within the
+// deadline enforced by the context.
+func (bs *Client) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+	ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String())))
+	defer span.End()
+	return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks)
+}
+
+// GetBlocks returns a channel where the caller may receive blocks that
+// correspond to the provided |keys|. Returns an error if BitSwap is unable to
+// begin this request within the deadline enforced by the context.
+//
+// NB: Your request remains open until the context expires. To conserve
+// resources, provide a context with a reasonably short deadline (i.e. not one
+// that lasts throughout the lifetime of the server).
+func (bs *Client) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) {
+	ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys))))
+	defer span.End()
+	session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay)
+	return session.GetBlocks(ctx, keys)
+}
+
+// NotifyNewBlocks announces the existence of blocks to this bitswap service.
+// Bitswap itself doesn't store new blocks. It is the caller's responsibility
+// to ensure that those blocks are available in the blockstore before calling
+// this function.
+func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error {
+	ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks")
+	defer span.End()
+
+	select {
+	case <-bs.process.Closing():
+		return errors.New("bitswap is closed")
+	default:
+	}
+
+	blkCids := make([]cid.Cid, len(blks))
+	for i, blk := range blks {
+		blkCids[i] = blk.Cid()
+	}
+
+	// Send all block keys (including duplicates) to any sessions that want them.
+	// (The duplicates are needed by sessions for accounting purposes)
+	bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil)
+
+	// Publish the block to any Bitswap clients that had requested blocks.
+	// (the sessions use this pubsub mechanism to inform clients of incoming
+	// blocks)
+	bs.notif.Publish(blks...)
+
+	return nil
+}
+
+// receiveBlocksFrom processes blocks received from the network.
+func (bs *Client) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error {
+	select {
+	case <-bs.process.Closing():
+		return errors.New("bitswap is closed")
+	default:
+	}
+
+	wanted, notWanted := bs.sim.SplitWantedUnwanted(blks)
+	for _, b := range notWanted {
+		log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from)
+	}
+
+	allKs := make([]cid.Cid, 0, len(blks))
+	for _, b := range blks {
+		allKs = append(allKs, b.Cid())
+	}
+
+	// Inform the PeerManager so that we can calculate per-peer latency
+	combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves))
+	combined = append(combined, allKs...)
+	combined = append(combined, haves...)
+	combined = append(combined, dontHaves...)
+	bs.pm.ResponseReceived(from, combined)
+
+	// Send all block keys (including duplicates) to any sessions that want
+	// them for accounting purposes.
+	bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves)
+
+	if bs.blockReceivedNotifier != nil {
+		bs.blockReceivedNotifier.ReceivedBlocks(from, wanted)
+	}
+
+	// Publish the block to any Bitswap clients that had requested blocks.
+	// (the sessions use this pubsub mechanism to inform clients of incoming
+	// blocks)
+	for _, b := range wanted {
+		bs.notif.Publish(b)
+	}
+
+	for _, b := range wanted {
+		log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid())
+	}
+
+	return nil
+}
+
+// ReceiveMessage is called by the network interface when a new message is
+// received.
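+// It updates the message counters, forwards the message to the tracer (if
+// one is configured), and hands any blocks, HAVEs and DONT_HAVEs off to the
+// sessions that want them.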
+func (bs *Client) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { + bs.counterLk.Lock() + bs.counters.messagesRecvd++ + bs.counterLk.Unlock() + + if bs.tracer != nil { + bs.tracer.MessageReceived(p, incoming) + } + + iblocks := incoming.Blocks() + + if len(iblocks) > 0 { + bs.updateReceiveCounters(iblocks) + for _, b := range iblocks { + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + } + } + + haves := incoming.Haves() + dontHaves := incoming.DontHaves() + if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { + // Process blocks + err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) + if err != nil { + log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) + return + } + } +} + +func (bs *Client) updateReceiveCounters(blocks []blocks.Block) { + // Check which blocks are in the datastore + // (Note: any errors from the blockstore are simply logged out in + // blockstoreHas()) + blocksHas := bs.blockstoreHas(blocks) + + bs.counterLk.Lock() + defer bs.counterLk.Unlock() + + // Do some accounting for each block + for i, b := range blocks { + has := blocksHas[i] + + blkLen := len(b.RawData()) + bs.allMetric.Observe(float64(blkLen)) + if has { + bs.dupMetric.Observe(float64(blkLen)) + } + + c := bs.counters + + c.blocksRecvd++ + c.dataRecvd += uint64(blkLen) + if has { + c.dupBlocksRecvd++ + c.dupDataRecvd += uint64(blkLen) + } + } +} + +func (bs *Client) blockstoreHas(blks []blocks.Block) []bool { + res := make([]bool, len(blks)) + + wg := sync.WaitGroup{} + for i, block := range blks { + wg.Add(1) + go func(i int, b blocks.Block) { + defer wg.Done() + + has, err := bs.blockstore.Has(context.TODO(), b.Cid()) + if err != nil { + log.Infof("blockstore.Has error: %s", err) + has = false + } + + res[i] = has + }(i, block) + } + wg.Wait() + + return res +} + +// PeerConnected is called by the network interface +// when a peer initiates a new connection to bitswap. +func (bs *Client) PeerConnected(p peer.ID) { + bs.pm.Connected(p) +} + +// PeerDisconnected is called by the network interface when a peer +// closes a connection +func (bs *Client) PeerDisconnected(p peer.ID) { + bs.pm.Disconnected(p) +} + +// ReceiveError is called by the network interface when an error happens +// at the network layer. Currently just logs error. +func (bs *Client) ReceiveError(err error) { + log.Infof("Bitswap Client ReceiveError: %s", err) + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger +} + +// Close is called to shutdown the Client +func (bs *Client) Close() error { + return bs.process.Close() +} + +// GetWantlist returns the current local wantlist (both want-blocks and +// want-haves). +func (bs *Client) GetWantlist() []cid.Cid { + return bs.pm.CurrentWants() +} + +// GetWantBlocks returns the current list of want-blocks. +func (bs *Client) GetWantBlocks() []cid.Cid { + return bs.pm.CurrentWantBlocks() +} + +// GetWanthaves returns the current list of want-haves. +func (bs *Client) GetWantHaves() []cid.Cid { + return bs.pm.CurrentWantHaves() +} + +// IsOnline is needed to match go-ipfs-exchange-interface +func (bs *Client) IsOnline() bool { + return true +} + +// NewSession generates a new Bitswap session. You should use this, rather +// that calling Client.GetBlocks, any time you intend to do several related +// block requests in a row. 
+// The session returned will have its own GetBlocks method, but the session
+// will use the fact that the requests are related to be more efficient in
+// its requests to peers. If you are using a session from go-blockservice,
+// it will create a bitswap session automatically.
+func (bs *Client) NewSession(ctx context.Context) exchange.Fetcher {
+	ctx, span := internal.StartSpan(ctx, "NewSession")
+	defer span.End()
+	return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay)
+}
diff --git a/bitswap/client/docs/go-bitswap.png b/bitswap/client/docs/go-bitswap.png
new file mode 100644
index 0000000000..805bf6562a
Binary files /dev/null and b/bitswap/client/docs/go-bitswap.png differ
diff --git a/bitswap/client/docs/go-bitswap.puml b/bitswap/client/docs/go-bitswap.puml
new file mode 100644
index 0000000000..af9134d7e5
--- /dev/null
+++ b/bitswap/client/docs/go-bitswap.puml
@@ -0,0 +1,49 @@
+@startuml Bitswap Components
+
+node "Top Level Interface" {
+  [Bitswap]
+}
+
+node "Sending Blocks" {
+  [Bitswap] --* [Engine]
+  [Engine] -left-* [Ledger]
+  [Engine] -right-* [PeerTaskQueue]
+  [Engine] --> [TaskWorker (workers.go)]
+}
+
+node "Providing" {
+  [Bitswap] --* [Provide Collector (workers.go)]
+  [Provide Collector (workers.go)] --* [Provide Worker (workers.go)]
+}
+
+node "Finding Providers" {
+  [Bitswap] --* [ProvideQueryManager]
+}
+
+node "Sessions (smart requests)" {
+  [Bitswap] --* [SessionManager]
+  [SessionManager] --> [SessionInterestManager]
+  [SessionManager] --o [Session]
+  [SessionManager] --> [BlockPresenceManager]
+  [Session] --* [sessionWantSender]
+  [Session] --* [SessionPeerManager]
+  [Session] --> [ProvideQueryManager]
+  [Session] --* [sessionWants]
+  [Session] --> [SessionInterestManager]
+  [sessionWantSender] --> [BlockPresenceManager]
+}
+
+node "Requesting Blocks" {
+  [SessionManager] --> [PeerManager]
+  [sessionWantSender] --> [PeerManager]
+  [PeerManager] --* [MessageQueue]
+}
+
+node "Network" {
+  [BitSwapNetwork]
+  [MessageQueue] --> [BitSwapNetwork]
+  [ProvideQueryManager] --> [BitSwapNetwork]
+  [TaskWorker (workers.go)] --> [BitSwapNetwork]
+  [Provide Worker (workers.go)] --> [BitSwapNetwork]
+}
+@enduml
\ No newline at end of file
diff --git a/bitswap/client/docs/how-bitswap-works.md b/bitswap/client/docs/how-bitswap-works.md
new file mode 100644
index 0000000000..303b05763a
--- /dev/null
+++ b/bitswap/client/docs/how-bitswap-works.md
@@ -0,0 +1,143 @@
+How Bitswap Works
+=================
+
+When a client requests blocks, Bitswap sends the CID of those blocks to its peers as "wants". When Bitswap receives a "want" from a peer, it responds with the corresponding block.
+
+### Requesting Blocks
+
+#### Sessions
+
+Bitswap Sessions allow the client to make related requests to the same group of peers. For example, requests to fetch all the blocks in a file would typically be made with a single session.
+
+#### Discovery
+
+To discover which peers have a block, Bitswap broadcasts a `want-have` message to all peers it is connected to, asking if they have the block.
+
+Any peers that have the block respond with a `HAVE` message. They are added to the Session.
+
+If no connected peers have the block, Bitswap queries the DHT to find peers that have the block.
+
+### Wants
+
+When the client requests a block, Bitswap sends a `want-have` message with the block CID to all peers in the Session to ask who has the block.
+
+Bitswap simultaneously sends a `want-block` message to one of the peers in the Session to request the block.
+If the peer does not have the block, it responds with a `DONT_HAVE` message. In that case Bitswap selects another peer and sends the `want-block` to that peer.
+
+If no peers have the block, Bitswap broadcasts a `want-have` to all connected peers, and queries the DHT to find peers that have the block.
+
+#### Peer Selection
+
+Bitswap uses a probabilistic algorithm to select which peer to send `want-block` to, favouring peers that
+- sent `HAVE` for the block
+- were discovered as providers of the block in the DHT
+- were first to send blocks to previous session requests
+
+The selection algorithm includes some randomness so as to allow peers that are discovered later, but are more responsive, to rise in the ranking.
+
+#### Periodic Search Widening
+
+Periodically the Bitswap Session selects a random CID from the list of "pending wants" (wants that have been sent but for which no block has been received). Bitswap broadcasts a `want-have` to all connected peers and queries the DHT for the CID.
+
+### Serving Blocks
+
+#### Processing Requests
+
+When Bitswap receives a `want-have`, it checks if the block is in the local blockstore.
+
+If the block is in the local blockstore, Bitswap responds with `HAVE`. If the block is small, Bitswap sends the block itself instead of `HAVE`.
+
+If the block is not in the local blockstore, Bitswap checks the `send-dont-have` flag on the request. If `send-dont-have` is true, Bitswap sends `DONT_HAVE`. Otherwise it does not respond.
+
+#### Processing Incoming Blocks
+
+When Bitswap receives a block, it checks to see if any peers sent `want-have` or `want-block` for the block. If so, it sends `HAVE` or the block itself to those peers.
+
+#### Priority
+
+Bitswap keeps requests from each peer in separate queues, ordered by the priority specified in the request message.
+
+To select which peer to send the next response to, Bitswap chooses the peer with the least amount of data in its send queue. That way it will tend to "keep peers busy" by always keeping some data in each peer's send queue.
+
+
+Implementation
+==============
+
+![Bitswap Components](./go-bitswap.png)
+
+### Bitswap
+
+The Bitswap class receives incoming messages and implements the Exchange API.
+
+When a message is received, Bitswap
+- Records some statistics about the message
+- Informs the Engine of any new wants
+  So that the Engine can send responses to the wants
+- Informs the Engine of any received blocks
+  So that the Engine can send the received blocks to any peers that want them
+- Informs the SessionManager of received blocks, HAVEs and DONT_HAVEs
+  So that the SessionManager can inform interested sessions
+
+When the client makes an API call, Bitswap creates a new Session and calls the corresponding method (e.g. `GetBlocks()`).
+
+### Sending Blocks
+
+When the Engine is informed of new wants it
+- Adds the wants to the Ledger (peer A wants block with CID Qmhash...)
+- Checks the blockstore for the corresponding blocks, and adds a task to the PeerTaskQueue
+  - If the blockstore does not have a wanted block, adds a `DONT_HAVE` task
+  - If the blockstore has the block
+    - for a `want-have` adds a `HAVE` task
+    - for a `want-block` adds a `block` task
+
+When the Engine is informed of new blocks, it checks the Ledger to see if any peers want information about those blocks.
+
+- For each block
+  - For each peer that sent a `want-have` for the corresponding block
+    Adds a `HAVE` task to the PeerTaskQueue
+  - For each peer that sent a `want-block` for the corresponding block
+    Adds a `block` task to the PeerTaskQueue
+
+The Engine periodically pops tasks off the PeerTaskQueue, and creates a message with `blocks`, `HAVEs` and `DONT_HAVEs`.
+The PeerTaskQueue prioritizes tasks such that the peers with the least amount of data in their send queue are highest priority, so as to "keep peers busy".
+
+### Requesting Blocks
+
+When the SessionManager is informed of a new message, it
+- informs the BlockPresenceManager
+  The BlockPresenceManager keeps track of which peers have sent HAVEs and DONT_HAVEs for each block
+- informs the Sessions that are interested in the received blocks and wants
+- informs the PeerManager of received blocks
+  The PeerManager checks if any wants were sent to a peer for the received blocks. If so, it sends a `CANCEL` message to those peers.
+
+### Sessions
+
+The Session starts in "discovery" mode. This means it doesn't have any peers yet, and needs to discover which peers have the blocks it wants.
+
+When the client initially requests blocks from a Session, the Session
+- informs the SessionInterestManager that it is interested in the want
+- informs the sessionWantManager of the want
+- tells the PeerManager to broadcast a `want-have` to all connected peers so as to discover which peers have the block
+- queries the ProviderQueryManager to discover which peers have the block
+
+When the session receives a message with `HAVE` or a `block`, it informs the SessionPeerManager. The SessionPeerManager keeps track of all peers in the session.
+When the session receives a message with a `block` it informs the SessionInterestManager.
+
+Once the session has peers it is no longer in "discovery" mode. When the client requests subsequent blocks the Session informs the sessionWantSender. The sessionWantSender tells the PeerManager to send `want-have` and `want-block` to peers in the session.
+
+For each block that the Session wants, the sessionWantSender decides which peer is most likely to have the block by checking with the BlockPresenceManager which peers have sent a `HAVE` for the block. If no peers or multiple peers have sent `HAVE`, a peer is chosen probabilistically according to how many times each peer was first to send a block in response to previous wants requested by the Session. The sessionWantSender sends a single "optimistic" `want-block` to the chosen peer, and sends `want-have` to all other peers in the Session.
+When a peer responds with `DONT_HAVE`, the Session sends `want-block` to the next best peer, and so on until the block is received.
+
+### PeerManager
+
+The PeerManager creates a MessageQueue for each peer that connects to Bitswap. It remembers which `want-have` / `want-block` has been sent to each peer, and directs any new wants to the correct peer.
+The MessageQueue groups together wants into a message, and sends the message to the peer. It monitors for timeouts and simulates a `DONT_HAVE` response if a peer takes too long to respond.
+
+### Finding Providers
+
+When bitswap can't find a connected peer who already has the block it wants, it falls back to querying a content routing system (a DHT in IPFS's case) to try to locate a peer with the block.
+
+Bitswap routes these requests through the ProviderQueryManager system, which rate-limits these requests and also deduplicates in-process requests.
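+
+The sketch below illustrates the dedup-and-rate-limit pattern (the names and types are illustrative, not the actual ProviderQueryManager API): concurrent requests for the same CID share a single underlying query, and a semaphore channel bounds how many queries run at once.
+
+```go
+import (
+	"context"
+	"sync"
+
+	cid "github.com/ipfs/go-cid"
+	peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// queryManager is a simplified, hypothetical stand-in for the ProviderQueryManager.
+type queryManager struct {
+	lk       sync.Mutex
+	inflight map[cid.Cid][]chan []peer.ID              // waiters per in-process CID
+	sem      chan struct{}                             // capacity = max concurrent queries
+	find     func(context.Context, cid.Cid) []peer.ID // the underlying routing query
+}
+
+func (qm *queryManager) findProviders(ctx context.Context, c cid.Cid) <-chan []peer.ID {
+	out := make(chan []peer.ID, 1)
+
+	qm.lk.Lock()
+	if waiters, ok := qm.inflight[c]; ok {
+		// A query for this CID is already running; just wait for its result.
+		qm.inflight[c] = append(waiters, out)
+		qm.lk.Unlock()
+		return out
+	}
+	qm.inflight[c] = []chan []peer.ID{out}
+	qm.lk.Unlock()
+
+	go func() {
+		qm.sem <- struct{}{} // acquire a slot (rate limit)
+		provs := qm.find(ctx, c)
+		<-qm.sem // release the slot
+
+		qm.lk.Lock()
+		waiters := qm.inflight[c]
+		delete(qm.inflight, c)
+		qm.lk.Unlock()
+
+		// Each waiter channel is buffered, so these sends do not block.
+		for _, w := range waiters {
+			w <- provs
+		}
+	}()
+	return out
+}
+```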
+ +### Providing + +As a bitswap client receives blocks, by default it announces them on the provided content routing system (again, a DHT in most cases). This behaviour can be disabled by passing `bitswap.ProvideEnabled(false)` as a parameter when initializing Bitswap. IPFS currently has its own experimental provider system ([go-ipfs-provider](https://github.com/ipfs/go-ipfs-provider)) which will eventually replace Bitswap's system entirely. + diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go new file mode 100644 index 0000000000..1b76acc5b8 --- /dev/null +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go @@ -0,0 +1,121 @@ +package blockpresencemanager + +import ( + "sync" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// BlockPresenceManager keeps track of which peers have indicated that they +// have or explicitly don't have a block +type BlockPresenceManager struct { + sync.RWMutex + presence map[cid.Cid]map[peer.ID]bool +} + +func New() *BlockPresenceManager { + return &BlockPresenceManager{ + presence: make(map[cid.Cid]map[peer.ID]bool), + } +} + +// ReceiveFrom is called when a peer sends us information about which blocks +// it has and does not have +func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range haves { + bpm.updateBlockPresence(p, c, true) + } + for _, c := range dontHaves { + bpm.updateBlockPresence(p, c, false) + } +} + +func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { + _, ok := bpm.presence[c] + if !ok { + bpm.presence[c] = make(map[peer.ID]bool) + } + + // Make sure not to change HAVE to DONT_HAVE + has, pok := bpm.presence[c][p] + if pok && has { + return + } + bpm.presence[c][p] = present +} + +// PeerHasBlock indicates whether the given peer has sent a HAVE for the given +// cid +func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + return bpm.presence[c][p] +} + +// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE +// for the given cid +func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + have, known := bpm.presence[c][p] + return known && !have +} + +// Filters the keys such that all the given peers have received a DONT_HAVE +// for a key. +// This allows us to know if we've exhausted all possibilities of finding +// the key with the peers we know about. 
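+// For example (illustrative), a session can check whether all of its peers
+// have explicitly said they don't have its pending wants, and fall back to
+// rediscovery:
+//
+//	exhausted := bpm.AllPeersDoNotHaveBlock(sessionPeers, pendingWants)
+//	// re-broadcast want-haves / query the DHT for the keys in `exhausted`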
+func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { + bpm.RLock() + defer bpm.RUnlock() + + var res []cid.Cid + for _, c := range ks { + if bpm.allDontHave(peers, c) { + res = append(res, c) + } + } + return res +} + +func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { + // Check if we know anything about the cid's block presence + ps, cok := bpm.presence[c] + if !cok { + return false + } + + // Check if we explicitly know that all the given peers do not have the cid + for _, p := range peers { + if has, pok := ps[p]; !pok || has { + return false + } + } + return true +} + +// RemoveKeys cleans up the given keys from the block presence map +func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range ks { + delete(bpm.presence, c) + } +} + +// HasKey indicates whether the BlockPresenceManager is tracking the given key +// (used by the tests) +func (bpm *BlockPresenceManager) HasKey(c cid.Cid) bool { + bpm.Lock() + defer bpm.Unlock() + + _, ok := bpm.presence[c] + return ok +} diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go new file mode 100644 index 0000000000..5e30073a36 --- /dev/null +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go @@ -0,0 +1,244 @@ +package blockpresencemanager + +import ( + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +const ( + expHasFalseMsg = "Expected PeerHasBlock to return false" + expHasTrueMsg = "Expected PeerHasBlock to return true" + expDoesNotHaveFalseMsg = "Expected PeerDoesNotHaveBlock to return false" + expDoesNotHaveTrueMsg = "Expected PeerDoesNotHaveBlock to return true" +) + +func TestBlockPresenceManager(t *testing.T) { + test.Flaky(t) + + bpm := New() + + p := testutil.GeneratePeers(1)[0] + cids := testutil.GenerateCids(2) + c0 := cids[0] + c1 := cids[1] + + // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // HAVE cid0 / DONT_HAVE cid1 + bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) + + // Peer has received HAVE for cid0 + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Peer has received DONT_HAVE for cid1 + if !bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveTrueMsg) + } + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + + // HAVE cid1 / DONT_HAVE cid0 + bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) + + // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + + // HAVE cid1 should over-write earlier DONT_HAVE cid1 + if !bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid0 + bpm.RemoveKeys([]cid.Cid{c0}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + 
t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid1 + bpm.RemoveKeys([]cid.Cid{c1}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAddRemoveMulti(t *testing.T) { + test.Flaky(t) + + bpm := New() + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // p0: HAVE cid0, cid1 / DONT_HAVE cid1, cid2 + // p1: HAVE cid1, cid2 / DONT_HAVE cid0 + bpm.ReceiveFrom(p0, []cid.Cid{c0, c1}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1, c2}, []cid.Cid{c0}) + + // Peer 0 should end up with + // - HAVE cid0 + // - HAVE cid1 + // - DONT_HAVE cid2 + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Peer 1 should end up with + // - HAVE cid1 + // - HAVE cid2 + // - DONT_HAVE cid0 + if !bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Remove cid1 and cid2. Should end up with + // Peer 0: HAVE cid0 + // Peer 1: DONT_HAVE cid0 + bpm.RemoveKeys([]cid.Cid{c1, c2}) + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // The other keys should have been cleared, so both HasBlock() and + // DoesNotHaveBlock() should return false + if bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p0, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAllPeersDoNotHaveBlock(t *testing.T) { + test.Flaky(t) + + bpm := New() + + peers := testutil.GeneratePeers(3) + p0 := peers[0] + p1 := peers[1] + p2 := peers[2] + + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // c0 c1 c2 + // p0 ? N N + // p1 N Y ? 
+	// p2    Y  Y  N
+	bpm.ReceiveFrom(p0, []cid.Cid{}, []cid.Cid{c1, c2})
+	bpm.ReceiveFrom(p1, []cid.Cid{c1}, []cid.Cid{c0})
+	bpm.ReceiveFrom(p2, []cid.Cid{c0, c1}, []cid.Cid{c2})
+
+	type testcase struct {
+		peers []peer.ID
+		ks    []cid.Cid
+		exp   []cid.Cid
+	}
+
+	testcases := []testcase{
+		{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}},
+		{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}},
+		{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}},
+
+		{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}},
+		{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}},
+		{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}},
+
+		{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}},
+		{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}},
+		{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}},
+
+		// p0 received DONT_HAVE for c1 & c2 (but not for c0)
+		{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}},
+		{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}},
+		// Both p0 and p2 received DONT_HAVE for c2
+		{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}},
+		{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}},
+	}
+
+	for i, tc := range testcases {
+		if !testutil.MatchKeysIgnoreOrder(
+			bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks),
+			tc.exp,
+		) {
+			t.Fatalf("test case %d failed: expected matching keys", i)
+		}
+	}
+}
diff --git a/bitswap/client/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go
new file mode 100644
index 0000000000..cc8d4b2ab5
--- /dev/null
+++ b/bitswap/client/internal/getter/getter.go
@@ -0,0 +1,138 @@
+package getter
+
+import (
+	"context"
+	"errors"
+
+	"github.com/ipfs/boxo/bitswap/client/internal"
+	notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+	logging "github.com/ipfs/go-log"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+var log = logging.Logger("bitswap")
+
+// GetBlocksFunc is any function that can take an array of CIDs and return a
+// channel of incoming blocks.
+type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error)
+
+// SyncGetBlock takes a block cid and an async function for getting several
+// blocks that returns a channel, and uses that function to return the
+// block synchronously.
+func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) {
+	p, span := internal.StartSpan(p, "Getter.SyncGetBlock")
+	defer span.End()
+
+	if !k.Defined() {
+		log.Error("undefined cid in GetBlock")
+		return nil, ipld.ErrNotFound{Cid: k}
+	}
+
+	// Any async work initiated by this function must end when this function
+	// returns. To ensure this, derive a new context. Note that it is okay to
+	// listen on parent in this scope, but NOT okay to pass |parent| to
+	// functions called by this one. Otherwise those functions won't return
+	// when this context's cancel func is executed. This is difficult to
+	// enforce. May this comment keep you safe.
+	ctx, cancel := context.WithCancel(p)
+	defer cancel()
+
+	promise, err := gb(ctx, []cid.Cid{k})
+	if err != nil {
+		return nil, err
+	}
+
+	select {
+	case block, ok := <-promise:
+		if !ok {
+			select {
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			default:
+				return nil, errors.New("promise channel was closed")
+			}
+		}
+		return block, nil
+	case <-p.Done():
+		return nil, p.Err()
+	}
+}
+
+// WantFunc is any function that can express a want for a set of blocks.
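+// In practice this is a closure that hands the keys to a session's
+// want-sending machinery.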
+type WantFunc func(context.Context, []cid.Cid)
+
+// AsyncGetBlocks takes a set of block cids, a pubsub channel for incoming
+// blocks, a want function, and a close function, and returns a channel of
+// incoming blocks.
+func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub,
+	want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) {
+	ctx, span := internal.StartSpan(ctx, "Getter.AsyncGetBlocks")
+	defer span.End()
+
+	// If there are no keys supplied, just return a closed channel
+	if len(keys) == 0 {
+		out := make(chan blocks.Block)
+		close(out)
+		return out, nil
+	}
+
+	// Use a PubSub notifier to listen for incoming blocks for each key
+	remaining := cid.NewSet()
+	promise := notif.Subscribe(ctx, keys...)
+	for _, k := range keys {
+		log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k)
+		remaining.Add(k)
+	}
+
+	// Send the want request for the keys to the network
+	want(ctx, keys)
+
+	out := make(chan blocks.Block)
+	go handleIncoming(ctx, sessctx, remaining, promise, out, cwants)
+	return out, nil
+}
+
+// Listens for incoming blocks, passing them to the out channel.
+// If the context is cancelled or the incoming channel closes, calls cfun with
+// any keys corresponding to blocks that were never received.
+func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set,
+	in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) {
+
+	ctx, cancel := context.WithCancel(ctx)
+
+	// Clean up before exiting this function, and call the cancel function on
+	// any remaining keys
+	defer func() {
+		cancel()
+		close(out)
+		// can't just defer this call on its own, arguments are resolved *when* the defer is created
+		cfun(remaining.Keys())
+	}()
+
+	for {
+		select {
+		case blk, ok := <-in:
+			// If the channel is closed, we're done (note that PubSub closes
+			// the channel once all the keys have been received)
+			if !ok {
+				return
+			}
+
+			remaining.Remove(blk.Cid())
+			select {
+			case out <- blk:
+			case <-ctx.Done():
+				return
+			case <-sessctx.Done():
+				return
+			}
+		case <-ctx.Done():
+			return
+		case <-sessctx.Done():
+			return
+		}
+	}
+}
diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go
new file mode 100644
index 0000000000..e1b42c421b
--- /dev/null
+++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go
@@ -0,0 +1,398 @@
+package messagequeue
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	cid "github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
+)
+
+const (
+	// dontHaveTimeout is used to simulate a DONT_HAVE when communicating with
+	// a peer whose Bitswap client doesn't support the DONT_HAVE response,
+	// or when the peer takes too long to respond.
+	// If the peer doesn't respond to a want-block within the timeout, the
+	// local node assumes that the peer doesn't have the block.
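+	// This default is only a starting point: once ping or message latency
+	// measurements are available, the timeout is recalculated from them
+	// (see calculateTimeoutFromPingLatency and
+	// calculateTimeoutFromMessageLatency below).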
+	dontHaveTimeout = 5 * time.Second
+
+	// maxExpectedWantProcessTime is the maximum amount of time we expect a
+	// peer to take to process a want and initiate sending a response to us
+	maxExpectedWantProcessTime = 2 * time.Second
+
+	// maxTimeout is the maximum allowed timeout, regardless of latency
+	maxTimeout = dontHaveTimeout + maxExpectedWantProcessTime
+
+	// pingLatencyMultiplier is multiplied by the average ping time to
+	// get an upper bound on how long we expect to wait for a peer's response
+	// to arrive
+	pingLatencyMultiplier = 3
+
+	// messageLatencyAlpha is the alpha supplied to the message latency EWMA
+	messageLatencyAlpha = 0.5
+
+	// To give a margin for error, the timeout is calculated as
+	// messageLatencyMultiplier * message latency
+	messageLatencyMultiplier = 2
+)
+
+// PeerConnection is a connection to a peer that can be pinged, and the
+// average latency measured
+type PeerConnection interface {
+	// Ping the peer
+	Ping(context.Context) ping.Result
+	// The average latency of all pings
+	Latency() time.Duration
+}
+
+// pendingWant keeps track of a want that has been sent and we're waiting
+// for a response or for a timeout to expire
+type pendingWant struct {
+	c      cid.Cid
+	active bool
+	sent   time.Time
+}
+
+// dontHaveTimeoutMgr simulates a DONT_HAVE message if the peer takes too long
+// to respond to a message.
+// The timeout is based on latency - we start with a default latency, while
+// we ping the peer to estimate latency. If we receive a response from the
+// peer we use the response latency.
+type dontHaveTimeoutMgr struct {
+	clock                      clock.Clock
+	ctx                        context.Context
+	shutdown                   func()
+	peerConn                   PeerConnection
+	onDontHaveTimeout          func([]cid.Cid)
+	defaultTimeout             time.Duration
+	maxTimeout                 time.Duration
+	pingLatencyMultiplier      int
+	messageLatencyMultiplier   int
+	maxExpectedWantProcessTime time.Duration
+
+	// All variables below here must be protected by the lock
+	lk sync.RWMutex
+	// has the timeout manager started
+	started bool
+	// wants that are active (waiting for a response or timeout)
+	activeWants map[cid.Cid]*pendingWant
+	// queue of wants, from oldest to newest
+	wantQueue []*pendingWant
+	// time to wait for a response (depends on latency)
+	timeout time.Duration
+	// ewma of message latency (time from message sent to response received)
+	messageLatency *latencyEwma
+	// timer used to wait until want at front of queue expires
+	checkForTimeoutsTimer *clock.Timer
+	// used for testing -- timeoutsTriggered is signalled when scheduled
+	// dont-have timeouts are triggered
+	timeoutsTriggered chan struct{}
+}
+
+// newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr
+// onDontHaveTimeout is called when pending keys expire (not cancelled before timeout)
+func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), clock clock.Clock) *dontHaveTimeoutMgr {
+	return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout,
+		pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock, nil)
+}
+
+// newDontHaveTimeoutMgrWithParams is used by the tests
+func newDontHaveTimeoutMgrWithParams(
+	pc PeerConnection,
+	onDontHaveTimeout func([]cid.Cid),
+	defaultTimeout time.Duration,
+	maxTimeout time.Duration,
+	pingLatencyMultiplier int,
+	messageLatencyMultiplier int,
+	maxExpectedWantProcessTime time.Duration,
+	clock clock.Clock,
+	timeoutsTriggered chan struct{}) *dontHaveTimeoutMgr {
+
+	ctx, shutdown := context.WithCancel(context.Background())
+	mqp := &dontHaveTimeoutMgr{
+		clock:
clock, + ctx: ctx, + shutdown: shutdown, + peerConn: pc, + activeWants: make(map[cid.Cid]*pendingWant), + timeout: defaultTimeout, + messageLatency: &latencyEwma{alpha: messageLatencyAlpha}, + defaultTimeout: defaultTimeout, + maxTimeout: maxTimeout, + pingLatencyMultiplier: pingLatencyMultiplier, + messageLatencyMultiplier: messageLatencyMultiplier, + maxExpectedWantProcessTime: maxExpectedWantProcessTime, + onDontHaveTimeout: onDontHaveTimeout, + timeoutsTriggered: timeoutsTriggered, + } + + return mqp +} + +// Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored +func (dhtm *dontHaveTimeoutMgr) Shutdown() { + dhtm.shutdown() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Clear any pending check for timeouts + if dhtm.checkForTimeoutsTimer != nil { + dhtm.checkForTimeoutsTimer.Stop() + } +} + +// Start the dontHaveTimeoutMgr. This method is idempotent +func (dhtm *dontHaveTimeoutMgr) Start() { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Make sure the dont have timeout manager hasn't already been started + if dhtm.started { + return + } + dhtm.started = true + + // If we already have a measure of latency to the peer, use it to + // calculate a reasonable timeout + latency := dhtm.peerConn.Latency() + if latency.Nanoseconds() > 0 { + dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) + return + } + + // Otherwise measure latency by pinging the peer + go dhtm.measurePingLatency() +} + +// UpdateMessageLatency is called when we receive a response from the peer. +// It is the time between sending a request and receiving the corresponding +// response. +func (dhtm *dontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Update the message latency and the timeout + dhtm.messageLatency.update(elapsed) + oldTimeout := dhtm.timeout + dhtm.timeout = dhtm.calculateTimeoutFromMessageLatency() + + // If the timeout has decreased + if dhtm.timeout < oldTimeout { + // Check if after changing the timeout there are any pending wants that + // are now over the timeout + dhtm.checkForTimeouts() + } +} + +// measurePingLatency measures the latency to the peer by pinging it +func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { + // Wait up to defaultTimeout for a response to the ping + ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) + defer cancel() + + // Ping the peer + res := dhtm.peerConn.Ping(ctx) + if res.Error != nil { + // If there was an error, we'll just leave the timeout as + // defaultTimeout + return + } + + // Get the average latency to the peer + latency := dhtm.peerConn.Latency() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // A message has arrived so we already set the timeout based on message latency + if dhtm.messageLatency.samples > 0 { + return + } + + // Calculate a reasonable timeout based on latency + dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) + + // Check if after changing the timeout there are any pending wants that are + // now over the timeout + dhtm.checkForTimeouts() +} + +// checkForTimeouts checks pending wants to see if any are over the timeout. +// Note: this function should only be called within the lock. 
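+// The wantQueue is ordered oldest-first, so the scan can stop at the first
+// unexpired want; expired wants are batched and fired on a separate
+// goroutine (fireTimeout) so the callback does not run under the lock.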
+func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { + + if len(dhtm.wantQueue) == 0 { + return + } + + // Figure out which of the blocks that were wanted were not received + // within the timeout + expired := make([]cid.Cid, 0, len(dhtm.activeWants)) + for len(dhtm.wantQueue) > 0 { + pw := dhtm.wantQueue[0] + + // If the want is still active + if pw.active { + // The queue is in order from earliest to latest, so if we + // didn't find an expired entry we can stop iterating + if dhtm.clock.Since(pw.sent) < dhtm.timeout { + break + } + + // Add the want to the expired list + expired = append(expired, pw.c) + // Remove the want from the activeWants map + delete(dhtm.activeWants, pw.c) + } + + // Remove expired or cancelled wants from the want queue + dhtm.wantQueue = dhtm.wantQueue[1:] + } + + // Fire the timeout event for the expired wants + if len(expired) > 0 { + go dhtm.fireTimeout(expired) + } + + if len(dhtm.wantQueue) == 0 { + return + } + + // Make sure the timeout manager is still running + if dhtm.ctx.Err() != nil { + return + } + + // Schedule the next check for the moment when the oldest pending want will + // timeout + oldestStart := dhtm.wantQueue[0].sent + until := oldestStart.Add(dhtm.timeout).Sub(dhtm.clock.Now()) + if dhtm.checkForTimeoutsTimer == nil { + dhtm.checkForTimeoutsTimer = dhtm.clock.Timer(until) + go dhtm.consumeTimeouts() + } else { + dhtm.checkForTimeoutsTimer.Stop() + dhtm.checkForTimeoutsTimer.Reset(until) + } +} + +func (dhtm *dontHaveTimeoutMgr) consumeTimeouts() { + for { + select { + case <-dhtm.ctx.Done(): + return + case <-dhtm.checkForTimeoutsTimer.C: + dhtm.lk.Lock() + dhtm.checkForTimeouts() + dhtm.lk.Unlock() + } + } +} + +// AddPending adds the given keys that will expire if not cancelled before +// the timeout +func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + if len(ks) == 0 { + return + } + + start := dhtm.clock.Now() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + queueWasEmpty := len(dhtm.activeWants) == 0 + + // Record the start time for each key + for _, c := range ks { + if _, ok := dhtm.activeWants[c]; !ok { + pw := pendingWant{ + c: c, + sent: start, + active: true, + } + dhtm.activeWants[c] = &pw + dhtm.wantQueue = append(dhtm.wantQueue, &pw) + } + } + + // If there was already an earlier pending item in the queue, then there + // must already be a timeout check scheduled. If there is nothing in the + // queue then we should make sure to schedule a check. 
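+	// (checkForTimeouts is cheap when nothing has expired: it only arms a
+	// timer for the oldest pending want.)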
+	if queueWasEmpty {
+		dhtm.checkForTimeouts()
+	}
+}
+
+// CancelPending is called when we receive a response for a key
+func (dhtm *dontHaveTimeoutMgr) CancelPending(ks []cid.Cid) {
+	dhtm.lk.Lock()
+	defer dhtm.lk.Unlock()
+
+	// Mark the wants as cancelled
+	for _, c := range ks {
+		if pw, ok := dhtm.activeWants[c]; ok {
+			pw.active = false
+			delete(dhtm.activeWants, c)
+		}
+	}
+}
+
+// fireTimeout fires the onDontHaveTimeout method with the timed out keys
+func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) {
+	// Make sure the timeout manager has not been shut down
+	if dhtm.ctx.Err() != nil {
+		return
+	}
+
+	// Fire the timeout
+	dhtm.onDontHaveTimeout(pending)
+
+	// signal that a timeout fired
+	if dhtm.timeoutsTriggered != nil {
+		dhtm.timeoutsTriggered <- struct{}{}
+	}
+}
+
+// calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency
+func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromPingLatency(latency time.Duration) time.Duration {
+	// The maximum expected time for a response is
+	// the expected time to process the want + (latency * multiplier)
+	// The multiplier is to provide some padding for variable latency.
+	timeout := dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.pingLatencyMultiplier)*latency
+	if timeout > dhtm.maxTimeout {
+		timeout = dhtm.maxTimeout
+	}
+	return timeout
+}
+
+// calculateTimeoutFromMessageLatency calculates a timeout derived from message latency
+func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromMessageLatency() time.Duration {
+	timeout := dhtm.messageLatency.latency * time.Duration(dhtm.messageLatencyMultiplier)
+	if timeout > dhtm.maxTimeout {
+		timeout = dhtm.maxTimeout
+	}
+	return timeout
+}
+
+// latencyEwma is an EWMA of message latency
+type latencyEwma struct {
+	alpha   float64
+	samples uint64
+	latency time.Duration
+}
+
+// update the EWMA with the given sample
+func (le *latencyEwma) update(elapsed time.Duration) {
+	le.samples++
+
+	// Initially set alpha to be 1.0 / <number of samples>, so that early
+	// samples are weighted evenly
+	alpha := 1.0 / float64(le.samples)
+	if alpha < le.alpha {
+		// Once we have enough samples, clamp alpha
+		alpha = le.alpha
+	}
+	le.latency = time.Duration(float64(elapsed)*alpha + (1-alpha)*float64(le.latency))
+}
diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go
new file mode 100644
index 0000000000..a6a28aab16
--- /dev/null
+++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go
@@ -0,0 +1,468 @@
+package messagequeue
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	"github.com/ipfs/boxo/bitswap/internal/testutil"
+	"github.com/ipfs/boxo/internal/test"
+	cid "github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
+)
+
+type mockPeerConn struct {
+	err       error
+	latency   time.Duration
+	latencies []time.Duration
+	clock     clock.Clock
+	pinged    chan struct{}
+}
+
+func (pc *mockPeerConn) Ping(ctx context.Context) ping.Result {
+	timer := pc.clock.Timer(pc.latency)
+	pc.pinged <- struct{}{}
+	select {
+	case <-timer.C:
+		if pc.err != nil {
+			return ping.Result{Error: pc.err}
+		}
+		pc.latencies = append(pc.latencies, pc.latency)
+	case <-ctx.Done():
+	}
+	return ping.Result{RTT: pc.latency}
+}
+
+func (pc *mockPeerConn) Latency() time.Duration {
+	sum := time.Duration(0)
+	if len(pc.latencies) == 0 {
+		return sum
+	}
+	for _, l := range pc.latencies {
+		sum += l
+	}
+	return sum / time.Duration(len(pc.latencies))
+}
+
+type timeoutRecorder struct {
+	timedOutKs []cid.Cid
+	lk         sync.Mutex
+}
+
+func (tr *timeoutRecorder) onTimeout(tks []cid.Cid) {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+
+	tr.timedOutKs = append(tr.timedOutKs, tks...)
+}
+
+func (tr *timeoutRecorder) timedOutCount() int {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+
+	return len(tr.timedOutKs)
+}
+
+func (tr *timeoutRecorder) clear() {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+
+	tr.timedOutKs = nil
+}
+
+func TestDontHaveTimeoutMgrTimeout(t *testing.T) {
+	test.Flaky(t)
+
+	firstks := testutil.GenerateCids(2)
+	secondks := append(firstks, testutil.GenerateCids(3)...)
+	latency := time.Millisecond * 20
+	latMultiplier := 2
+	expProcessTime := 5 * time.Millisecond
+	expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier)
+	clock := clock.NewMock()
+	pinged := make(chan struct{})
+	pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged}
+	tr := timeoutRecorder{}
+	timeoutsTriggered := make(chan struct{})
+	dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout,
+		dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered)
+	dhtm.Start()
+	defer dhtm.Shutdown()
+	<-pinged
+	// Add first set of keys
+	dhtm.AddPending(firstks)
+
+	// Wait for less than the expected timeout
+	clock.Add(expectedTimeout - 10*time.Millisecond)
+
+	// At this stage no keys should have timed out
+	if tr.timedOutCount() > 0 {
+		t.Fatal("expected timeout not to have happened yet")
+	}
+
+	// Add second set of keys
+	dhtm.AddPending(secondks)
+
+	// Wait until after the expected timeout
+	clock.Add(20 * time.Millisecond)
+
+	<-timeoutsTriggered
+
+	// At this stage the first set of keys should have timed out
+	if tr.timedOutCount() != len(firstks) {
+		t.Fatal("expected timeout", tr.timedOutCount(), len(firstks))
+	}
+	// Clear the recorded timed out keys
+	tr.clear()
+
+	// Sleep until the second set of keys should have timed out
+	clock.Add(expectedTimeout + 10*time.Millisecond)
+
+	<-timeoutsTriggered
+
+	// At this stage all keys should have timed out. The second set included
+	// the first set of keys, but they were added before the first set timed
+	// out, so only the remaining keys should have been added.
+ if tr.timedOutCount() != len(secondks)-len(firstks) { + t.Fatal("expected second set of keys to timeout") + } +} + +func TestDontHaveTimeoutMgrCancel(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(3) + latency := time.Millisecond * 10 + latMultiplier := 1 + expProcessTime := time.Duration(0) + expectedTimeout := latency + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + timeoutsTriggered := make(chan struct{}) + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + + // Add keys + dhtm.AddPending(ks) + clock.Add(5 * time.Millisecond) + + // Cancel keys + cancelCount := 1 + dhtm.CancelPending(ks[:cancelCount]) + + // Wait for the expected timeout + clock.Add(expectedTimeout) + + <-timeoutsTriggered + + // At this stage all non-cancelled keys should have timed out + if tr.timedOutCount() != len(ks)-cancelCount { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutWantCancelWant(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(3) + latency := time.Millisecond * 20 + latMultiplier := 1 + expProcessTime := time.Duration(0) + expectedTimeout := latency + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + + // Add keys + dhtm.AddPending(ks) + + // Wait for a short time + clock.Add(expectedTimeout - 10*time.Millisecond) + + // Cancel two keys + dhtm.CancelPending(ks[:2]) + + clock.Add(5 * time.Millisecond) + + // Add back one cancelled key + dhtm.AddPending(ks[:1]) + + // Wait till after initial timeout + clock.Add(10 * time.Millisecond) + + <-timeoutsTriggered + + // At this stage only the key that was never cancelled should have timed out + if tr.timedOutCount() != 1 { + t.Fatal("expected one key to timeout") + } + + // Wait till after added back key should time out + clock.Add(latency) + + <-timeoutsTriggered + + // At this stage the key that was added back should also have timed out + if tr.timedOutCount() != 2 { + t.Fatal("expected added back key to timeout") + } +} + +func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(10) + latency := time.Millisecond * 5 + latMultiplier := 1 + expProcessTime := time.Duration(0) + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + + // Add keys repeatedly + for _, c := range ks { + dhtm.AddPending([]cid.Cid{c}) + } + + // Wait for the expected timeout + clock.Add(latency + 5*time.Millisecond) + + <-timeoutsTriggered + + // At this stage all keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected timeout") + } +} + +func 
TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 40 + latMultiplier := 1 + expProcessTime := time.Duration(0) + msgLatencyMultiplier := 1 + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + // Add keys + dhtm.AddPending(ks) + + // expectedTimeout + // = expProcessTime + latency*time.Duration(latMultiplier) + // = 0 + 40ms * 1 + // = 40ms + + // Wait for less than the expected timeout + clock.Add(25 * time.Millisecond) + + // Receive two message latency updates + dhtm.UpdateMessageLatency(time.Millisecond * 20) + dhtm.UpdateMessageLatency(time.Millisecond * 10) + + // alpha is 0.5 so timeout should be + // = (20ms * alpha) + (10ms * (1 - alpha)) + // = (20ms * 0.5) + (10ms * 0.5) + // = 15ms + // We've already slept for 25ms so with the new 15ms timeout + // the keys should have timed out + + // Give the queue some time to process the updates + clock.Add(5 * time.Millisecond) + + <-timeoutsTriggered + + if tr.timedOutCount() != len(ks) { + t.Fatal("expected keys to timeout") + } +} + +func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(2) + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: time.Second, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + msgLatencyMultiplier := 1 + testMaxTimeout := time.Millisecond * 10 + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + // Add keys + dhtm.AddPending(ks) + + // Receive a message latency update that would make the timeout greater + // than the maximum timeout + dhtm.UpdateMessageLatency(testMaxTimeout * 4) + + // Sleep until just after the maximum timeout + clock.Add(testMaxTimeout + 5*time.Millisecond) + + <-timeoutsTriggered + + // Keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected keys to timeout") + } +} + +func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 1 + latMultiplier := 2 + expProcessTime := 2 * time.Millisecond + defaultTimeout := 10 * time.Millisecond + expectedTimeout := expProcessTime + defaultTimeout + tr := timeoutRecorder{} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged, err: fmt.Errorf("ping error")} + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + + // Add keys + dhtm.AddPending(ks) + + // Sleep for less than the expected timeout + clock.Add(expectedTimeout - 5*time.Millisecond) + + // At this stage no timeout should have happened yet + if tr.timedOutCount() > 0 { + t.Fatal("expected timeout not to have 
happened yet") + } + + // Sleep until after the expected timeout + clock.Add(10 * time.Millisecond) + + <-timeoutsTriggered + + // Now the keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 200 + latMultiplier := 1 + expProcessTime := time.Duration(0) + defaultTimeout := 100 * time.Millisecond + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + + // Add keys + dhtm.AddPending(ks) + + // Sleep for less than the default timeout + clock.Add(defaultTimeout - 50*time.Millisecond) + + // At this stage no timeout should have happened yet + if tr.timedOutCount() > 0 { + t.Fatal("expected timeout not to have happened yet") + } + + // Sleep until after the default timeout + clock.Add(defaultTimeout * 2) + + <-timeoutsTriggered + + // Now the keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { + test.Flaky(t) + + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 10 + latMultiplier := 1 + expProcessTime := time.Duration(0) + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} + tr := timeoutRecorder{} + timeoutsTriggered := make(chan struct{}) + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) + dhtm.Start() + defer dhtm.Shutdown() + <-pinged + + // Add keys + dhtm.AddPending(ks) + + // Wait less than the timeout + clock.Add(latency - 5*time.Millisecond) + + // Shutdown the manager + dhtm.Shutdown() + + // Wait for the expected timeout + clock.Add(10 * time.Millisecond) + + // Manager was shut down so timeout should not have fired + if tr.timedOutCount() != 0 { + t.Fatal("expected no timeout after shutdown") + } +} diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go new file mode 100644 index 0000000000..b529bde4ed --- /dev/null +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -0,0 +1,843 @@ +package messagequeue + +import ( + "context" + "math" + "sync" + "time" + + "github.com/benbjohnson/clock" + bswl "github.com/ipfs/boxo/bitswap/client/wantlist" + bsmsg "github.com/ipfs/boxo/bitswap/message" + pb "github.com/ipfs/boxo/bitswap/message/pb" + bsnet "github.com/ipfs/boxo/bitswap/network" + cid "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" + "go.uber.org/zap" +) + +var log = logging.Logger("bitswap") +var sflog = log.Desugar() + +const ( + defaultRebroadcastInterval = 30 * time.Second + // maxRetries is the number of times to attempt to send a message before + // giving up + maxRetries = 3 + sendTimeout = 30 * time.Second + // maxMessageSize is the maximum message size in bytes + maxMessageSize = 1024 * 1024 * 
2
+	// sendErrorBackoff is the time to wait before retrying to connect after
+	// an error when trying to send a message
+	sendErrorBackoff = 100 * time.Millisecond
+	// maxPriority is the max priority as defined by the bitswap protocol
+	maxPriority = math.MaxInt32
+	// sendMessageDebounce is the debounce duration when calling sendMessage()
+	sendMessageDebounce = time.Millisecond
+	// when we reach sendMessageCutoff wants/cancels, we'll send the message immediately.
+	sendMessageCutoff = 256
+	// when we debounce for more than sendMessageMaxDelay, we'll send the
+	// message immediately.
+	sendMessageMaxDelay = 20 * time.Millisecond
+	// The maximum amount of time in which to accept a response as being valid
+	// for latency calculation (as opposed to discarding it as an outlier)
+	maxValidLatency = 30 * time.Second
+)
+
+// MessageNetwork is any network that can connect peers and generate a message
+// sender.
+type MessageNetwork interface {
+	ConnectTo(context.Context, peer.ID) error
+	NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error)
+	Latency(peer.ID) time.Duration
+	Ping(context.Context, peer.ID) ping.Result
+	Self() peer.ID
+}
+
+// MessageQueue implements a queue of want messages to send to peers.
+type MessageQueue struct {
+	ctx          context.Context
+	shutdown     func()
+	p            peer.ID
+	network      MessageNetwork
+	dhTimeoutMgr DontHaveTimeoutManager
+
+	// The maximum size of a message in bytes. Any overflow is put into the
+	// next message
+	maxMessageSize int
+
+	// The amount of time to wait when there's an error sending to a peer
+	// before retrying
+	sendErrorBackoff time.Duration
+
+	// The maximum amount of time in which to accept a response as being valid
+	// for latency calculation
+	maxValidLatency time.Duration
+
+	// Signals that there are outgoing wants / cancels ready to be processed
+	outgoingWork chan time.Time
+
+	// Channel of CIDs of blocks / HAVEs / DONT_HAVEs received from the peer
+	responses chan []cid.Cid
+
+	// Take lock whenever any of these variables are modified
+	wllock    sync.Mutex
+	bcstWants recallWantlist
+	peerWants recallWantlist
+	cancels   *cid.Set
+	priority  int32
+
+	// Don't touch any of these variables outside of the run loop
+	sender                bsnet.MessageSender
+	rebroadcastIntervalLk sync.RWMutex
+	rebroadcastInterval   time.Duration
+	rebroadcastTimer      *clock.Timer
+	// For performance reasons we just clear out the fields of the message
+	// instead of creating a new one every time.
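+	// Reuse is safe because msg is only touched from the run loop (see the
+	// note above about not touching these variables elsewhere).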
+	msg bsmsg.BitSwapMessage
+
+	// For simulating time -- uses mock in test
+	clock clock.Clock
+
+	// Used to track things that happen asynchronously -- used only in test
+	events chan messageEvent
+}
+
+// recallWantlist keeps a list of pending wants and a list of sent wants
+type recallWantlist struct {
+	// The list of wants that have not yet been sent
+	pending *bswl.Wantlist
+	// The list of wants that have been sent
+	sent *bswl.Wantlist
+	// The time at which each want was sent
+	sentAt map[cid.Cid]time.Time
+}
+
+func newRecallWantList() recallWantlist {
+	return recallWantlist{
+		pending: bswl.New(),
+		sent:    bswl.New(),
+		sentAt:  make(map[cid.Cid]time.Time),
+	}
+}
+
+// Add want to the pending list
+func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) {
+	r.pending.Add(c, priority, wtype)
+}
+
+// Remove wants from both the pending list and the list of sent wants
+func (r *recallWantlist) Remove(c cid.Cid) {
+	r.pending.Remove(c)
+	r.sent.Remove(c)
+	delete(r.sentAt, c)
+}
+
+// Remove wants by type from both the pending list and the list of sent wants
+func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) {
+	r.pending.RemoveType(c, wtype)
+	r.sent.RemoveType(c, wtype)
+	if _, ok := r.sent.Contains(c); !ok {
+		delete(r.sentAt, c)
+	}
+}
+
+// MarkSent moves the want from the pending to the sent list
+//
+// Returns true if the want was marked as sent. Returns false if the want wasn't
+// pending.
+func (r *recallWantlist) MarkSent(e bswl.Entry) bool {
+	if !r.pending.RemoveType(e.Cid, e.WantType) {
+		return false
+	}
+	r.sent.Add(e.Cid, e.Priority, e.WantType)
+	return true
+}
+
+// SentAt records the time at which a want was sent
+func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) {
+	// The want may have been cancelled in the interim
+	if _, ok := r.sent.Contains(c); ok {
+		if _, ok := r.sentAt[c]; !ok {
+			r.sentAt[c] = at
+		}
+	}
+}
+
+// ClearSentAt clears out the record of the time a want was sent.
+// We clear the sent at time when we receive a response for a key as we
+// only need the first response for latency measurement.
+func (r *recallWantlist) ClearSentAt(c cid.Cid) {
+	delete(r.sentAt, c)
+}
+
+type peerConn struct {
+	p       peer.ID
+	network MessageNetwork
+}
+
+func newPeerConnection(p peer.ID, network MessageNetwork) *peerConn {
+	return &peerConn{p, network}
+}
+
+func (pc *peerConn) Ping(ctx context.Context) ping.Result {
+	return pc.network.Ping(ctx, pc.p)
+}
+
+func (pc *peerConn) Latency() time.Duration {
+	return pc.network.Latency(pc.p)
+}
+
+// Fires when a timeout occurs waiting for a response from a peer running an
+// older version of Bitswap that doesn't support DONT_HAVE messages.
+type OnDontHaveTimeout func(peer.ID, []cid.Cid)
+
+// DontHaveTimeoutManager pings a peer to estimate latency so it can set a reasonable
+// upper bound on when to consider a DONT_HAVE request as timed out (when connected to
+// a peer that doesn't support DONT_HAVE messages)
+type DontHaveTimeoutManager interface {
+	// Start the manager (idempotent)
+	Start()
+	// Shutdown the manager (Shutdown is final, manager cannot be restarted)
+	Shutdown()
+	// AddPending adds the wants as pending a response. If they are not
+	// cancelled before the timeout, the OnDontHaveTimeout method will be called.
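+	// Typical usage (illustrative): call AddPending when want-blocks are
+	// sent to the peer, then CancelPending as soon as a block, HAVE or
+	// DONT_HAVE arrives for those keys.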
+ AddPending([]cid.Cid) + // CancelPending removes the wants + CancelPending([]cid.Cid) + // UpdateMessageLatency informs the manager of a new latency measurement + UpdateMessageLatency(time.Duration) +} + +// New creates a new MessageQueue. +func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { + onTimeout := func(ks []cid.Cid) { + log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) + onDontHaveTimeout(p, ks) + } + clock := clock.New() + dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout, clock) + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr, clock, nil) +} + +type messageEvent int + +const ( + messageQueued messageEvent = iota + messageFinishedSending + latenciesRecorded +) + +// This constructor is used by the tests +func newMessageQueue( + ctx context.Context, + p peer.ID, + network MessageNetwork, + maxMsgSize int, + sendErrorBackoff time.Duration, + maxValidLatency time.Duration, + dhTimeoutMgr DontHaveTimeoutManager, + clock clock.Clock, + events chan messageEvent) *MessageQueue { + + ctx, cancel := context.WithCancel(ctx) + return &MessageQueue{ + ctx: ctx, + shutdown: cancel, + p: p, + network: network, + dhTimeoutMgr: dhTimeoutMgr, + maxMessageSize: maxMsgSize, + bcstWants: newRecallWantList(), + peerWants: newRecallWantList(), + cancels: cid.NewSet(), + outgoingWork: make(chan time.Time, 1), + responses: make(chan []cid.Cid, 8), + rebroadcastInterval: defaultRebroadcastInterval, + sendErrorBackoff: sendErrorBackoff, + maxValidLatency: maxValidLatency, + priority: maxPriority, + // For performance reasons we just clear out the fields of the message + // after using it, instead of creating a new one every time. + msg: bsmsg.New(false), + clock: clock, + events: events, + } +} + +// Add want-haves that are part of a broadcast to all connected peers +func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { + if len(wantHaves) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + + // Schedule a message send + mq.signalWorkReady() +} + +// Add want-haves and want-blocks for the peer for this message queue. +func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + if len(wantBlocks) == 0 && len(wantHaves) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + for _, c := range wantBlocks { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) + mq.priority-- + + // We're adding a want-block for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + + // Schedule a message send + mq.signalWorkReady() +} + +// Add cancel messages for the given keys. 
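+// Cancels are only queued for keys whose wants were already sent to the
+// peer; wants still pending are simply removed from the wantlists.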
+func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { + if len(cancelKs) == 0 { + return + } + + // Cancel any outstanding DONT_HAVE timers + mq.dhTimeoutMgr.CancelPending(cancelKs) + + mq.wllock.Lock() + + workReady := false + + // Remove keys from broadcast and peer wants, and add to cancels + for _, c := range cancelKs { + // Check if a want for the key was sent + _, wasSentBcst := mq.bcstWants.sent.Contains(c) + _, wasSentPeer := mq.peerWants.sent.Contains(c) + + // Remove the want from tracking wantlists + mq.bcstWants.Remove(c) + mq.peerWants.Remove(c) + + // Only send a cancel if a want was sent + if wasSentBcst || wasSentPeer { + mq.cancels.Add(c) + workReady = true + } + } + + mq.wllock.Unlock() + + // Unlock first to be nice to the scheduler. + + // Schedule a message send + if workReady { + mq.signalWorkReady() + } +} + +// ResponseReceived is called when a message is received from the network. +// ks is the set of blocks, HAVEs and DONT_HAVEs in the message +// Note that this is just used to calculate latency. +func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { + if len(ks) == 0 { + return + } + + // These messages are just used to approximate latency, so if we get so + // many responses that they get backed up, just ignore the overflow. + select { + case mq.responses <- ks: + default: + } +} + +// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist +func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { + mq.rebroadcastIntervalLk.Lock() + mq.rebroadcastInterval = delay + if mq.rebroadcastTimer != nil { + mq.rebroadcastTimer.Reset(delay) + } + mq.rebroadcastIntervalLk.Unlock() +} + +// Startup starts the processing of messages and rebroadcasting. +func (mq *MessageQueue) Startup() { + mq.rebroadcastIntervalLk.RLock() + mq.rebroadcastTimer = mq.clock.Timer(mq.rebroadcastInterval) + mq.rebroadcastIntervalLk.RUnlock() + go mq.runQueue() +} + +// Shutdown stops the processing of messages for a message queue. +func (mq *MessageQueue) Shutdown() { + mq.shutdown() +} + +func (mq *MessageQueue) onShutdown() { + // Shut down the DONT_HAVE timeout manager + mq.dhTimeoutMgr.Shutdown() + + // Reset the streamMessageSender + if mq.sender != nil { + _ = mq.sender.Reset() + } +} + +func (mq *MessageQueue) runQueue() { + defer mq.onShutdown() + + // Create a timer for debouncing scheduled work. + scheduleWork := mq.clock.Timer(0) + if !scheduleWork.Stop() { + // Need to drain the timer if Stop() returns false + // See: https://golang.org/pkg/time/#Timer.Stop + <-scheduleWork.C + } + + var workScheduled time.Time + for mq.ctx.Err() == nil { + select { + case <-mq.rebroadcastTimer.C: + mq.rebroadcastWantlist() + + case when := <-mq.outgoingWork: + // If we have work scheduled, cancel the timer. If we + // don't, record when the work was scheduled. + // We send the time on the channel so we accurately + // track delay. + if workScheduled.IsZero() { + workScheduled = when + } else if !scheduleWork.Stop() { + // Need to drain the timer if Stop() returns false + <-scheduleWork.C + } + + // If we have too many updates and/or we've waited too + // long, send immediately. + if mq.pendingWorkCount() > sendMessageCutoff || + mq.clock.Since(workScheduled) >= sendMessageMaxDelay { + mq.sendIfReady() + workScheduled = time.Time{} + } else { + // Otherwise, extend the timer. 
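+ // Each new batch of work resets the debounce timer, but
+ // once sendMessageMaxDelay has passed since the first
+ // queued work the queue flushes immediately, so a burst
+ // of wants coalesces into a single message.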
+ scheduleWork.Reset(sendMessageDebounce)
+ if mq.events != nil {
+ mq.events <- messageQueued
+ }
+ }
+
+ case <-scheduleWork.C:
+ // We have work scheduled and haven't seen any updates
+ // in sendMessageDebounce. Send immediately.
+ workScheduled = time.Time{}
+ mq.sendIfReady()
+
+ case res := <-mq.responses:
+ // We received a response from the peer, calculate latency
+ mq.handleResponse(res)
+
+ case <-mq.ctx.Done():
+ return
+ }
+ }
+}
+
+// Periodically resend the list of wants to the peer
+func (mq *MessageQueue) rebroadcastWantlist() {
+ mq.rebroadcastIntervalLk.RLock()
+ mq.rebroadcastTimer.Reset(mq.rebroadcastInterval)
+ mq.rebroadcastIntervalLk.RUnlock()
+
+ // If some wants were transferred from the rebroadcast list
+ if mq.transferRebroadcastWants() {
+ // Send them out
+ mq.sendMessage()
+ }
+}
+
+// Transfer wants from the rebroadcast lists into the pending lists.
+func (mq *MessageQueue) transferRebroadcastWants() bool {
+ mq.wllock.Lock()
+ defer mq.wllock.Unlock()
+
+ // Check if there are any wants to rebroadcast
+ if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 {
+ return false
+ }
+
+ // Copy sent wants into pending wants lists
+ mq.bcstWants.pending.Absorb(mq.bcstWants.sent)
+ mq.peerWants.pending.Absorb(mq.peerWants.sent)
+
+ return true
+}
+
+func (mq *MessageQueue) signalWorkReady() {
+ select {
+ case mq.outgoingWork <- mq.clock.Now():
+ default:
+ }
+}
+
+func (mq *MessageQueue) sendIfReady() {
+ if mq.hasPendingWork() {
+ mq.sendMessage()
+ }
+}
+
+func (mq *MessageQueue) sendMessage() {
+ sender, err := mq.initializeSender()
+ if err != nil {
+ // If we fail to initialize the sender, the networking layer will
+ // emit a Disconnect event and the MessageQueue will get cleaned up
+ log.Infof("Could not open message sender to peer %s: %s", mq.p, err)
+ mq.Shutdown()
+ return
+ }
+
+ // Make sure the DONT_HAVE timeout manager has started
+ // Note: Start is idempotent
+ mq.dhTimeoutMgr.Start()
+
+ // Convert want lists to a Bitswap Message
+ message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave())
+
+ // After processing the message, clear out its fields to save memory
+ defer mq.msg.Reset(false)
+
+ if message.Empty() {
+ return
+ }
+
+ wantlist := message.Wantlist()
+ mq.logOutgoingMessage(wantlist)
+
+ if err := sender.SendMsg(mq.ctx, message); err != nil {
+ // If the message couldn't be sent, the networking layer will
+ // emit a Disconnect event and the MessageQueue will get cleaned up
+ log.Infof("Could not send message to peer %s: %s", mq.p, err)
+ mq.Shutdown()
+ return
+ }
+
+ // Record sent time so as to calculate message latency
+ onSent()
+
+ // Set a timer to wait for responses
+ mq.simulateDontHaveWithTimeout(wantlist)
+
+ // If the message was too big and only a subset of wants could be
+ // sent, schedule sending the rest of the wants in the next
+ // iteration of the event loop.
+ if mq.hasPendingWork() {
+ mq.signalWorkReady()
+ }
+}
+
+// If a want-block times out, simulate a DONT_HAVE response.
+// This is necessary when making requests to peers running an older version of
+// Bitswap that doesn't support the DONT_HAVE response, and is also useful to
+// mitigate getting blocked by a peer that takes a long time to respond.
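+// Only want-blocks that request a DONT_HAVE response (SendDontHave) and are
+// still in the sent list are registered with the timeout manager.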
+func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { + // Get the CID of each want-block that expects a DONT_HAVE response + wants := make([]cid.Cid, 0, len(wantlist)) + + mq.wllock.Lock() + + for _, entry := range wantlist { + if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { + // Unlikely, but just in case check that the block hasn't been + // received in the interim + c := entry.Cid + if _, ok := mq.peerWants.sent.Contains(c); ok { + wants = append(wants, c) + } + } + } + + mq.wllock.Unlock() + + // Add wants to DONT_HAVE timeout manager + mq.dhTimeoutMgr.AddPending(wants) +} + +// handleResponse is called when a response is received from the peer, +// with the CIDs of received blocks / HAVEs / DONT_HAVEs +func (mq *MessageQueue) handleResponse(ks []cid.Cid) { + now := mq.clock.Now() + earliest := time.Time{} + + mq.wllock.Lock() + + // Check if the keys in the response correspond to any request that was + // sent to the peer. + // + // - Find the earliest request so as to calculate the longest latency as + // we want to be conservative when setting the timeout + // - Ignore latencies that are very long, as these are likely to be outliers + // caused when + // - we send a want to peer A + // - peer A does not have the block + // - peer A later receives the block from peer B + // - peer A sends us HAVE / block + for _, c := range ks { + if at, ok := mq.bcstWants.sentAt[c]; ok { + if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { + earliest = at + } + mq.bcstWants.ClearSentAt(c) + } + if at, ok := mq.peerWants.sentAt[c]; ok { + if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { + earliest = at + } + // Clear out the sent time for the CID because we only want to + // record the latency between the request and the first response + // for that CID (not subsequent responses) + mq.peerWants.ClearSentAt(c) + } + } + + mq.wllock.Unlock() + + if !earliest.IsZero() { + // Inform the timeout manager of the calculated latency + mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) + } + if mq.events != nil { + mq.events <- latenciesRecorded + } +} + +func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { + // Save some CPU cycles and allocations if log level is higher than debug + if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { + return + } + + self := mq.network.Self() + for _, e := range wantlist { + if e.Cancel { + if e.WantType == pb.Message_Wantlist_Have { + log.Debugw("sent message", + "type", "CANCEL_WANT_HAVE", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } else { + log.Debugw("sent message", + "type", "CANCEL_WANT_BLOCK", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } + } else { + if e.WantType == pb.Message_Wantlist_Have { + log.Debugw("sent message", + "type", "WANT_HAVE", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } else { + log.Debugw("sent message", + "type", "WANT_BLOCK", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } + } + } +} + +// Whether there is work to be processed +func (mq *MessageQueue) hasPendingWork() bool { + return mq.pendingWorkCount() > 0 +} + +// The amount of work that is waiting to be processed +func (mq *MessageQueue) pendingWorkCount() int { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() +} + +// Convert the lists of wants into a Bitswap message +func (mq *MessageQueue) 
extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { + // Get broadcast and regular wantlist entries. + mq.wllock.Lock() + peerEntries := mq.peerWants.pending.Entries() + bcstEntries := mq.bcstWants.pending.Entries() + cancels := mq.cancels.Keys() + if !supportsHave { + filteredPeerEntries := peerEntries[:0] + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // don't send want-haves (only send want-blocks) + // + // Doing this here under the lock makes everything else in this + // function simpler. + // + // TODO: We should _try_ to avoid recording these in the first + // place if possible. + for _, e := range peerEntries { + if e.WantType == pb.Message_Wantlist_Have { + mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) + } else { + filteredPeerEntries = append(filteredPeerEntries, e) + } + } + peerEntries = filteredPeerEntries + } + mq.wllock.Unlock() + + // We prioritize cancels, then regular wants, then broadcast wants. + + var ( + msgSize = 0 // size of message so far + sentCancels = 0 // number of cancels in message + sentPeerEntries = 0 // number of peer entries in message + sentBcstEntries = 0 // number of broadcast entries in message + ) + + // Add each cancel to the message + for _, c := range cancels { + msgSize += mq.msg.Cancel(c) + sentCancels++ + + if msgSize >= mq.maxMessageSize { + goto FINISH + } + } + + // Next, add the wants. If we have too many entries to fit into a single + // message, sort by priority and include the high priority ones first. + + for _, e := range peerEntries { + msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) + sentPeerEntries++ + + if msgSize >= mq.maxMessageSize { + goto FINISH + } + } + + // Add each broadcast want-have to the message + for _, e := range bcstEntries { + // Broadcast wants are sent as want-have + wantType := pb.Message_Wantlist_Have + + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // send a want-block instead + if !supportsHave { + wantType = pb.Message_Wantlist_Block + } + + msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) + sentBcstEntries++ + + if msgSize >= mq.maxMessageSize { + goto FINISH + } + } + +FINISH: + + // Finally, re-take the lock, mark sent and remove any entries from our + // message that we've decided to cancel at the last minute. + mq.wllock.Lock() + for i, e := range peerEntries[:sentPeerEntries] { + if !mq.peerWants.MarkSent(e) { + // It changed. 
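+ // The want was cancelled or changed while the lock was
+ // released, so drop it from the outgoing message.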
+ mq.msg.Remove(e.Cid) + peerEntries[i].Cid = cid.Undef + } + } + + for i, e := range bcstEntries[:sentBcstEntries] { + if !mq.bcstWants.MarkSent(e) { + mq.msg.Remove(e.Cid) + bcstEntries[i].Cid = cid.Undef + } + } + + for _, c := range cancels[:sentCancels] { + if !mq.cancels.Has(c) { + mq.msg.Remove(c) + } else { + mq.cancels.Remove(c) + } + } + mq.wllock.Unlock() + + // When the message has been sent, record the time at which each want was + // sent so we can calculate message latency + onSent := func() { + now := mq.clock.Now() + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, e := range peerEntries[:sentPeerEntries] { + if e.Cid.Defined() { // Check if want was cancelled in the interim + mq.peerWants.SentAt(e.Cid, now) + } + } + + for _, e := range bcstEntries[:sentBcstEntries] { + if e.Cid.Defined() { // Check if want was cancelled in the interim + mq.bcstWants.SentAt(e.Cid, now) + } + } + if mq.events != nil { + mq.events <- messageFinishedSending + } + } + + return mq.msg, onSent +} + +func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { + if mq.sender == nil { + opts := &bsnet.MessageSenderOpts{ + MaxRetries: maxRetries, + SendTimeout: sendTimeout, + SendErrorBackoff: sendErrorBackoff, + } + nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p, opts) + if err != nil { + return nil, err + } + + mq.sender = nsender + } + return mq.sender, nil +} diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go new file mode 100644 index 0000000000..59788f50b6 --- /dev/null +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -0,0 +1,862 @@ +package messagequeue + +import ( + "context" + "fmt" + "math" + "math/rand" + "sync" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/ipfs/boxo/bitswap/internal/testutil" + bsmsg "github.com/ipfs/boxo/bitswap/message" + pb "github.com/ipfs/boxo/bitswap/message/pb" + bsnet "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" +) + +type fakeMessageNetwork struct { + connectError error + messageSenderError error + messageSender bsnet.MessageSender +} + +func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { + return fmn.connectError +} + +func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { + if fmn.messageSenderError == nil { + return fmn.messageSender, nil + } + return nil, fmn.messageSenderError +} + +func (fms *fakeMessageNetwork) Self() peer.ID { return "" } +func (fms *fakeMessageNetwork) Latency(peer.ID) time.Duration { return 0 } +func (fms *fakeMessageNetwork) Ping(context.Context, peer.ID) ping.Result { + return ping.Result{Error: fmt.Errorf("ping error")} +} + +type fakeDontHaveTimeoutMgr struct { + lk sync.Mutex + ks []cid.Cid + latencyUpds []time.Duration +} + +func (fp *fakeDontHaveTimeoutMgr) Start() {} +func (fp *fakeDontHaveTimeoutMgr) Shutdown() {} +func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + fp.lk.Lock() + defer fp.lk.Unlock() + + s := cid.NewSet() + for _, c := range append(fp.ks, ks...) 
{ + s.Add(c) + } + fp.ks = s.Keys() +} +func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { + fp.lk.Lock() + defer fp.lk.Unlock() + + s := cid.NewSet() + for _, c := range fp.ks { + s.Add(c) + } + for _, c := range ks { + s.Remove(c) + } + fp.ks = s.Keys() +} +func (fp *fakeDontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { + fp.lk.Lock() + defer fp.lk.Unlock() + + fp.latencyUpds = append(fp.latencyUpds, elapsed) +} +func (fp *fakeDontHaveTimeoutMgr) latencyUpdates() []time.Duration { + fp.lk.Lock() + defer fp.lk.Unlock() + + return fp.latencyUpds +} +func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { + fp.lk.Lock() + defer fp.lk.Unlock() + + return len(fp.ks) +} + +type fakeMessageSender struct { + lk sync.Mutex + reset chan<- struct{} + messagesSent chan<- []bsmsg.Entry + supportsHave bool +} + +func newFakeMessageSender(reset chan<- struct{}, + messagesSent chan<- []bsmsg.Entry, supportsHave bool) *fakeMessageSender { + + return &fakeMessageSender{ + reset: reset, + messagesSent: messagesSent, + supportsHave: supportsHave, + } +} + +func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + fms.lk.Lock() + defer fms.lk.Unlock() + + fms.messagesSent <- msg.Wantlist() + return nil +} +func (fms *fakeMessageSender) Close() error { return nil } +func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } +func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } + +func mockTimeoutCb(peer.ID, []cid.Cid) {} + +func collectMessages(ctx context.Context, + t *testing.T, + messagesSent <-chan []bsmsg.Entry, + timeout time.Duration) [][]bsmsg.Entry { + var messagesReceived [][]bsmsg.Entry + timeoutctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + for { + select { + case messageReceived := <-messagesSent: + messagesReceived = append(messagesReceived, messageReceived) + case <-timeoutctx.Done(): + return messagesReceived + } + } +} + +func totalEntriesLength(messages [][]bsmsg.Entry) int { + totalLength := 0 + for _, m := range messages { + totalLength += len(m) + } + return totalLength +} + +func expectEvent(t *testing.T, events <-chan messageEvent, expectedEvent messageEvent) { + evt := <-events + if evt != expectedEvent { + t.Fatal("message not queued") + } +} + +func TestStartupAndShutdown(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + bcstwh := testutil.GenerateCids(10) + + messageQueue.Startup() + messageQueue.AddBroadcastWantHaves(bcstwh) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent for broadcast want-haves") + } + + firstMessage := messages[0] + if len(firstMessage) != len(bcstwh) { + t.Fatal("did not add all wants to want list") + } + for _, entry := range firstMessage { + if entry.Cancel { + t.Fatal("initial add sent cancel entry when it should not have") + } + } + + messageQueue.Shutdown() + + timeoutctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + select { + case <-resetChan: + case <-timeoutctx.Done(): + t.Fatal("message sender should have been reset but wasn't") + } +} + +func TestSendingMessagesDeduped(t 
*testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("Messages were not deduped") + } +} + +func TestSendingMessagesPartialDupe(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) + messageQueue.AddWants(wantBlocks[3:], wantHaves[3:]) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("messages were not correctly deduped") + } +} + +func TestSendingMessagesPriority(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves1 := testutil.GenerateCids(5) + wantHaves2 := testutil.GenerateCids(5) + wantHaves := append(wantHaves1, wantHaves2...) + wantBlocks1 := testutil.GenerateCids(5) + wantBlocks2 := testutil.GenerateCids(5) + wantBlocks := append(wantBlocks1, wantBlocks2...) 
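+ // Note: AddWants assigns strictly decreasing priorities (want-haves first,
+ // then want-blocks) in the order wants are added, starting from
+ // maxPriority, which is what the priority checks below rely on.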
+ + messageQueue.Startup() + messageQueue.AddWants(wantBlocks1, wantHaves1) + messageQueue.AddWants(wantBlocks2, wantHaves2) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + byCid := make(map[cid.Cid]bsmsg.Entry) + for _, entry := range messages[0] { + byCid[entry.Cid] = entry + } + + // Check that earliest want-haves have highest priority + for i := range wantHaves { + if i > 0 { + if byCid[wantHaves[i]].Priority > byCid[wantHaves[i-1]].Priority { + t.Fatal("earliest want-haves should have higher priority") + } + } + } + + // Check that earliest want-blocks have highest priority + for i := range wantBlocks { + if i > 0 { + if byCid[wantBlocks[i]].Priority > byCid[wantBlocks[i-1]].Priority { + t.Fatal("earliest want-blocks should have higher priority") + } + } + } + + // Check that want-haves have higher priority than want-blocks within + // same group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantBlocks[0]].Priority { + t.Fatal("want-haves should have higher priority than want-blocks") + } + } + } + + // Check that all items in first group have higher priority than first item + // in second group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantHaves2[0]].Priority { + t.Fatal("items in first group should have higher priority than items in second group") + } + } + } +} + +func TestCancelOverridesPendingWants(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + + wantHaves := testutil.GenerateCids(2) + wantBlocks := testutil.GenerateCids(2) + cancels := []cid.Cid{wantBlocks[0], wantHaves[0]} + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddCancels(cancels) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)-len(cancels) { + t.Fatal("Wrong message count") + } + + // Cancelled 1 want-block and 1 want-have before they were sent + // so that leaves 1 want-block and 1 want-have + wb, wh, cl := filterWantTypes(messages[0]) + if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 1 || !wh[0].Equals(wantHaves[1]) { + t.Fatal("Expected 1 want-have") + } + // Cancelled wants before they were sent, so no cancel should be sent + // to the network + if len(cl) != 0 { + t.Fatal("Expected no cancels") + } + + // Cancel the remaining want-blocks and want-haves + cancels = append(wantHaves, wantBlocks...) 
+ messageQueue.AddCancels(cancels) + messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + // The remaining 2 cancels should be sent to the network as they are for + // wants that were sent to the network + _, _, cl = filterWantTypes(messages[0]) + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") + } +} + +func TestWantOverridesPendingCancels(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + + cids := testutil.GenerateCids(3) + wantBlocks := cids[:1] + wantHaves := cids[1:] + + messageQueue.Startup() + + // Add 1 want-block and 2 want-haves + messageQueue.AddWants(wantBlocks, wantHaves) + + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + if totalEntriesLength(messages) != len(wantBlocks)+len(wantHaves) { + t.Fatal("Wrong message count", totalEntriesLength(messages)) + } + + // Cancel existing wants + messageQueue.AddCancels(cids) + // Override one cancel with a want-block (before cancel is sent to network) + messageQueue.AddWants(cids[:1], []cid.Cid{}) + + messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + if totalEntriesLength(messages) != 3 { + t.Fatal("Wrong message count", totalEntriesLength(messages)) + } + + // Should send 1 want-block and 2 cancels + wb, wh, cl := filterWantTypes(messages[0]) + if len(wb) != 1 { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 0 { + t.Fatal("Expected 0 want-have") + } + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") + } +} + +func TestWantlistRebroadcast(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + dhtm := &fakeDontHaveTimeoutMgr{} + clock := clock.NewMock() + events := make(chan messageEvent) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) + bcstwh := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + // Add some broadcast want-haves + messageQueue.Startup() + messageQueue.AddBroadcastWantHaves(bcstwh) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + message := <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // All broadcast want-haves should have been sent + if len(message) != len(bcstwh) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + clock.Add(8 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // All the want-haves should have been rebroadcast + if len(message) != len(bcstwh) { + t.Fatal("did not rebroadcast all wants") + } + + // Tell message queue to rebroadcast after a long time (so it doesn't + // interfere with the next message collection), then send out some + // regular wants and collect them + messageQueue.SetRebroadcastInterval(1 * time.Second) + messageQueue.AddWants(wantBlocks, wantHaves) + expectEvent(t, events, messageQueued) + 
clock.Add(10 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // All new wants should have been sent + if len(message) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + + select { + case <-messagesSent: + t.Fatal("should only be one message in queue") + default: + } + + // Tell message queue to rebroadcast after 10ms, then wait 15ms + messageQueue.SetRebroadcastInterval(10 * time.Millisecond) + clock.Add(15 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // Both original and new wants should have been rebroadcast + totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) + if len(message) != totalWants { + t.Fatal("did not rebroadcast all wants") + } + + // Cancel some of the wants + messageQueue.SetRebroadcastInterval(1 * time.Second) + cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) + messageQueue.AddCancels(cancels) + expectEvent(t, events, messageQueued) + clock.Add(10 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + select { + case <-messagesSent: + t.Fatal("should only be one message in queue") + default: + } + + // Cancels for each want should have been sent + if len(message) != len(cancels) { + t.Fatal("wrong number of cancels") + } + for _, entry := range message { + if !entry.Cancel { + t.Fatal("expected cancels") + } + } + + // Tell message queue to rebroadcast after 10ms, then wait 15ms + messageQueue.SetRebroadcastInterval(10 * time.Millisecond) + clock.Add(15 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + if len(message) != totalWants-len(cancels) { + t.Fatal("did not rebroadcast all wants") + } +} + +func TestSendingLargeMessages(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} + peerID := testutil.GeneratePeers(1)[0] + + wantBlocks := testutil.GenerateCids(10) + entrySize := 44 + maxMsgSize := entrySize * 3 // 3 wants + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, []cid.Cid{}) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if + // we send 10 want-blocks we should expect 4 messages: + // [***] [***] [***] [*] + if len(messages) != 4 { + t.Fatal("expected 4 messages to be sent, got", len(messages)) + } + if totalEntriesLength(messages) != len(wantBlocks) { + t.Fatal("wrong number of wants") + } +} + +func TestSendToPeerThatDoesntSupportHave(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + messageQueue.Startup() + + // If the remote peer doesn't support HAVE / DONT_HAVE messages + // - want-blocks should be sent normally + // - want-haves should not be sent + // - broadcast want-haves should be sent as 
want-blocks + + // Check broadcast want-haves + bcwh := testutil.GenerateCids(10) + messageQueue.AddBroadcastWantHaves(bcwh) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl := messages[0] + if len(wl) != len(bcwh) { + t.Fatal("wrong number of entries in wantlist", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("broadcast want-haves should be sent as want-blocks") + } + } + + // Check regular want-haves and want-blocks + wbs := testutil.GenerateCids(10) + whs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, whs) + messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl = messages[0] + if len(wl) != len(wbs) { + t.Fatal("should only send want-blocks (no want-haves)", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("should only send want-blocks") + } + } +} + +func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) + messageQueue.Startup() + + wbs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, nil) + collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + // Check want-blocks are added to DontHaveTimeoutMgr + if dhtm.pendingCount() != len(wbs) { + t.Fatal("want-blocks not added to DontHaveTimeoutMgr") + } + + cancelCount := 2 + messageQueue.AddCancels(wbs[:cancelCount]) + collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + // Check want-blocks are removed from DontHaveTimeoutMgr + if dhtm.pendingCount() != len(wbs)-cancelCount { + t.Fatal("want-blocks not removed from DontHaveTimeoutMgr") + } +} + +func TestResponseReceived(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + clock := clock.NewMock() + events := make(chan messageEvent) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) + messageQueue.Startup() + + cids := testutil.GenerateCids(10) + + // Add some wants + messageQueue.AddWants(cids[:5], nil) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // simulate 10 milliseconds passing + clock.Add(10 * time.Millisecond) + + // Add some wants and wait another 10ms + messageQueue.AddWants(cids[5:8], nil) + expectEvent(t, events, messageQueued) + clock.Add(10 * time.Millisecond) + <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // Receive a response for some of the wants from both groups + messageQueue.ResponseReceived([]cid.Cid{cids[0], cids[6], cids[9]}) 
+ + // Check that message queue informs DHTM of received responses + expectEvent(t, events, latenciesRecorded) + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + // Elapsed time should be between when the first want was sent and the + // response received (about 20ms) + if upds[0] != 20*time.Millisecond { + t.Fatal("expected latency to be time since oldest message sent") + } +} + +func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) + messageQueue.Startup() + + cids := testutil.GenerateCids(2) + + // Add some wants and wait 10ms + messageQueue.AddWants(cids, nil) + collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + // Receive a response for the wants + messageQueue.ResponseReceived(cids) + + // Wait another 10ms + time.Sleep(10 * time.Millisecond) + + // Message queue should inform DHTM of first response + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + + // Receive a second response for the same wants + messageQueue.ResponseReceived(cids) + + // Wait for the response to be processed by the message queue + time.Sleep(10 * time.Millisecond) + + // Message queue should not inform DHTM of second response because the + // CIDs are a subset of the first response + upds = dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } +} + +func TestResponseReceivedDiscardsOutliers(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + maxValLatency := 30 * time.Millisecond + dhtm := &fakeDontHaveTimeoutMgr{} + clock := clock.NewMock() + events := make(chan messageEvent) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm, clock, events) + messageQueue.Startup() + + cids := testutil.GenerateCids(4) + + // Add some wants and wait 20ms + messageQueue.AddWants(cids[:2], nil) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + <-messagesSent + expectEvent(t, events, messageFinishedSending) + + clock.Add(20 * time.Millisecond) + + // Add some more wants and wait long enough that the first wants will be + // outside the maximum valid latency, but the second wants will be inside + messageQueue.AddWants(cids[2:], nil) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + <-messagesSent + expectEvent(t, events, messageFinishedSending) + + clock.Add(maxValLatency - 10*time.Millisecond + sendMessageDebounce) + // Receive a response for the wants + messageQueue.ResponseReceived(cids) + + // Check that the latency calculation excludes the first wants + // (because they're older than max valid latency) + expectEvent(t, events, latenciesRecorded) + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + // 
Elapsed time should not include outliers + if upds[0] > maxValLatency { + t.Fatal("expected latency calculation to discard outliers") + } +} + +func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { + var wbs []cid.Cid + var whs []cid.Cid + var cls []cid.Cid + for _, e := range wantlist { + if e.Cancel { + cls = append(cls, e.Cid) + } else if e.WantType == pb.Message_Wantlist_Block { + wbs = append(wbs, e.Cid) + } else { + whs = append(whs, e.Cid) + } + } + return wbs, whs, cls +} + +// Simplistic benchmark to allow us to simulate conditions on the gateways +func BenchmarkMessageQueue(b *testing.B) { + ctx := context.Background() + + createQueue := func() *MessageQueue { + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) + messageQueue.Startup() + + go func() { + for { + <-messagesSent + time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond) + } + }() + + return messageQueue + } + + // Create a handful of message queues to start with + var qs []*MessageQueue + for i := 0; i < 5; i++ { + qs = append(qs, createQueue()) + } + + for n := 0; n < b.N; n++ { + // Create a new message queue every 10 ticks + if n%10 == 0 { + qs = append(qs, createQueue()) + } + + // Pick a random message queue, favoring those created later + qn := len(qs) + i := int(math.Floor(float64(qn) * float64(1-rand.Float32()*rand.Float32()))) + if i >= qn { // because of floating point math + i = qn - 1 + } + + // Alternately add either a few wants or a lot of broadcast wants + if rand.Intn(2) == 0 { + wants := testutil.GenerateCids(10) + qs[i].AddWants(wants[:2], wants[2:]) + } else { + wants := testutil.GenerateCids(60) + qs[i].AddBroadcastWantHaves(wants) + } + } +} diff --git a/bitswap/client/internal/notifications/notifications.go b/bitswap/client/internal/notifications/notifications.go new file mode 100644 index 0000000000..707837d629 --- /dev/null +++ b/bitswap/client/internal/notifications/notifications.go @@ -0,0 +1,139 @@ +package notifications + +import ( + "context" + "sync" + + pubsub "github.com/cskr/pubsub" + blocks "github.com/ipfs/boxo/blocks" + cid "github.com/ipfs/go-cid" +) + +const bufferSize = 16 + +// PubSub is a simple interface for publishing blocks and being able to subscribe +// for cids. It's used internally by bitswap to decouple receiving blocks +// and actually providing them back to the GetBlocks caller. +type PubSub interface { + Publish(blocks ...blocks.Block) + Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block + Shutdown() +} + +// New generates a new PubSub interface. 
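+//
+// A minimal usage sketch (hypothetical wiring; ctx, keys and blk are assumed
+// to exist in the caller):
+//
+// ps := New()
+// defer ps.Shutdown()
+// ch := ps.Subscribe(ctx, keys...)
+// ps.Publish(blk) // blk's CID must be one of keys
+// b := <-ch // each subscribed key is delivered at most once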
+func New() PubSub { + return &impl{ + wrapped: *pubsub.New(bufferSize), + closed: make(chan struct{}), + } +} + +type impl struct { + lk sync.RWMutex + wrapped pubsub.PubSub + + closed chan struct{} +} + +func (ps *impl) Publish(blocks ...blocks.Block) { + ps.lk.RLock() + defer ps.lk.RUnlock() + select { + case <-ps.closed: + return + default: + } + + for _, block := range blocks { + ps.wrapped.Pub(block, block.Cid().KeyString()) + } +} + +func (ps *impl) Shutdown() { + ps.lk.Lock() + defer ps.lk.Unlock() + select { + case <-ps.closed: + return + default: + } + close(ps.closed) + ps.wrapped.Shutdown() +} + +// Subscribe returns a channel of blocks for the given |keys|. |blockChannel| +// is closed if the |ctx| times out or is cancelled, or after receiving the blocks +// corresponding to |keys|. +func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { + + blocksCh := make(chan blocks.Block, len(keys)) + valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking + if len(keys) == 0 { + close(blocksCh) + return blocksCh + } + + // prevent shutdown + ps.lk.RLock() + defer ps.lk.RUnlock() + + select { + case <-ps.closed: + close(blocksCh) + return blocksCh + default: + } + + // AddSubOnceEach listens for each key in the list, and closes the channel + // once all keys have been received + ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) + go func() { + defer func() { + close(blocksCh) + + ps.lk.RLock() + defer ps.lk.RUnlock() + // Don't touch the pubsub instance if we're + // already closed. + select { + case <-ps.closed: + return + default: + } + + ps.wrapped.Unsub(valuesCh) + }() + + for { + select { + case <-ctx.Done(): + return + case <-ps.closed: + case val, ok := <-valuesCh: + if !ok { + return + } + block, ok := val.(blocks.Block) + if !ok { + return + } + select { + case <-ctx.Done(): + return + case blocksCh <- block: // continue + case <-ps.closed: + } + } + } + }() + + return blocksCh +} + +func toStrings(keys []cid.Cid) []string { + strs := make([]string, 0, len(keys)) + for _, key := range keys { + strs = append(strs, key.KeyString()) + } + return strs +} diff --git a/bitswap/client/internal/notifications/notifications_test.go b/bitswap/client/internal/notifications/notifications_test.go new file mode 100644 index 0000000000..7ad2dc2fb4 --- /dev/null +++ b/bitswap/client/internal/notifications/notifications_test.go @@ -0,0 +1,202 @@ +package notifications + +import ( + "bytes" + "context" + "testing" + "time" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" +) + +func TestDuplicates(t *testing.T) { + test.Flaky(t) + + b1 := blocks.NewBlock([]byte("1")) + b2 := blocks.NewBlock([]byte("2")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid()) + + n.Publish(b1) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + assertBlocksEqual(t, b1, blockRecvd) + + n.Publish(b1) // ignored duplicate + + n.Publish(b2) + blockRecvd, ok = <-ch + if !ok { + t.Fail() + } + assertBlocksEqual(t, b2, blockRecvd) +} + +func TestPublishSubscribe(t *testing.T) { + test.Flaky(t) + + blockSent := blocks.NewBlock([]byte("Greetings from The Interval")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), blockSent.Cid()) + + n.Publish(blockSent) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + + assertBlocksEqual(t, blockRecvd, blockSent) + +} 
+
+func TestSubscribeMany(t *testing.T) {
+ test.Flaky(t)
+
+ e1 := blocks.NewBlock([]byte("1"))
+ e2 := blocks.NewBlock([]byte("2"))
+
+ n := New()
+ defer n.Shutdown()
+ ch := n.Subscribe(context.Background(), e1.Cid(), e2.Cid())
+
+ n.Publish(e1)
+ r1, ok := <-ch
+ if !ok {
+ t.Fatal("didn't receive first expected block")
+ }
+ assertBlocksEqual(t, e1, r1)
+
+ n.Publish(e2)
+ r2, ok := <-ch
+ if !ok {
+ t.Fatal("didn't receive second expected block")
+ }
+ assertBlocksEqual(t, e2, r2)
+}
+
+// TestDuplicateSubscribe tests a scenario where a given block
+// would be requested twice at the same time.
+func TestDuplicateSubscribe(t *testing.T) {
+ test.Flaky(t)
+
+ e1 := blocks.NewBlock([]byte("1"))
+
+ n := New()
+ defer n.Shutdown()
+ ch1 := n.Subscribe(context.Background(), e1.Cid())
+ ch2 := n.Subscribe(context.Background(), e1.Cid())
+
+ n.Publish(e1)
+ r1, ok := <-ch1
+ if !ok {
+ t.Fatal("didn't receive first expected block")
+ }
+ assertBlocksEqual(t, e1, r1)
+
+ r2, ok := <-ch2
+ if !ok {
+ t.Fatal("didn't receive second expected block")
+ }
+ assertBlocksEqual(t, e1, r2)
+}
+
+func TestShutdownBeforeUnsubscribe(t *testing.T) {
+ test.Flaky(t)
+
+ e1 := blocks.NewBlock([]byte("1"))
+
+ n := New()
+ ctx, cancel := context.WithCancel(context.Background())
+ ch := n.Subscribe(ctx, e1.Cid()) // subscribe before shutting down
+ n.Shutdown()
+ cancel()
+
+ select {
+ case _, ok := <-ch:
+ if ok {
+ t.Fatal("channel should have been closed")
+ }
+ case <-time.After(5 * time.Second):
+ t.Fatal("channel should have been closed")
+ }
+}
+
+func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) {
+ test.Flaky(t)
+
+ n := New()
+ defer n.Shutdown()
+ ch := n.Subscribe(context.Background()) // no keys provided
+ if _, ok := <-ch; ok {
+ t.Fatal("should be closed if no keys provided")
+ }
+}
+
+func TestCarryOnWhenDeadlineExpires(t *testing.T) {
+ test.Flaky(t)
+
+ impossibleDeadline := time.Nanosecond
+ fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline)
+ defer cancel()
+
+ n := New()
+ defer n.Shutdown()
+ block := blocks.NewBlock([]byte("A Missed Connection"))
+ blockChannel := n.Subscribe(fastExpiringCtx, block.Cid())
+
+ assertBlockChannelNil(t, blockChannel)
+}
+
+func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {
+ test.Flaky(t)
+
+ g := blocksutil.NewBlockGenerator()
+ ctx, cancel := context.WithCancel(context.Background())
+ n := New()
+ defer n.Shutdown()
+
+ t.Log("generate a large number of blocks. exceed default buffer")
+ bs := g.Blocks(1000)
+ ks := func() []cid.Cid {
+ var keys []cid.Cid
+ for _, b := range bs {
+ keys = append(keys, b.Cid())
+ }
+ return keys
+ }()
+
+ _ = n.Subscribe(ctx, ks...)
// ignore received channel + + t.Log("cancel context before any blocks published") + cancel() + for _, b := range bs { + n.Publish(b) + } + + t.Log("publishing the large number of blocks to the ignored channel must not deadlock") +} + +func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { + _, ok := <-blockChannel + if ok { + t.Fail() + } +} + +func assertBlocksEqual(t *testing.T, a, b blocks.Block) { + if !bytes.Equal(a.RawData(), b.RawData()) { + t.Fatal("blocks aren't equal") + } + if a.Cid() != b.Cid() { + t.Fatal("block keys aren't equal") + } +} diff --git a/bitswap/client/internal/peermanager/peermanager.go b/bitswap/client/internal/peermanager/peermanager.go new file mode 100644 index 0000000000..dbce5bdd60 --- /dev/null +++ b/bitswap/client/internal/peermanager/peermanager.go @@ -0,0 +1,246 @@ +package peermanager + +import ( + "context" + "sync" + + logging "github.com/ipfs/go-log" + "github.com/ipfs/go-metrics-interface" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +var log = logging.Logger("bs:peermgr") + +// PeerQueue provides a queue of messages to be sent for a single peer. +type PeerQueue interface { + AddBroadcastWantHaves([]cid.Cid) + AddWants([]cid.Cid, []cid.Cid) + AddCancels([]cid.Cid) + ResponseReceived(ks []cid.Cid) + Startup() + Shutdown() +} + +type Session interface { + ID() uint64 + SignalAvailability(peer.ID, bool) +} + +// PeerQueueFactory provides a function that will create a PeerQueue. +type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue + +// PeerManager manages a pool of peers and sends messages to peers in the pool. +type PeerManager struct { + // sync access to peerQueues and peerWantManager + pqLk sync.RWMutex + // peerQueues -- interact through internal utility functions get/set/remove/iterate + peerQueues map[peer.ID]PeerQueue + pwm *peerWantManager + + createPeerQueue PeerQueueFactory + ctx context.Context + + psLk sync.RWMutex + sessions map[uint64]Session + peerSessions map[peer.ID]map[uint64]struct{} + + self peer.ID +} + +// New creates a new PeerManager, given a context and a peerQueueFactory. +func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { + wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() + wantBlockGauge := metrics.NewCtx(ctx, "want_blocks_total", "Number of want-blocks in wantlist.").Gauge() + return &PeerManager{ + peerQueues: make(map[peer.ID]PeerQueue), + pwm: newPeerWantManager(wantGauge, wantBlockGauge), + createPeerQueue: createPeerQueue, + ctx: ctx, + self: self, + + sessions: make(map[uint64]Session), + peerSessions: make(map[peer.ID]map[uint64]struct{}), + } +} + +func (pm *PeerManager) AvailablePeers() []peer.ID { + // TODO: Rate-limit peers + return pm.ConnectedPeers() +} + +// ConnectedPeers returns a list of peers this PeerManager is managing. +func (pm *PeerManager) ConnectedPeers() []peer.ID { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + peers := make([]peer.ID, 0, len(pm.peerQueues)) + for p := range pm.peerQueues { + peers = append(peers, p) + } + return peers +} + +// Connected is called to add a new peer to the pool, and send it an initial set +// of wants. 
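+// Connected holds pqLk while signalAvailability takes psLk, so on this path
+// pqLk is always acquired before psLk.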
+func (pm *PeerManager) Connected(p peer.ID) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + pq := pm.getOrCreate(p) + + // Inform the peer want manager that there's a new peer + pm.pwm.addPeer(pq, p) + + // Inform the sessions that the peer has connected + pm.signalAvailability(p, true) +} + +// Disconnected is called to remove a peer from the pool. +func (pm *PeerManager) Disconnected(p peer.ID) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + pq, ok := pm.peerQueues[p] + + if !ok { + return + } + + // Inform the sessions that the peer has disconnected + pm.signalAvailability(p, false) + + // Clean up the peer + delete(pm.peerQueues, p) + pq.Shutdown() + pm.pwm.removePeer(p) +} + +// ResponseReceived is called when a message is received from the network. +// ks is the set of blocks, HAVEs and DONT_HAVEs in the message +// Note that this is just used to calculate latency. +func (pm *PeerManager) ResponseReceived(p peer.ID, ks []cid.Cid) { + pm.pqLk.Lock() + pq, ok := pm.peerQueues[p] + pm.pqLk.Unlock() + + if ok { + pq.ResponseReceived(ks) + } +} + +// BroadcastWantHaves broadcasts want-haves to all peers (used by the session +// to discover seeds). +// For each peer it filters out want-haves that have previously been sent to +// the peer. +func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + pm.pwm.broadcastWantHaves(wantHaves) +} + +// SendWants sends the given want-blocks and want-haves to the given peer. +// It filters out wants that have previously been sent to the peer. +func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + if _, ok := pm.peerQueues[p]; ok { + pm.pwm.sendWants(p, wantBlocks, wantHaves) + } +} + +// SendCancels sends cancels for the given keys to all peers who had previously +// received a want for those keys. +func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + // Send a CANCEL to each peer that has been sent a want-block or want-have + pm.pwm.sendCancels(cancelKs) +} + +// CurrentWants returns the list of pending wants (both want-haves and want-blocks). +func (pm *PeerManager) CurrentWants() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.getWants() +} + +// CurrentWantBlocks returns the list of pending want-blocks +func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.getWantBlocks() +} + +// CurrentWantHaves returns the list of pending want-haves +func (pm *PeerManager) CurrentWantHaves() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.getWantHaves() +} + +func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { + pq, ok := pm.peerQueues[p] + if !ok { + pq = pm.createPeerQueue(pm.ctx, p) + pq.Startup() + pm.peerQueues[p] = pq + } + return pq +} + +// RegisterSession tells the PeerManager that the given session is interested +// in events about the given peer. +func (pm *PeerManager) RegisterSession(p peer.ID, s Session) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + if _, ok := pm.sessions[s.ID()]; !ok { + pm.sessions[s.ID()] = s + } + + if _, ok := pm.peerSessions[p]; !ok { + pm.peerSessions[p] = make(map[uint64]struct{}) + } + pm.peerSessions[p][s.ID()] = struct{}{} +} + +// UnregisterSession tells the PeerManager that the given session is no longer +// interested in PeerManager events. 
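+// There is no reverse index from session ID to peers, so UnregisterSession
+// scans every peer's session set.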
+func (pm *PeerManager) UnregisterSession(ses uint64) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + for p := range pm.peerSessions { + delete(pm.peerSessions[p], ses) + if len(pm.peerSessions[p]) == 0 { + delete(pm.peerSessions, p) + } + } + + delete(pm.sessions, ses) +} + +// signalAvailability is called when a peer's connectivity changes. +// It informs interested sessions. +func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + sesIds, ok := pm.peerSessions[p] + if !ok { + return + } + for sesId := range sesIds { + if s, ok := pm.sessions[sesId]; ok { + s.SignalAvailability(p, isConnected) + } + } +} diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go new file mode 100644 index 0000000000..40e1f072cc --- /dev/null +++ b/bitswap/client/internal/peermanager/peermanager_test.go @@ -0,0 +1,391 @@ +package peermanager + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +type msg struct { + p peer.ID + wantBlocks []cid.Cid + wantHaves []cid.Cid + cancels []cid.Cid +} + +type mockPeerQueue struct { + p peer.ID + msgs chan msg +} + +func (fp *mockPeerQueue) Startup() {} +func (fp *mockPeerQueue) Shutdown() {} + +func (fp *mockPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, whs, nil} +} +func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { + fp.msgs <- msg{fp.p, wbs, whs, nil} +} +func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, nil, cs} +} +func (fp *mockPeerQueue) ResponseReceived(ks []cid.Cid) { +} + +type peerWants struct { + wantHaves []cid.Cid + wantBlocks []cid.Cid + cancels []cid.Cid +} + +func collectMessages(ch chan msg, timeout time.Duration) map[peer.ID]peerWants { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + collected := make(map[peer.ID]peerWants) + for { + select { + case m := <-ch: + pw, ok := collected[m.p] + if !ok { + pw = peerWants{} + } + pw.wantHaves = append(pw.wantHaves, m.wantHaves...) + pw.wantBlocks = append(pw.wantBlocks, m.wantBlocks...) + pw.cancels = append(pw.cancels, m.cancels...) 
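+			// peerWants is stored by value, so write the merged copy back.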
+ collected[m.p] = pw + case <-ctx.Done(): + return collected + } + } +} + +func makePeerQueueFactory(msgs chan msg) PeerQueueFactory { + return func(ctx context.Context, p peer.ID) PeerQueue { + return &mockPeerQueue{ + p: p, + msgs: msgs, + } + } +} + +func TestAddingAndRemovingPeers(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + + tp := testutil.GeneratePeers(6) + self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] + peerManager := New(ctx, peerQueueFactory, self) + + peerManager.Connected(peer1) + peerManager.Connected(peer2) + peerManager.Connected(peer3) + + connectedPeers := peerManager.ConnectedPeers() + + if !testutil.ContainsPeer(connectedPeers, peer1) || + !testutil.ContainsPeer(connectedPeers, peer2) || + !testutil.ContainsPeer(connectedPeers, peer3) { + t.Fatal("Peers not connected that should be connected") + } + + if testutil.ContainsPeer(connectedPeers, peer4) || + testutil.ContainsPeer(connectedPeers, peer5) { + t.Fatal("Peers connected that shouldn't be connected") + } + + // disconnect a peer + peerManager.Disconnected(peer1) + connectedPeers = peerManager.ConnectedPeers() + + if testutil.ContainsPeer(connectedPeers, peer1) { + t.Fatal("Peer should have been disconnected but was not") + } + + // reconnect peer + peerManager.Connected(peer1) + connectedPeers = peerManager.ConnectedPeers() + + if !testutil.ContainsPeer(connectedPeers, peer1) { + t.Fatal("Peer should have been connected but was not") + } +} + +func TestBroadcastOnConnect(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + + cids := testutil.GenerateCids(2) + peerManager.BroadcastWantHaves(ctx, cids) + + // Connect with two broadcast wants for first peer + peerManager.Connected(peer1) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } +} + +func TestBroadcastWantHaves(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + + cids := testutil.GenerateCids(3) + + // Broadcast the first two. + peerManager.BroadcastWantHaves(ctx, cids[:2]) + + // First peer should get them. 
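+	// (connecting replays existing broadcast wants to the new peer)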
+ peerManager.Connected(peer1) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } + + // Connect to second peer + peerManager.Connected(peer2) + + // Send a broadcast to all peers, including cid that was already sent to + // first peer + peerManager.BroadcastWantHaves(ctx, []cid.Cid{cids[0], cids[2]}) + collected = collectMessages(msgs, 2*time.Millisecond) + + // One of the want-haves was already sent to peer1 + if len(collected[peer1].wantHaves) != 1 { + t.Fatalf("Expected 1 want-haves to be sent to first peer, got %d", + len(collected[peer1].wantHaves)) + } + if len(collected[peer2].wantHaves) != 3 { + t.Fatalf("Expected 3 want-haves to be sent to second peer, got %d", + len(collected[peer2].wantHaves)) + } +} + +func TestSendWants(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) + + peerManager.Connected(peer1) + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") + } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } + + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2], cids[3]}) + collected = collectMessages(msgs, 2*time.Millisecond) + + // First want-have and want-block should be filtered (because they were + // already sent) + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") + } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } +} + +func TestSendCancels(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) + + // Connect to peer1 and peer2 + peerManager.Connected(peer1) + peerManager.Connected(peer2) + + // Send 2 want-blocks and 1 want-have to peer1 + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2]}) + + // Clear messages + collectMessages(msgs, 2*time.Millisecond) + + // Send cancels for 1 want-block and 1 want-have + peerManager.SendCancels(ctx, []cid.Cid{cids[0], cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) + + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") + } + if len(collected[peer1].cancels) != 2 { + t.Fatal("Expected cancel to be sent for want-block and want-have sent to peer") + } + + // Send cancels for all cids + peerManager.SendCancels(ctx, cids) + collected = collectMessages(msgs, 2*time.Millisecond) + + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") + } + if len(collected[peer1].cancels) != 1 { + t.Fatal("Expected cancel to be sent for remaining want-block") + } +} + +func (s *sess) ID() uint64 { + return s.id +} 
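+// SignalAvailability records the availability signal so the test can assert
+// on it.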
+func (s *sess) SignalAvailability(p peer.ID, isAvailable bool) {
+	s.available[p] = isAvailable
+}
+
+type sess struct {
+	id        uint64
+	available map[peer.ID]bool
+}
+
+func newSess(id uint64) *sess {
+	return &sess{id, make(map[peer.ID]bool)}
+}
+
+func TestSessionRegistration(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	msgs := make(chan msg, 16)
+	peerQueueFactory := makePeerQueueFactory(msgs)
+
+	tp := testutil.GeneratePeers(3)
+	self, p1, p2 := tp[0], tp[1], tp[2]
+	peerManager := New(ctx, peerQueueFactory, self)
+
+	id := uint64(1)
+	s := newSess(id)
+	peerManager.RegisterSession(p1, s)
+	if s.available[p1] {
+		t.Fatal("Expected peer not to be available until connected")
+	}
+	peerManager.RegisterSession(p2, s)
+	if s.available[p2] {
+		t.Fatal("Expected peer not to be available until connected")
+	}
+
+	peerManager.Connected(p1)
+	if !s.available[p1] {
+		t.Fatal("Expected signal callback")
+	}
+	peerManager.Connected(p2)
+	if !s.available[p2] {
+		t.Fatal("Expected signal callback")
+	}
+
+	peerManager.Disconnected(p1)
+	if s.available[p1] {
+		t.Fatal("Expected signal callback")
+	}
+	if !s.available[p2] {
+		t.Fatal("Expected signal callback only for disconnected peer")
+	}
+
+	peerManager.UnregisterSession(id)
+
+	peerManager.Connected(p1)
+	if s.available[p1] {
+		t.Fatal("Expected no signal callback (session unregistered)")
+	}
+}
+
+type benchPeerQueue struct{}
+
+func (*benchPeerQueue) Startup()  {}
+func (*benchPeerQueue) Shutdown() {}
+
+func (*benchPeerQueue) AddBroadcastWantHaves(whs []cid.Cid)   {}
+func (*benchPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) {}
+func (*benchPeerQueue) AddCancels(cs []cid.Cid)               {}
+func (*benchPeerQueue) ResponseReceived(ks []cid.Cid)         {}
+
+// BenchmarkPeerManager is a simplistic benchmark to allow us to stress-test
+// the PeerManager
+func BenchmarkPeerManager(b *testing.B) {
+	b.StopTimer()
+
+	ctx := context.Background()
+
+	peerQueueFactory := func(ctx context.Context, p peer.ID) PeerQueue {
+		return &benchPeerQueue{}
+	}
+
+	self := testutil.GeneratePeers(1)[0]
+	peers := testutil.GeneratePeers(500)
+	peerManager := New(ctx, peerQueueFactory, self)
+
+	// Create a bunch of connections
+	connected := 0
+	for i := 0; i < len(peers); i++ {
+		peerManager.Connected(peers[i])
+		connected++
+	}
+
+	var wanted []cid.Cid
+
+	b.StartTimer()
+	for n := 0; n < b.N; n++ {
+		// Pick a random peer
+		i := rand.Intn(connected)
+
+		// Alternately add either a few wants or many broadcast wants
+		r := rand.Intn(8)
+		if r == 0 {
+			wants := testutil.GenerateCids(10)
+			peerManager.SendWants(ctx, peers[i], wants[:2], wants[2:])
+			wanted = append(wanted, wants...)
+		} else if r == 1 {
+			wants := testutil.GenerateCids(30)
+			peerManager.BroadcastWantHaves(ctx, wants)
+			wanted = append(wanted, wants...)
+		} else {
+			limit := len(wanted) / 10
+			cancel := wanted[:limit]
+			wanted = wanted[limit:]
+			peerManager.SendCancels(ctx, cancel)
+		}
+	}
+}
diff --git a/bitswap/client/internal/peermanager/peerwantmanager.go b/bitswap/client/internal/peermanager/peerwantmanager.go
new file mode 100644
index 0000000000..0bc4732ca5
--- /dev/null
+++ b/bitswap/client/internal/peermanager/peerwantmanager.go
@@ -0,0 +1,464 @@
+package peermanager
+
+import (
+	"bytes"
+	"fmt"
+
+	cid "github.com/ipfs/go-cid"
+	peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Gauge can be used to keep track of a metric that increases and decreases
+// incrementally. It is used by the peerWantManager to track the number of
+// want-blocks that are active (i.e., sent but no response received).
+type Gauge interface {
+	Inc()
+	Dec()
+}
+
+// peerWantManager keeps track of which want-haves and want-blocks have been
+// sent to each peer, so that the PeerManager doesn't send duplicates.
+type peerWantManager struct {
+	// peerWants maps peers to outstanding wants.
+	// A peer's wants is the _union_ of the broadcast wants and the wants in
+	// this list.
+	peerWants map[peer.ID]*peerWant
+
+	// Reverse index of all wants in peerWants.
+	wantPeers map[cid.Cid]map[peer.ID]struct{}
+
+	// broadcastWants tracks all the current broadcast wants.
+	broadcastWants *cid.Set
+
+	// Keeps track of the number of active want-haves & want-blocks
+	wantGauge Gauge
+	// Keeps track of the number of active want-blocks
+	wantBlockGauge Gauge
+}
+
+type peerWant struct {
+	wantBlocks *cid.Set
+	wantHaves  *cid.Set
+	peerQueue  PeerQueue
+}
+
+// newPeerWantManager creates a new peerWantManager with a Gauge that keeps
+// track of the number of active want-blocks (i.e., sent but no response
+// received)
+func newPeerWantManager(wantGauge Gauge, wantBlockGauge Gauge) *peerWantManager {
+	return &peerWantManager{
+		broadcastWants: cid.NewSet(),
+		peerWants:      make(map[peer.ID]*peerWant),
+		wantPeers:      make(map[cid.Cid]map[peer.ID]struct{}),
+		wantGauge:      wantGauge,
+		wantBlockGauge: wantBlockGauge,
+	}
+}
+
+// addPeer adds a peer whose wants we need to keep track of. It sends the
+// current list of broadcast wants to the peer.
+func (pwm *peerWantManager) addPeer(peerQueue PeerQueue, p peer.ID) {
+	if _, ok := pwm.peerWants[p]; ok {
+		return
+	}
+
+	pwm.peerWants[p] = &peerWant{
+		wantBlocks: cid.NewSet(),
+		wantHaves:  cid.NewSet(),
+		peerQueue:  peerQueue,
+	}
+
+	// Broadcast any live want-haves to the newly connected peer
+	if pwm.broadcastWants.Len() > 0 {
+		wants := pwm.broadcastWants.Keys()
+		peerQueue.AddBroadcastWantHaves(wants)
+	}
+}
+
+// removePeer removes a peer and its associated wants from tracking
+func (pwm *peerWantManager) removePeer(p peer.ID) {
+	pws, ok := pwm.peerWants[p]
+	if !ok {
+		return
+	}
+
+	// Clean up want-blocks
+	_ = pws.wantBlocks.ForEach(func(c cid.Cid) error {
+		// Clean up want-blocks from the reverse index
+		pwm.reverseIndexRemove(c, p)
+
+		// Decrement the gauges for each pending want-block sent to the peer
+		peerCounts := pwm.wantPeerCounts(c)
+		if peerCounts.wantBlock == 0 {
+			pwm.wantBlockGauge.Dec()
+		}
+		if !peerCounts.wanted() {
+			pwm.wantGauge.Dec()
+		}
+
+		return nil
+	})
+
+	// Clean up want-haves
+	_ = pws.wantHaves.ForEach(func(c cid.Cid) error {
+		// Clean up want-haves from the reverse index
+		pwm.reverseIndexRemove(c, p)
+
+		// Decrement the gauge for each pending want-have sent to the peer
+		peerCounts := pwm.wantPeerCounts(c)
+		if !peerCounts.wanted() {
+			pwm.wantGauge.Dec()
+		}
+		return nil
+	})
+
+	delete(pwm.peerWants, p)
+}
+
+// broadcastWantHaves sends want-haves to any peers that have not yet received them.
+func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) {
+	unsent := make([]cid.Cid, 0, len(wantHaves))
+	for _, c := range wantHaves {
+		if pwm.broadcastWants.Has(c) {
+			// Already a broadcast want, skip it.
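+			// It was recorded and sent to every connected peer when it was
+			// first added.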
+ continue + } + pwm.broadcastWants.Add(c) + unsent = append(unsent, c) + + // If no peer has a pending want for the key + if _, ok := pwm.wantPeers[c]; !ok { + // Increment the total wants gauge + pwm.wantGauge.Inc() + } + } + + if len(unsent) == 0 { + return + } + + // Allocate a single buffer to filter broadcast wants for each peer + bcstWantsBuffer := make([]cid.Cid, 0, len(unsent)) + + // Send broadcast wants to each peer + for _, pws := range pwm.peerWants { + peerUnsent := bcstWantsBuffer[:0] + for _, c := range unsent { + // If we've already sent a want to this peer, skip them. + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + peerUnsent = append(peerUnsent, c) + } + } + + if len(peerUnsent) > 0 { + pws.peerQueue.AddBroadcastWantHaves(peerUnsent) + } + } +} + +// sendWants only sends the peer the want-blocks and want-haves that have not +// already been sent to it. +func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) + fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) + + // Get the existing want-blocks and want-haves for the peer + pws, ok := pwm.peerWants[p] + if !ok { + // In practice this should never happen + log.Errorf("sendWants() called with peer %s but peer not found in peerWantManager", string(p)) + return + } + + // Iterate over the requested want-blocks + for _, c := range wantBlocks { + // If the want-block hasn't been sent to the peer + if pws.wantBlocks.Has(c) { + continue + } + + // Increment the want gauges + peerCounts := pwm.wantPeerCounts(c) + if peerCounts.wantBlock == 0 { + pwm.wantBlockGauge.Inc() + } + if !peerCounts.wanted() { + pwm.wantGauge.Inc() + } + + // Make sure the CID is no longer recorded as a want-have + pws.wantHaves.Remove(c) + + // Record that the CID was sent as a want-block + pws.wantBlocks.Add(c) + + // Add the CID to the results + fltWantBlks = append(fltWantBlks, c) + + // Update the reverse index + pwm.reverseIndexAdd(c, p) + } + + // Iterate over the requested want-haves + for _, c := range wantHaves { + // If we've already broadcasted this want, don't bother with a + // want-have. 
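+		// (the peer already received it as a broadcast want-have)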
+ if pwm.broadcastWants.Has(c) { + continue + } + + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Increment the total wants gauge + peerCounts := pwm.wantPeerCounts(c) + if !peerCounts.wanted() { + pwm.wantGauge.Inc() + } + + // Record that the CID was sent as a want-have + pws.wantHaves.Add(c) + + // Add the CID to the results + fltWantHvs = append(fltWantHvs, c) + + // Update the reverse index + pwm.reverseIndexAdd(c, p) + } + } + + // Send the want-blocks and want-haves to the peer + pws.peerQueue.AddWants(fltWantBlks, fltWantHvs) +} + +// sendCancels sends a cancel to each peer to which a corresponding want was +// sent +func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { + if len(cancelKs) == 0 { + return + } + + // Record how many peers have a pending want-block and want-have for each + // key to be cancelled + peerCounts := make(map[cid.Cid]wantPeerCnts, len(cancelKs)) + for _, c := range cancelKs { + peerCounts[c] = pwm.wantPeerCounts(c) + } + + // Create a buffer to use for filtering cancels per peer, with the + // broadcast wants at the front of the buffer (broadcast wants are sent to + // all peers) + broadcastCancels := make([]cid.Cid, 0, len(cancelKs)) + for _, c := range cancelKs { + if pwm.broadcastWants.Has(c) { + broadcastCancels = append(broadcastCancels, c) + } + } + + // Send cancels to a particular peer + send := func(p peer.ID, pws *peerWant) { + // Start from the broadcast cancels + toCancel := broadcastCancels + + // For each key to be cancelled + for _, c := range cancelKs { + // Check if a want was sent for the key + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + continue + } + + // Unconditionally remove from the want lists. + pws.wantBlocks.Remove(c) + pws.wantHaves.Remove(c) + + // If it's a broadcast want, we've already added it to + // the peer cancels. + if !pwm.broadcastWants.Has(c) { + toCancel = append(toCancel, c) + } + } + + // Send cancels to the peer + if len(toCancel) > 0 { + pws.peerQueue.AddCancels(toCancel) + } + } + + if len(broadcastCancels) > 0 { + // If a broadcast want is being cancelled, send the cancel to all + // peers + for p, pws := range pwm.peerWants { + send(p, pws) + } + } else { + // Only send cancels to peers that received a corresponding want + cancelPeers := make(map[peer.ID]struct{}, len(pwm.wantPeers[cancelKs[0]])) + for _, c := range cancelKs { + for p := range pwm.wantPeers[c] { + cancelPeers[p] = struct{}{} + } + } + for p := range cancelPeers { + pws, ok := pwm.peerWants[p] + if !ok { + // Should never happen but check just in case + log.Errorf("sendCancels - peerWantManager index missing peer %s", p) + continue + } + + send(p, pws) + } + } + + // Decrement the wants gauges + for _, c := range cancelKs { + peerCnts := peerCounts[c] + + // If there were any peers that had a pending want-block for the key + if peerCnts.wantBlock > 0 { + // Decrement the want-block gauge + pwm.wantBlockGauge.Dec() + } + + // If there was a peer that had a pending want or it was a broadcast want + if peerCnts.wanted() { + // Decrement the total wants gauge + pwm.wantGauge.Dec() + } + } + + // Remove cancelled broadcast wants + for _, c := range broadcastCancels { + pwm.broadcastWants.Remove(c) + } + + // Batch-remove the reverse-index. There's no need to clear this index + // peer-by-peer. 
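+	// Every key in cancelKs is being cancelled for all peers at once.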
+	for _, c := range cancelKs {
+		delete(pwm.wantPeers, c)
+	}
+}
+
+// wantPeerCnts stores the number of peers that have pending wants for a CID
+type wantPeerCnts struct {
+	// number of peers that have a pending want-block for the CID
+	wantBlock int
+	// number of peers that have a pending want-have for the CID
+	wantHave int
+	// whether the CID is a broadcast want
+	isBroadcast bool
+}
+
+// wanted returns true if any peer wants the CID or it's a broadcast want
+func (pwc *wantPeerCnts) wanted() bool {
+	return pwc.wantBlock > 0 || pwc.wantHave > 0 || pwc.isBroadcast
+}
+
+// wantPeerCounts counts how many peers have a pending want-block and want-have
+// for the given CID
+func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts {
+	blockCount := 0
+	haveCount := 0
+	for p := range pwm.wantPeers[c] {
+		pws, ok := pwm.peerWants[p]
+		if !ok {
+			log.Errorf("reverse index has extra peer %s for key %s in peerWantManager", string(p), c)
+			continue
+		}
+
+		if pws.wantBlocks.Has(c) {
+			blockCount++
+		} else if pws.wantHaves.Has(c) {
+			haveCount++
+		}
+	}
+
+	return wantPeerCnts{blockCount, haveCount, pwm.broadcastWants.Has(c)}
+}
+
+// reverseIndexAdd adds the peer to the list of peers that have sent a want
+// with the cid. It returns true if the cid had no peers in the index before.
+func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool {
+	peers, ok := pwm.wantPeers[c]
+	if !ok {
+		peers = make(map[peer.ID]struct{}, 10)
+		pwm.wantPeers[c] = peers
+	}
+	peers[p] = struct{}{}
+	return !ok
+}
+
+// reverseIndexRemove removes the peer from the list of peers that have sent a
+// want with the cid.
+func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) {
+	if peers, ok := pwm.wantPeers[c]; ok {
+		delete(peers, p)
+		if len(peers) == 0 {
+			delete(pwm.wantPeers, c)
+		}
+	}
+}
+
+// getWantBlocks returns the set of all want-blocks sent to all peers
+func (pwm *peerWantManager) getWantBlocks() []cid.Cid {
+	res := cid.NewSet()
+
+	// Iterate over all known peers
+	for _, pws := range pwm.peerWants {
+		// Iterate over all want-blocks
+		_ = pws.wantBlocks.ForEach(func(c cid.Cid) error {
+			// Add the CID to the results
+			res.Add(c)
+			return nil
+		})
+	}
+
+	return res.Keys()
+}
+
+// getWantHaves returns the set of all want-haves sent to all peers
+func (pwm *peerWantManager) getWantHaves() []cid.Cid {
+	res := cid.NewSet()
+
+	// Iterate over all peers with active wants.
+	for _, pws := range pwm.peerWants {
+		// Iterate over all want-haves
+		_ = pws.wantHaves.ForEach(func(c cid.Cid) error {
+			// Add the CID to the results
+			res.Add(c)
+			return nil
+		})
+	}
+	_ = pwm.broadcastWants.ForEach(func(c cid.Cid) error {
+		res.Add(c)
+		return nil
+	})
+
+	return res.Keys()
+}
+
+// getWants returns the set of all wants (both want-blocks and want-haves).
+func (pwm *peerWantManager) getWants() []cid.Cid {
+	res := pwm.broadcastWants.Keys()
+
+	// Iterate over all targeted wants, skipping ones that are already in the
+	// broadcast list.
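+	// (map iteration order is random, so the returned order is unspecified)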
+ for c := range pwm.wantPeers { + if pwm.broadcastWants.Has(c) { + continue + } + res = append(res, c) + } + + return res +} + +func (pwm *peerWantManager) String() string { + var b bytes.Buffer + for p, ws := range pwm.peerWants { + b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) + for _, c := range ws.wantHaves.Keys() { + b.WriteString(fmt.Sprintf(" want-have %s\n", c)) + } + for _, c := range ws.wantBlocks.Keys() { + b.WriteString(fmt.Sprintf(" want-block %s\n", c)) + } + } + return b.String() +} diff --git a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go new file mode 100644 index 0000000000..618217d6b2 --- /dev/null +++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go @@ -0,0 +1,531 @@ +package peermanager + +import ( + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +type gauge struct { + count int +} + +func (g *gauge) Inc() { + g.count++ +} +func (g *gauge) Dec() { + g.count-- +} + +type mockPQ struct { + bcst []cid.Cid + wbs []cid.Cid + whs []cid.Cid + cancels []cid.Cid +} + +func (mpq *mockPQ) clear() { + mpq.bcst = nil + mpq.wbs = nil + mpq.whs = nil + mpq.cancels = nil +} + +func (mpq *mockPQ) Startup() {} +func (mpq *mockPQ) Shutdown() {} + +func (mpq *mockPQ) AddBroadcastWantHaves(whs []cid.Cid) { + mpq.bcst = append(mpq.bcst, whs...) +} +func (mpq *mockPQ) AddWants(wbs []cid.Cid, whs []cid.Cid) { + mpq.wbs = append(mpq.wbs, wbs...) + mpq.whs = append(mpq.whs, whs...) +} +func (mpq *mockPQ) AddCancels(cs []cid.Cid) { + mpq.cancels = append(mpq.cancels, cs...) 
+} +func (mpq *mockPQ) ResponseReceived(ks []cid.Cid) { +} + +func clearSent(pqs map[peer.ID]PeerQueue) { + for _, pqi := range pqs { + pqi.(*mockPQ).clear() + } +} + +func TestEmpty(t *testing.T) { + test.Flaky(t) + + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + if len(pwm.getWantBlocks()) > 0 { + t.Fatal("Expected GetWantBlocks() to have length 0") + } + if len(pwm.getWantHaves()) > 0 { + t.Fatal("Expected GetWantHaves() to have length 0") + } +} + +func TestPWMBroadcastWantHaves(t *testing.T) { + test.Flaky(t) + + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + peers := testutil.GeneratePeers(3) + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + cids3 := testutil.GenerateCids(2) + + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + if len(pq.bcst) > 0 { + t.Errorf("expected no broadcast wants") + } + } + + // Broadcast 2 cids to 2 peers + pwm.broadcastWantHaves(cids) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids) { + t.Fatal("Expected all cids to be broadcast") + } + } + + // Broadcasting same cids should have no effect + clearSent(peerQueues) + pwm.broadcastWantHaves(cids) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 0 { + t.Fatal("Expected 0 want-haves") + } + } + + // Broadcast 2 other cids + clearSent(peerQueues) + pwm.broadcastWantHaves(cids2) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids2) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Broadcast mix of old and new cids + clearSent(peerQueues) + pwm.broadcastWantHaves(append(cids, cids3...)) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + // Only new cids should be broadcast + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids3) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Sending want-block for a cid should prevent broadcast to that peer + clearSent(peerQueues) + cids4 := testutil.GenerateCids(4) + wantBlocks := []cid.Cid{cids4[0], cids4[2]} + p0 := peers[0] + p1 := peers[1] + pwm.sendWants(p0, wantBlocks, []cid.Cid{}) + + pwm.broadcastWantHaves(cids4) + pq0 := peerQueues[p0].(*mockPQ) + if len(pq0.bcst) != 2 { // only broadcast 2 / 4 want-haves + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq0.bcst, []cid.Cid{cids4[1], cids4[3]}) { + t.Fatalf("Expected unsent cids to be broadcast") + } + pq1 := peerQueues[p1].(*mockPQ) + if len(pq1.bcst) != 4 { // broadcast all 4 want-haves + t.Fatal("Expected 4 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq1.bcst, cids4) { + t.Fatal("Expected all cids to be broadcast") + } + + allCids := cids + allCids = append(allCids, cids2...) + allCids = append(allCids, cids3...) + allCids = append(allCids, cids4...) 
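+	// allCids now holds every broadcast key sent so far; a peer added below
+	// should receive all of them at once.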
+ + // Add another peer + peer2 := peers[2] + pq2 := &mockPQ{} + peerQueues[peer2] = pq2 + pwm.addPeer(pq2, peer2) + if !testutil.MatchKeysIgnoreOrder(pq2.bcst, allCids) { + t.Fatalf("Expected all cids to be broadcast.") + } + + clearSent(peerQueues) + pwm.broadcastWantHaves(allCids) + if len(pq2.bcst) != 0 { + t.Errorf("did not expect to have CIDs to broadcast") + } +} + +func TestPWMSendWants(t *testing.T) { + test.Flaky(t) + + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + } + pq0 := peerQueues[p0].(*mockPQ) + pq1 := peerQueues[p1].(*mockPQ) + + // Send 2 want-blocks and 2 want-haves to p0 + clearSent(peerQueues) + pwm.sendWants(p0, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids2) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 + // - 1 old want-block and 2 new want-blocks + // - 1 old want-have and 2 new want-haves + clearSent(peerQueues) + cids3 := testutil.GenerateCids(2) + cids4 := testutil.GenerateCids(2) + pwm.sendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids3) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids4) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 as want-blocks: 1 new want-block, 1 old want-have + clearSent(peerQueues) + cids5 := testutil.GenerateCids(1) + newWantBlockOldWantHave := append(cids5, cids2[0]) + pwm.sendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) + // If a want was sent as a want-have, it should be ok to now send it as a + // want-block + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, newWantBlockOldWantHave) { + t.Fatal("Expected 2 want-blocks") + } + if len(pq0.whs) != 0 { + t.Fatal("Expected 0 want-haves") + } + + // Send to p0 as want-haves: 1 new want-have, 1 old want-block + clearSent(peerQueues) + cids6 := testutil.GenerateCids(1) + newWantHaveOldWantBlock := append(cids6, cids[0]) + pwm.sendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) + // If a want was previously sent as a want-block, it should not be + // possible to now send it as a want-have + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids6) { + t.Fatal("Expected 1 want-have") + } + if len(pq0.wbs) != 0 { + t.Fatal("Expected 0 want-blocks") + } + + // Send 2 want-blocks and 2 want-haves to p1 + pwm.sendWants(p1, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(pq1.wbs, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pq1.whs, cids2) { + t.Fatal("Expected 2 want-haves") + } +} + +func TestPWMSendCancels(t *testing.T) { + test.Flaky(t) + + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + wb1 := testutil.GenerateCids(2) + wh1 := testutil.GenerateCids(2) + wb2 := testutil.GenerateCids(2) + wh2 := testutil.GenerateCids(2) + allwb := append(wb1, wb2...) + allwh := append(wh1, wh2...) 
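+	// allwb/allwh together cover every want sent to either peer below.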
+ + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + } + pq0 := peerQueues[p0].(*mockPQ) + pq1 := peerQueues[p1].(*mockPQ) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, wb1, wh1) + // Send 3 want-blocks and 3 want-haves to p1 + // (1 overlapping want-block / want-have with p0) + pwm.sendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) + + if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { + t.Fatal("Expected 4 cids to be wanted") + } + if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), allwh) { + t.Fatal("Expected 4 cids to be wanted") + } + + // Cancel 1 want-block and 1 want-have that were sent to p0 + clearSent(peerQueues) + pwm.sendCancels([]cid.Cid{wb1[0], wh1[0]}) + // Should cancel the want-block and want-have + if len(pq1.cancels) != 0 { + t.Fatal("Expected no cancels sent to p1") + } + if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[0], wh1[0]}) { + t.Fatal("Expected 2 cids to be cancelled") + } + if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { + t.Fatal("Expected 3 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), append(wh2, wh1[1])) { + t.Fatal("Expected 3 want-haves") + } + + // Cancel everything + clearSent(peerQueues) + allCids := append(allwb, allwh...) + pwm.sendCancels(allCids) + // Should cancel the remaining want-blocks and want-haves for p0 + if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[1], wh1[1]}) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + + // Should cancel the remaining want-blocks and want-haves for p1 + remainingP1 := append(wb2, wh2...) + remainingP1 = append(remainingP1, wb1[1], wh1[1]) + if len(pq1.cancels) != len(remainingP1) { + t.Fatal("mismatch", len(pq1.cancels), len(remainingP1)) + } + if !testutil.MatchKeysIgnoreOrder(pq1.cancels, remainingP1) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + if len(pwm.getWantBlocks()) != 0 { + t.Fatal("Expected 0 want-blocks") + } + if len(pwm.getWantHaves()) != 0 { + t.Fatal("Expected 0 want-haves") + } +} + +func TestStats(t *testing.T) { + test.Flaky(t) + + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + peerQueues := make(map[peer.ID]PeerQueue) + pq := &mockPQ{} + peerQueues[p0] = pq + pwm.addPeer(pq, p0) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } + + // Send 1 old want-block and 2 new want-blocks to p0 + cids3 := testutil.GenerateCids(2) + pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) + + if g.count != 6 { + t.Fatal("Expected 6 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Broadcast 1 old want-have and 2 new want-haves + cids4 := testutil.GenerateCids(2) + pwm.broadcastWantHaves(append(cids4, cids2[0])) + if g.count != 8 { + t.Fatal("Expected 8 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Add a second peer + pwm.addPeer(pq, p1) + + if g.count != 8 { + t.Fatal("Expected 8 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 want-block that was sent to p0 + // and 1 want-block that was not sent + cids5 := testutil.GenerateCids(1) + 
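+	// Only the want-block that was actually sent affects the gauges; the
+	// never-sent cid is a no-op.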
pwm.sendCancels(append(cids5, cids[0])) + + if g.count != 7 { + t.Fatal("Expected 7 wants") + } + if wbg.count != 3 { + t.Fatal("Expected 3 want-blocks") + } + + // Remove first peer + pwm.removePeer(p0) + + // Should still have 3 broadcast wants + if g.count != 3 { + t.Fatal("Expected 3 wants") + } + if wbg.count != 0 { + t.Fatal("Expected all want-blocks to be removed") + } + + // Remove second peer + pwm.removePeer(p1) + + // Should still have 3 broadcast wants + if g.count != 3 { + t.Fatal("Expected 3 wants") + } + if wbg.count != 0 { + t.Fatal("Expected 0 want-blocks") + } + + // Cancel one remaining broadcast want-have + pwm.sendCancels(cids2[:1]) + if g.count != 2 { + t.Fatal("Expected 2 wants") + } + if wbg.count != 0 { + t.Fatal("Expected 0 want-blocks") + } +} + +func TestStatsOverlappingWantBlockWantHave(t *testing.T) { + test.Flaky(t) + + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.addPeer(&mockPQ{}, p0) + pwm.addPeer(&mockPQ{}, p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + // Send opposite: + // 2 want-haves and 2 want-blocks to p1 + pwm.sendWants(p1, cids2, cids) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 of each group of cids + pwm.sendCancels([]cid.Cid{cids[0], cids2[0]}) + + if g.count != 2 { + t.Fatal("Expected 2 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } +} + +func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { + test.Flaky(t) + + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.addPeer(&mockPQ{}, p0) + pwm.addPeer(&mockPQ{}, p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + // Send opposite: + // 2 want-haves and 2 want-blocks to p1 + pwm.sendWants(p1, cids2, cids) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Remove p0 + pwm.removePeer(p0) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } +} diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager.go b/bitswap/client/internal/providerquerymanager/providerquerymanager.go new file mode 100644 index 0000000000..9ef2e5fd8b --- /dev/null +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager.go @@ -0,0 +1,430 @@ +package providerquerymanager + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +var log = logging.Logger("bitswap") + +const ( + maxProviders = 10 + maxInProcessRequests = 6 + defaultTimeout = 10 * time.Second +) + +type inProgressRequestStatus struct { + ctx context.Context + cancelFn func() + providersSoFar []peer.ID + listeners map[chan peer.ID]struct{} +} + +type findProviderRequest struct { + k cid.Cid + ctx context.Context +} + +// ProviderQueryNetwork is an interface for finding providers and connecting to +// peers. 
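+// Implementations typically wrap a content routing system such as a DHT.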
+type ProviderQueryNetwork interface {
+	ConnectTo(context.Context, peer.ID) error
+	FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID
+}
+
+type providerQueryMessage interface {
+	debugMessage() string
+	handle(pqm *ProviderQueryManager)
+}
+
+type receivedProviderMessage struct {
+	ctx context.Context
+	k   cid.Cid
+	p   peer.ID
+}
+
+type finishedProviderQueryMessage struct {
+	ctx context.Context
+	k   cid.Cid
+}
+
+type newProvideQueryMessage struct {
+	ctx                   context.Context
+	k                     cid.Cid
+	inProgressRequestChan chan<- inProgressRequest
+}
+
+type cancelRequestMessage struct {
+	incomingProviders chan peer.ID
+	k                 cid.Cid
+}
+
+// ProviderQueryManager manages requests to find more providers for blocks
+// for bitswap sessions. Its main goals are to:
+// - rate limit requests -- don't have too many find provider calls running
+//   simultaneously
+// - connect to found peers and filter them if it can't connect
+// - ensure that two find-provider calls for the same block don't run
+//   concurrently
+// - manage timeouts
+type ProviderQueryManager struct {
+	ctx                          context.Context
+	network                      ProviderQueryNetwork
+	providerQueryMessages        chan providerQueryMessage
+	providerRequestsProcessing   chan *findProviderRequest
+	incomingFindProviderRequests chan *findProviderRequest
+
+	findProviderTimeout time.Duration
+	timeoutMutex        sync.RWMutex
+
+	// do not touch outside the run loop
+	inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus
+}
+
+// New initializes a new ProviderQueryManager for a given context and a given
+// network provider.
+func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager {
+	return &ProviderQueryManager{
+		ctx:                          ctx,
+		network:                      network,
+		providerQueryMessages:        make(chan providerQueryMessage, 16),
+		providerRequestsProcessing:   make(chan *findProviderRequest),
+		incomingFindProviderRequests: make(chan *findProviderRequest),
+		inProgressRequestStatuses:    make(map[cid.Cid]*inProgressRequestStatus),
+		findProviderTimeout:          defaultTimeout,
+	}
+}
+
+// Startup starts processing for the ProviderQueryManager.
+func (pqm *ProviderQueryManager) Startup() {
+	go pqm.run()
+}
+
+type inProgressRequest struct {
+	providersSoFar []peer.ID
+	incoming       chan peer.ID
+}
+
+// SetFindProviderTimeout changes the timeout for finding providers
+func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) {
+	pqm.timeoutMutex.Lock()
+	pqm.findProviderTimeout = findProviderTimeout
+	pqm.timeoutMutex.Unlock()
+}
+
+// FindProvidersAsync finds providers for the given block.
+func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID {
+	inProgressRequestChan := make(chan inProgressRequest)
+
+	select {
+	case pqm.providerQueryMessages <- &newProvideQueryMessage{
+		ctx:                   sessionCtx,
+		k:                     k,
+		inProgressRequestChan: inProgressRequestChan,
+	}:
+	case <-pqm.ctx.Done():
+		ch := make(chan peer.ID)
+		close(ch)
+		return ch
+	case <-sessionCtx.Done():
+		ch := make(chan peer.ID)
+		close(ch)
+		return ch
+	}
+
+	// DO NOT select on sessionCtx. We only want to abort here if we're
+	// shutting down because we can't actually _cancel_ the request till we
+	// get to receiveProviders.
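+	// Once the request is registered, receiveProviders takes over; session
+	// cancellation is then handled via cancelProviderRequest.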
+	var receivedInProgressRequest inProgressRequest
+	select {
+	case <-pqm.ctx.Done():
+		ch := make(chan peer.ID)
+		close(ch)
+		return ch
+	case receivedInProgressRequest = <-inProgressRequestChan:
+	}
+
+	return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest)
+}
+
+func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID {
+	// receiveProviders maintains an unbounded queue of incoming providers for
+	// a given request for a given session. Essentially, as a provider comes
+	// in for a given CID, we want to immediately broadcast it to all sessions
+	// that queried that CID, without worrying about whether the client code
+	// is actually reading from the returned channel, so that the broadcast
+	// never blocks.
+	// based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd
+	returnedProviders := make(chan peer.ID)
+	receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar...)
+	incomingProviders := receivedInProgressRequest.incoming
+
+	go func() {
+		defer close(returnedProviders)
+		outgoingProviders := func() chan<- peer.ID {
+			if len(receivedProviders) == 0 {
+				return nil
+			}
+			return returnedProviders
+		}
+		nextProvider := func() peer.ID {
+			if len(receivedProviders) == 0 {
+				return ""
+			}
+			return receivedProviders[0]
+		}
+		for len(receivedProviders) > 0 || incomingProviders != nil {
+			select {
+			case <-pqm.ctx.Done():
+				return
+			case <-sessionCtx.Done():
+				if incomingProviders != nil {
+					pqm.cancelProviderRequest(k, incomingProviders)
+				}
+				return
+			case provider, ok := <-incomingProviders:
+				if !ok {
+					incomingProviders = nil
+				} else {
+					receivedProviders = append(receivedProviders, provider)
+				}
+			case outgoingProviders() <- nextProvider():
+				receivedProviders = receivedProviders[1:]
+			}
+		}
+	}()
+	return returnedProviders
+}
+
+func (pqm *ProviderQueryManager) cancelProviderRequest(k cid.Cid, incomingProviders chan peer.ID) {
+	cancelMessageChannel := pqm.providerQueryMessages
+	for {
+		select {
+		case cancelMessageChannel <- &cancelRequestMessage{
+			incomingProviders: incomingProviders,
+			k:                 k,
+		}:
+			cancelMessageChannel = nil
+		// clear out any remaining providers, in case any "incoming provider"
+		// messages get processed before our cancel message
+		case _, ok := <-incomingProviders:
+			if !ok {
+				return
+			}
+		case <-pqm.ctx.Done():
+			return
+		}
+	}
+}
+
+func (pqm *ProviderQueryManager) findProviderWorker() {
+	// findProviderWorker just cycles through incoming provider queries one
+	// at a time.
We have six of these workers running at once + // to let requests go in parallel but keep them rate limited + for { + select { + case fpr, ok := <-pqm.providerRequestsProcessing: + if !ok { + return + } + k := fpr.k + log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) + pqm.timeoutMutex.RLock() + findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) + pqm.timeoutMutex.RUnlock() + providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) + wg := &sync.WaitGroup{} + for p := range providers { + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + err := pqm.network.ConnectTo(findProviderCtx, p) + if err != nil { + log.Debugf("failed to connect to provider %s: %s", p, err) + return + } + select { + case pqm.providerQueryMessages <- &receivedProviderMessage{ + ctx: findProviderCtx, + k: k, + p: p, + }: + case <-pqm.ctx.Done(): + return + } + }(p) + } + wg.Wait() + cancel() + select { + case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ + ctx: findProviderCtx, + k: k, + }: + case <-pqm.ctx.Done(): + } + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) providerRequestBufferWorker() { + // the provider request buffer worker just maintains an unbounded + // buffer for incoming provider queries and dispatches to the find + // provider workers as they become available + // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd + var providerQueryRequestBuffer []*findProviderRequest + nextProviderQuery := func() *findProviderRequest { + if len(providerQueryRequestBuffer) == 0 { + return nil + } + return providerQueryRequestBuffer[0] + } + outgoingRequests := func() chan<- *findProviderRequest { + if len(providerQueryRequestBuffer) == 0 { + return nil + } + return pqm.providerRequestsProcessing + } + + for { + select { + case incomingRequest, ok := <-pqm.incomingFindProviderRequests: + if !ok { + return + } + providerQueryRequestBuffer = append(providerQueryRequestBuffer, incomingRequest) + case outgoingRequests() <- nextProviderQuery(): + providerQueryRequestBuffer = providerQueryRequestBuffer[1:] + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) cleanupInProcessRequests() { + for _, requestStatus := range pqm.inProgressRequestStatuses { + for listener := range requestStatus.listeners { + close(listener) + } + requestStatus.cancelFn() + } +} + +func (pqm *ProviderQueryManager) run() { + defer pqm.cleanupInProcessRequests() + + go pqm.providerRequestBufferWorker() + for i := 0; i < maxInProcessRequests; i++ { + go pqm.findProviderWorker() + } + + for { + select { + case nextMessage := <-pqm.providerQueryMessages: + log.Debug(nextMessage.debugMessage()) + nextMessage.handle(pqm) + case <-pqm.ctx.Done(): + return + } + } +} + +func (rpm *receivedProviderMessage) debugMessage() string { + return fmt.Sprintf("Received provider (%s) for cid (%s)", rpm.p.String(), rpm.k.String()) +} + +func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] + if !ok { + log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) + return + } + requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) + for listener := range requestStatus.listeners { + select { + case listener <- rpm.p: + case <-pqm.ctx.Done(): + return + } + } +} + +func (fpqm *finishedProviderQueryMessage) debugMessage() string { + return 
fmt.Sprintf("Finished Provider Query on cid: %s", fpqm.k.String()) +} + +func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] + if !ok { + // we canceled the request as it finished. + return + } + for listener := range requestStatus.listeners { + close(listener) + } + delete(pqm.inProgressRequestStatuses, fpqm.k) + requestStatus.cancelFn() +} + +func (npqm *newProvideQueryMessage) debugMessage() string { + return fmt.Sprintf("New Provider Query on cid: %s", npqm.k.String()) +} + +func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] + if !ok { + + ctx, cancelFn := context.WithCancel(pqm.ctx) + requestStatus = &inProgressRequestStatus{ + listeners: make(map[chan peer.ID]struct{}), + ctx: ctx, + cancelFn: cancelFn, + } + pqm.inProgressRequestStatuses[npqm.k] = requestStatus + select { + case pqm.incomingFindProviderRequests <- &findProviderRequest{ + k: npqm.k, + ctx: ctx, + }: + case <-pqm.ctx.Done(): + return + } + } + inProgressChan := make(chan peer.ID) + requestStatus.listeners[inProgressChan] = struct{}{} + select { + case npqm.inProgressRequestChan <- inProgressRequest{ + providersSoFar: requestStatus.providersSoFar, + incoming: inProgressChan, + }: + case <-pqm.ctx.Done(): + } +} + +func (crm *cancelRequestMessage) debugMessage() string { + return fmt.Sprintf("Cancel provider query on cid: %s", crm.k.String()) +} + +func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] + if !ok { + // Request finished while queued. + return + } + _, ok = requestStatus.listeners[crm.incomingProviders] + if !ok { + // Request finished and _restarted_ while queued. 
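+		// (the restart created a fresh listener set that doesn't include
+		// this channel)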
+ return + } + delete(requestStatus.listeners, crm.incomingProviders) + close(crm.incomingProviders) + if len(requestStatus.listeners) == 0 { + delete(pqm.inProgressRequestStatuses, crm.k) + requestStatus.cancelFn() + } +} diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go new file mode 100644 index 0000000000..6cf5fa4a2d --- /dev/null +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -0,0 +1,388 @@ +package providerquerymanager + +import ( + "context" + "errors" + "reflect" + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +type fakeProviderNetwork struct { + peersFound []peer.ID + connectError error + delay time.Duration + connectDelay time.Duration + queriesMadeMutex sync.RWMutex + queriesMade int + liveQueries int +} + +func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { + time.Sleep(fpn.connectDelay) + return fpn.connectError +} + +func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + fpn.queriesMadeMutex.Lock() + fpn.queriesMade++ + fpn.liveQueries++ + fpn.queriesMadeMutex.Unlock() + incomingPeers := make(chan peer.ID) + go func() { + defer close(incomingPeers) + for _, p := range fpn.peersFound { + time.Sleep(fpn.delay) + select { + case <-ctx.Done(): + return + default: + } + select { + case incomingPeers <- p: + case <-ctx.Done(): + return + } + } + fpn.queriesMadeMutex.Lock() + fpn.liveQueries-- + fpn.queriesMadeMutex.Unlock() + }() + + return incomingPeers +} + +func TestNormalSimultaneousFetch(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + keys := testutil.GenerateCids(2) + + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1]) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != 2 { + t.Fatal("Did not dedup provider requests running simultaneously") + } + +} + +func TestDedupingProviderRequests(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + key := testutil.GenerateCids(1)[0] + + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + + var 
firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if !reflect.DeepEqual(firstPeersReceived, secondPeersReceived) { + t.Fatal("Did not receive the same response to both find provider requests") + } + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != 1 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + + // first session will cancel before done + firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) + defer firstCancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key) + secondSessionCtx, secondCancel := context.WithTimeout(ctx, 5*time.Second) + defer secondCancel() + secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if len(firstPeersReceived) >= len(peers) { + t.Fatal("Collected all peers on cancelled peer, should have been cancelled immediately") + } + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != 1 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestCancelManagerExitsGracefully(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + managerCtx, managerCancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer managerCancel() + providerQueryManager := New(managerCtx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) >= len(peers) || + len(secondPeersReceived) >= len(peers) { + t.Fatal("Did not cancel requests in progress correctly") + } +} + +func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + connectError: errors.New("not able to 
connect"), + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != 0 || len(secondPeersReceived) != 0 { + t.Fatal("Did not filter out peers with connection issues") + } + +} + +func TestRateLimitingRequests(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 5 * time.Millisecond, + } + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + keys := testutil.GenerateCids(maxInProcessRequests + 1) + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + var requestChannels []<-chan peer.ID + for i := 0; i < maxInProcessRequests+1; i++ { + requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i])) + } + time.Sleep(20 * time.Millisecond) + fpn.queriesMadeMutex.Lock() + if fpn.liveQueries != maxInProcessRequests { + t.Logf("Queries made: %d\n", fpn.liveQueries) + t.Fatal("Did not limit parallel requests to rate limit") + } + fpn.queriesMadeMutex.Unlock() + for i := 0; i < maxInProcessRequests+1; i++ { + for range requestChannels[i] { + } + } + + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != maxInProcessRequests+1 { + t.Logf("Queries made: %d\n", fpn.queriesMade) + t.Fatal("Did not make all seperate requests") + } +} + +func TestFindProviderTimeout(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 10 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + if len(firstPeersReceived) >= len(peers) { + t.Fatal("Find provider request should have timed out, did not") + } +} + +func TestFindProviderPreCanceled(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithCancel(ctx) + cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + if firstRequestChan == nil { + t.Fatal("expected non-nil channel") + } + select { + case 
<-firstRequestChan:
+ case <-time.After(10 * time.Millisecond):
+ t.Fatal("shouldn't have blocked waiting on a closed context")
+ }
+}
+
+func TestCancelFindProvidersAfterCompletion(t *testing.T) {
+ test.Flaky(t)
+
+ peers := testutil.GeneratePeers(2)
+ fpn := &fakeProviderNetwork{
+ peersFound: peers,
+ delay: 1 * time.Millisecond,
+ }
+ ctx := context.Background()
+ providerQueryManager := New(ctx, fpn)
+ providerQueryManager.Startup()
+ providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond)
+ keys := testutil.GenerateCids(1)
+
+ sessionCtx, cancel := context.WithCancel(ctx)
+ firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0])
+ <-firstRequestChan // wait for everything to start.
+ time.Sleep(10 * time.Millisecond) // wait for the incoming providers to stop.
+ cancel() // cancel the context.
+
+ timer := time.NewTimer(10 * time.Millisecond)
+ defer timer.Stop()
+ for {
+ select {
+ case _, ok := <-firstRequestChan:
+ if !ok {
+ return
+ }
+ case <-timer.C:
+ t.Fatal("should have finished receiving responses within timeout")
+ }
+ }
+}
diff --git a/bitswap/client/internal/session/cidqueue.go b/bitswap/client/internal/session/cidqueue.go
new file mode 100644
index 0000000000..aedfa944c4
--- /dev/null
+++ b/bitswap/client/internal/session/cidqueue.go
@@ -0,0 +1,63 @@
+package session
+
+import cid "github.com/ipfs/go-cid"
+
+type cidQueue struct {
+ elems []cid.Cid
+ eset *cid.Set
+}
+
+func newCidQueue() *cidQueue {
+ return &cidQueue{eset: cid.NewSet()}
+}
+
+func (cq *cidQueue) Pop() cid.Cid {
+ for {
+ if len(cq.elems) == 0 {
+ return cid.Cid{}
+ }
+
+ out := cq.elems[0]
+ cq.elems = cq.elems[1:]
+
+ if cq.eset.Has(out) {
+ cq.eset.Remove(out)
+ return out
+ }
+ }
+}
+
+func (cq *cidQueue) Cids() []cid.Cid {
+ // Lazily delete from the list any cids that were removed from the set
+ if len(cq.elems) > cq.eset.Len() {
+ i := 0
+ for _, c := range cq.elems {
+ if cq.eset.Has(c) {
+ cq.elems[i] = c
+ i++
+ }
+ }
+ cq.elems = cq.elems[:i]
+ }
+
+ // Make a copy of the cids
+ return append([]cid.Cid{}, cq.elems...)
+} + +func (cq *cidQueue) Push(c cid.Cid) { + if cq.eset.Visit(c) { + cq.elems = append(cq.elems, c) + } +} + +func (cq *cidQueue) Remove(c cid.Cid) { + cq.eset.Remove(c) +} + +func (cq *cidQueue) Has(c cid.Cid) bool { + return cq.eset.Has(c) +} + +func (cq *cidQueue) Len() int { + return cq.eset.Len() +} diff --git a/bitswap/client/internal/session/peerresponsetracker.go b/bitswap/client/internal/session/peerresponsetracker.go new file mode 100644 index 0000000000..d81c3b0278 --- /dev/null +++ b/bitswap/client/internal/session/peerresponsetracker.go @@ -0,0 +1,70 @@ +package session + +import ( + "math/rand" + + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// peerResponseTracker keeps track of how many times each peer was the first +// to send us a block for a given CID (used to rank peers) +type peerResponseTracker struct { + firstResponder map[peer.ID]int +} + +func newPeerResponseTracker() *peerResponseTracker { + return &peerResponseTracker{ + firstResponder: make(map[peer.ID]int), + } +} + +// receivedBlockFrom is called when a block is received from a peer +// (only called first time block is received) +func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { + prt.firstResponder[from]++ +} + +// choose picks a peer from the list of candidate peers, favouring those peers +// that were first to send us previous blocks +func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { + if len(peers) == 0 { + return "" + } + + rnd := rand.Float64() + + // Find the total received blocks for all candidate peers + total := 0 + for _, p := range peers { + total += prt.getPeerCount(p) + } + + // Choose one of the peers with a chance proportional to the number + // of blocks received from that peer + counted := 0.0 + for _, p := range peers { + counted += float64(prt.getPeerCount(p)) / float64(total) + if counted > rnd { + return p + } + } + + // We shouldn't get here unless there is some weirdness with floating point + // math that doesn't quite cover the whole range of peers in the for loop + // so just choose the last peer. 
+ index := len(peers) - 1 + return peers[index] +} + +// getPeerCount returns the number of times the peer was first to send us a +// block +func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { + count, ok := prt.firstResponder[p] + if ok { + return count + } + + // Make sure there is always at least a small chance a new peer + // will be chosen + return 1 +} diff --git a/bitswap/client/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go new file mode 100644 index 0000000000..1aed9c4ef8 --- /dev/null +++ b/bitswap/client/internal/session/peerresponsetracker_test.go @@ -0,0 +1,126 @@ +package session + +import ( + "math" + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +func TestPeerResponseTrackerInit(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + if prt.choose([]peer.ID{}) != "" { + t.Fatal("expected empty peer ID") + } + if prt.choose([]peer.ID{peers[0]}) != peers[0] { + t.Fatal("expected single peer ID") + } + p := prt.choose(peers) + if p != peers[0] && p != peers[1] { + t.Fatal("expected randomly chosen peer") + } +} + +func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(4) + prt := newPeerResponseTracker() + + choices := []int{0, 0, 0, 0} + count := 1000 + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } else if p == peers[3] { + choices[3]++ + } + } + + for _, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c-choices[0])) > 0.2*float64(count) { + t.Fatal("expected unknown peers to have roughly equal chance of being chosen") + } + } +} + +func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + prt.receivedBlockFrom(peers[0]) + + chooseFirst := 0 + chooseSecond := 0 + for i := 0; i < 1000; i++ { + p := prt.choose(peers) + if p == peers[0] { + chooseFirst++ + } else if p == peers[1] { + chooseSecond++ + } + } + + if chooseSecond == 0 { + t.Fatal("expected unknown peer to occasionally be chosen") + } + if chooseSecond > chooseFirst { + t.Fatal("expected known peer to be chosen more often") + } +} + +func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(3) + prt := newPeerResponseTracker() + + probabilities := []float64{0.1, 0.6, 0.3} + count := 1000 + for pi, prob := range probabilities { + for i := 0; float64(i) < float64(count)*prob; i++ { + prt.receivedBlockFrom(peers[pi]) + } + } + + var choices []int + for range probabilities { + choices = append(choices, 0) + } + + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } + } + + for i, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c)-(float64(count)*probabilities[i])) > 0.2*float64(count) { + t.Fatal("expected peers to be chosen proportionally to probability") + } + } +} diff --git a/bitswap/client/internal/session/sentwantblockstracker.go 
b/bitswap/client/internal/session/sentwantblockstracker.go new file mode 100644 index 0000000000..0dfe0630b0 --- /dev/null +++ b/bitswap/client/internal/session/sentwantblockstracker.go @@ -0,0 +1,33 @@ +package session + +import ( + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// sentWantBlocksTracker keeps track of which peers we've sent a want-block to +type sentWantBlocksTracker struct { + sentWantBlocks map[peer.ID]map[cid.Cid]struct{} +} + +func newSentWantBlocksTracker() *sentWantBlocksTracker { + return &sentWantBlocksTracker{ + sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), + } +} + +func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { + cids, ok := s.sentWantBlocks[p] + if !ok { + cids = make(map[cid.Cid]struct{}, len(ks)) + s.sentWantBlocks[p] = cids + } + for _, c := range ks { + cids[c] = struct{}{} + } +} + +func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { + _, ok := s.sentWantBlocks[p][c] + return ok +} diff --git a/bitswap/client/internal/session/sentwantblockstracker_test.go b/bitswap/client/internal/session/sentwantblockstracker_test.go new file mode 100644 index 0000000000..ccb920e31c --- /dev/null +++ b/bitswap/client/internal/session/sentwantblockstracker_test.go @@ -0,0 +1,31 @@ +package session + +import ( + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" +) + +func TestSendWantBlocksTracker(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + cids := testutil.GenerateCids(2) + swbt := newSentWantBlocksTracker() + + if swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected not to have sent anything yet") + } + + swbt.addSentWantBlocksTo(peers[0], cids) + if !swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected to have sent cid to peer") + } + if !swbt.haveSentWantBlockTo(peers[0], cids[1]) { + t.Fatal("expected to have sent cid to peer") + } + if swbt.haveSentWantBlockTo(peers[1], cids[0]) { + t.Fatal("expected not to have sent cid to peer") + } +} diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go new file mode 100644 index 0000000000..87a6dd919f --- /dev/null +++ b/bitswap/client/internal/session/session.go @@ -0,0 +1,508 @@ +package session + +import ( + "context" + "time" + + "github.com/ipfs/boxo/bitswap/client/internal" + bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager" + bsgetter "github.com/ipfs/boxo/bitswap/client/internal/getter" + notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications" + bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" + bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" + blocks "github.com/ipfs/boxo/blocks" + cid "github.com/ipfs/go-cid" + delay "github.com/ipfs/go-ipfs-delay" + logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p/core/peer" + "go.uber.org/zap" +) + +var log = logging.Logger("bs:sess") +var sflog = log.Desugar() + +const ( + broadcastLiveWantsLimit = 64 +) + +// PeerManager keeps track of which sessions are interested in which peers +// and takes care of sending wants for the sessions +type PeerManager interface { + // RegisterSession tells the PeerManager that the session is interested + // in a peer's connection state + RegisterSession(peer.ID, bspm.Session) + // UnregisterSession tells the PeerManager that the session is no longer + // interested in a peer's connection 
state + UnregisterSession(uint64) + // SendWants tells the PeerManager to send wants to the given peer + SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) + // BroadcastWantHaves sends want-haves to all connected peers (used for + // session discovery) + BroadcastWantHaves(context.Context, []cid.Cid) + // SendCancels tells the PeerManager to send cancels to all peers + SendCancels(context.Context, []cid.Cid) +} + +// SessionManager manages all the sessions +type SessionManager interface { + // Remove a session (called when the session shuts down) + RemoveSession(sesid uint64) + // Cancel wants (called when a call to GetBlocks() is cancelled) + CancelSessionWants(sid uint64, wants []cid.Cid) +} + +// SessionPeerManager keeps track of peers in the session +type SessionPeerManager interface { + // PeersDiscovered indicates if any peers have been discovered yet + PeersDiscovered() bool + // Shutdown the SessionPeerManager + Shutdown() + // Adds a peer to the session, returning true if the peer is new + AddPeer(peer.ID) bool + // Removes a peer from the session, returning true if the peer existed + RemovePeer(peer.ID) bool + // All peers in the session + Peers() []peer.ID + // Whether there are any peers in the session + HasPeers() bool + // Protect connection from being pruned by the connection manager + ProtectConnection(peer.ID) +} + +// ProviderFinder is used to find providers for a given key +type ProviderFinder interface { + // FindProvidersAsync searches for peers that provide the given CID + FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID +} + +// opType is the kind of operation that is being processed by the event loop +type opType int + +const ( + // Receive blocks + opReceive opType = iota + // Want blocks + opWant + // Cancel wants + opCancel + // Broadcast want-haves + opBroadcast + // Wants sent to peers + opWantsSent +) + +type op struct { + op opType + keys []cid.Cid +} + +// Session holds state for an individual bitswap transfer operation. +// This allows bitswap to make smarter decisions about who to send wantlist +// info to, and who to request blocks from. +type Session struct { + // dependencies + ctx context.Context + shutdown func() + sm SessionManager + pm PeerManager + sprm SessionPeerManager + providerFinder ProviderFinder + sim *bssim.SessionInterestManager + + sw sessionWants + sws sessionWantSender + + latencyTrkr latencyTracker + + // channels + incoming chan op + tickDelayReqs chan time.Duration + + // do not touch outside run loop + idleTick *time.Timer + periodicSearchTimer *time.Timer + baseTickDelay time.Duration + consecutiveTicks int + initialSearchDelay time.Duration + periodicSearchDelay delay.D + // identifiers + notif notifications.PubSub + id uint64 + + self peer.ID +} + +// New creates a new bitswap session whose lifetime is bounded by the +// given context. 
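The interfaces and op/opType plumbing above funnel every external interaction into a single run loop, which is why the Session fields marked "do not touch outside run loop" need no locks. A minimal, self-contained sketch of the same single-owner event-loop pattern (all names below are illustrative, not part of this package):

package main

import (
	"context"
	"fmt"
	"time"
)

type evType int

const (
	evAdd evType = iota
	evCancel
)

type ev struct {
	op   evType
	keys []string
}

// loop owns all mutable state; callers interact only via the channel,
// so no mutex is needed on the wants map.
func loop(ctx context.Context, incoming <-chan ev) {
	wants := map[string]bool{}
	for {
		select {
		case e := <-incoming:
			switch e.op {
			case evAdd:
				for _, k := range e.keys {
					wants[k] = true
				}
			case evCancel:
				for _, k := range e.keys {
					delete(wants, k)
				}
			}
			fmt.Println("live wants:", len(wants))
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	incoming := make(chan ev, 16)
	go loop(ctx, incoming)
	incoming <- ev{op: evAdd, keys: []string{"cid-1", "cid-2"}}
	incoming <- ev{op: evCancel, keys: []string{"cid-1"}}
	time.Sleep(50 * time.Millisecond) // let the loop drain before exiting
}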
+func New( + ctx context.Context, + sm SessionManager, + id uint64, + sprm SessionPeerManager, + providerFinder ProviderFinder, + sim *bssim.SessionInterestManager, + pm PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + initialSearchDelay time.Duration, + periodicSearchDelay delay.D, + self peer.ID) *Session { + + ctx, cancel := context.WithCancel(ctx) + s := &Session{ + sw: newSessionWants(broadcastLiveWantsLimit), + tickDelayReqs: make(chan time.Duration), + ctx: ctx, + shutdown: cancel, + sm: sm, + pm: pm, + sprm: sprm, + providerFinder: providerFinder, + sim: sim, + incoming: make(chan op, 128), + latencyTrkr: latencyTracker{}, + notif: notif, + baseTickDelay: time.Millisecond * 500, + id: id, + initialSearchDelay: initialSearchDelay, + periodicSearchDelay: periodicSearchDelay, + self: self, + } + s.sws = newSessionWantSender(id, pm, sprm, sm, bpm, s.onWantsSent, s.onPeersExhausted) + + go s.run(ctx) + + return s +} + +func (s *Session) ID() uint64 { + return s.id +} + +func (s *Session) Shutdown() { + s.shutdown() +} + +// ReceiveFrom receives incoming blocks from the given peer. +func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // The SessionManager tells each Session about all keys that it may be + // interested in. Here the Session filters the keys to the ones that this + // particular Session is interested in. + interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) + ks = interestedRes[0] + haves = interestedRes[1] + dontHaves = interestedRes[2] + s.logReceiveFrom(from, ks, haves, dontHaves) + + // Inform the session want sender that a message has been received + s.sws.Update(from, ks, haves, dontHaves) + + if len(ks) == 0 { + return + } + + // Inform the session that blocks have been received + select { + case s.incoming <- op{op: opReceive, keys: ks}: + case <-s.ctx.Done(): + } +} + +func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Save some CPU cycles if log level is higher than debug + if ce := sflog.Check(zap.DebugLevel, "Bitswap <- rcv message"); ce == nil { + return + } + + for _, c := range interestedKs { + log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) + } + for _, c := range haves { + log.Debugw("Bitswap <- HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) + } + for _, c := range dontHaves { + log.Debugw("Bitswap <- DONT_HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) + } +} + +// GetBlock fetches a single block. +func (s *Session) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "Session.GetBlock") + defer span.End() + return bsgetter.SyncGetBlock(ctx, k, s.GetBlocks) +} + +// GetBlocks fetches a set of blocks within the context of this session and +// returns a channel that found blocks will be returned on. No order is +// guaranteed on the returned blocks. 
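GetBlock above is just a synchronous wrapper over the GetBlocks path. Since this package is internal, real callers reach it through the bitswap client, but the shape of the call is the same; a hypothetical caller-side sketch (the blockGetter interface below is assumed for illustration only):

package example

import (
	"context"
	"fmt"
	"time"

	blocks "github.com/ipfs/boxo/blocks"
	cid "github.com/ipfs/go-cid"
)

// blockGetter stands in for the *Session methods used here.
type blockGetter interface {
	GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error)
}

// fetchWithTimeout drains the result channel; the channel closes once all
// blocks have arrived or the context is cancelled, whichever comes first.
func fetchWithTimeout(s blockGetter, keys []cid.Cid) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	ch, err := s.GetBlocks(ctx, keys)
	if err != nil {
		return err
	}
	for blk := range ch {
		fmt.Println("received", blk.Cid()) // arrival order is not guaranteed
	}
	return ctx.Err()
}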
+func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) {
+ ctx, span := internal.StartSpan(ctx, "Session.GetBlocks")
+ defer span.End()
+
+ return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif,
+ func(ctx context.Context, keys []cid.Cid) {
+ select {
+ case s.incoming <- op{op: opWant, keys: keys}:
+ case <-ctx.Done():
+ case <-s.ctx.Done():
+ }
+ },
+ func(keys []cid.Cid) {
+ select {
+ case s.incoming <- op{op: opCancel, keys: keys}:
+ case <-s.ctx.Done():
+ }
+ },
+ )
+}
+
+// SetBaseTickDelay changes the rate at which ticks happen.
+func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) {
+ select {
+ case s.tickDelayReqs <- baseTickDelay:
+ case <-s.ctx.Done():
+ }
+}
+
+// onWantsSent is called when wants are sent to a peer by the session wants sender
+func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) {
+ allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...)
+ s.nonBlockingEnqueue(op{op: opWantsSent, keys: allBlks})
+}
+
+// onPeersExhausted is called when all available peers have sent DONT_HAVE for
+// a set of cids (or all peers become unavailable)
+func (s *Session) onPeersExhausted(ks []cid.Cid) {
+ s.nonBlockingEnqueue(op{op: opBroadcast, keys: ks})
+}
+
+// We don't want to block the sessionWantSender if the incoming channel
+// is full. So if we can't immediately send on the incoming channel, spin
+// it off into a goroutine.
+func (s *Session) nonBlockingEnqueue(o op) {
+ select {
+ case s.incoming <- o:
+ default:
+ go func() {
+ select {
+ case s.incoming <- o:
+ case <-s.ctx.Done():
+ }
+ }()
+ }
+}
+
+// Session run loop -- nothing in this function should be called outside
+// of this loop
+func (s *Session) run(ctx context.Context) {
+ go s.sws.Run()
+
+ s.idleTick = time.NewTimer(s.initialSearchDelay)
+ s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime())
+ for {
+ select {
+ case oper := <-s.incoming:
+ switch oper.op {
+ case opReceive:
+ // Received blocks
+ s.handleReceive(oper.keys)
+ case opWant:
+ // Client wants blocks
+ s.wantBlocks(ctx, oper.keys)
+ case opCancel:
+ // Wants were cancelled
+ s.sw.CancelPending(oper.keys)
+ s.sws.Cancel(oper.keys)
+ case opWantsSent:
+ // Wants were sent to a peer
+ s.sw.WantsSent(oper.keys)
+ case opBroadcast:
+ // Broadcast want-haves to all peers
+ s.broadcast(ctx, oper.keys)
+ default:
+ panic("unhandled operation")
+ }
+ case <-s.idleTick.C:
+ // The session hasn't received blocks for a while, broadcast
+ s.broadcast(ctx, nil)
+ case <-s.periodicSearchTimer.C:
+ // Periodically search for a random live want
+ s.handlePeriodicSearch(ctx)
+ case baseTickDelay := <-s.tickDelayReqs:
+ // Set the base tick delay
+ s.baseTickDelay = baseTickDelay
+ case <-ctx.Done():
+ // Shutdown
+ s.handleShutdown()
+ return
+ }
+ }
+}
+
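The nonBlockingEnqueue pattern above keeps the sessionWantSender from ever stalling on a full incoming channel, at the cost of occasional reordering. The same idiom in isolation, as a generic helper (illustrative only, not part of the package):

package main

import (
	"context"
	"fmt"
)

// sendNonBlocking tries a buffered send first and falls back to a
// goroutine only when the channel is full, so the caller never blocks
// and a cancelled context still releases the goroutine.
func sendNonBlocking[T any](ctx context.Context, ch chan<- T, v T) {
	select {
	case ch <- v:
	default:
		go func() {
			select {
			case ch <- v:
			case <-ctx.Done():
			}
		}()
	}
}

func main() {
	ch := make(chan int, 1)
	sendNonBlocking(context.Background(), ch, 1) // buffered send succeeds
	sendNonBlocking(context.Background(), ch, 2) // buffer full: handed to a goroutine
	fmt.Println(<-ch, <-ch)
}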
+// Called when the session hasn't received any blocks for some time, or when
+// all peers in the session have sent DONT_HAVE for a particular set of CIDs.
+// Send want-haves to all connected peers, and search for new peers with the CID.
+func (s *Session) broadcast(ctx context.Context, wants []cid.Cid) {
+ // If this broadcast is because of an idle timeout (we haven't received
+ // any blocks for a while) then broadcast all pending wants
+ if wants == nil {
+ wants = s.sw.PrepareBroadcast()
+ }
+
+ // Broadcast a want-have for the live wants to everyone we're connected to
+ s.broadcastWantHaves(ctx, wants)
+
+ // do not find providers on consecutive ticks
+ // -- just rely on periodic search widening
+ if len(wants) > 0 && (s.consecutiveTicks == 0) {
+ // Search for providers who have the first want in the list.
+ // Typically if the provider has the first block they will have
+ // the rest of the blocks also.
+ log.Debugw("FindMorePeers", "session", s.id, "cid", wants[0], "pending", len(wants))
+ s.findMorePeers(ctx, wants[0])
+ }
+ s.resetIdleTick()
+
+ // If we have live wants record a consecutive tick
+ if s.sw.HasLiveWants() {
+ s.consecutiveTicks++
+ }
+}
+
+// handlePeriodicSearch is called periodically to search for providers of a
+// randomly chosen CID in the session.
+func (s *Session) handlePeriodicSearch(ctx context.Context) {
+ randomWant := s.sw.RandomLiveWant()
+ if !randomWant.Defined() {
+ return
+ }
+
+ // TODO: come up with a better strategy for determining when to search
+ // for new providers for blocks.
+ s.findMorePeers(ctx, randomWant)
+
+ s.broadcastWantHaves(ctx, []cid.Cid{randomWant})
+
+ s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime())
+}
+
+// findMorePeers attempts to find more peers for a session by searching for
+// providers for the given Cid
+func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) {
+ go func(k cid.Cid) {
+ for p := range s.providerFinder.FindProvidersAsync(ctx, k) {
+ // When a provider indicates that it has a cid, it's equivalent to
+ // the providing peer sending a HAVE
+ s.sws.Update(p, nil, []cid.Cid{c}, nil)
+ }
+ }(c)
+}
+
+// handleShutdown is called when the session shuts down
+func (s *Session) handleShutdown() {
+ // Stop the idle timer
+ s.idleTick.Stop()
+ // Shut down the session peer manager
+ s.sprm.Shutdown()
+ // Shut down the sessionWantSender (blocks until sessionWantSender stops
+ // sending)
+ s.sws.Shutdown()
+ // Signal to the SessionManager that the session has been shutdown
+ // and can be cleaned up
+ s.sm.RemoveSession(s.id)
+}
+
+// handleReceive is called when the session receives blocks from a peer
+func (s *Session) handleReceive(ks []cid.Cid) {
+ // Record which blocks have been received and figure out the total latency
+ // for fetching the blocks
+ wanted, totalLatency := s.sw.BlocksReceived(ks)
+ if len(wanted) == 0 {
+ return
+ }
+
+ // Record latency
+ s.latencyTrkr.receiveUpdate(len(wanted), totalLatency)
+
+ // Inform the SessionInterestManager that this session is no longer
+ // expecting to receive the wanted keys
+ s.sim.RemoveSessionWants(s.id, wanted)
+
+ s.idleTick.Stop()
+
+ // We've received new wanted blocks, so reset the number of ticks
+ // that have occurred since the last new block
+ s.consecutiveTicks = 0
+
+ s.resetIdleTick()
+}
+
+// wantBlocks is called when blocks are requested by the client
+func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) {
+ if len(newks) > 0 {
+ // Inform the SessionInterestManager that this session is interested in the keys
+ s.sim.RecordSessionInterest(s.id, newks)
+ // Tell the sessionWants tracker that the wants have been requested
+ s.sw.BlocksRequested(newks)
+ // Tell the sessionWantSender that the blocks have been requested
+ 
s.sws.Add(newks) + } + + // If we have discovered peers already, the sessionWantSender will + // send wants to them + if s.sprm.PeersDiscovered() { + return + } + + // No peers discovered yet, broadcast some want-haves + ks := s.sw.GetNextWants() + if len(ks) > 0 { + log.Infow("No peers - broadcasting", "session", s.id, "want-count", len(ks)) + s.broadcastWantHaves(ctx, ks) + } +} + +// Send want-haves to all connected peers +func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { + log.Debugw("broadcastWantHaves", "session", s.id, "cids", wants) + s.pm.BroadcastWantHaves(ctx, wants) +} + +// The session will broadcast if it has outstanding wants and doesn't receive +// any blocks for some time. +// The length of time is calculated +// - initially +// as a fixed delay +// - once some blocks are received +// from a base delay and average latency, with a backoff +func (s *Session) resetIdleTick() { + var tickDelay time.Duration + if !s.latencyTrkr.hasLatency() { + tickDelay = s.initialSearchDelay + } else { + avLat := s.latencyTrkr.averageLatency() + tickDelay = s.baseTickDelay + (3 * avLat) + } + tickDelay *= time.Duration(1 + s.consecutiveTicks) + s.idleTick.Reset(tickDelay) +} + +// latencyTracker keeps track of the average latency between sending a want +// and receiving the corresponding block +type latencyTracker struct { + totalLatency time.Duration + count int +} + +func (lt *latencyTracker) hasLatency() bool { + return lt.totalLatency > 0 && lt.count > 0 +} + +func (lt *latencyTracker) averageLatency() time.Duration { + return lt.totalLatency / time.Duration(lt.count) +} + +func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { + lt.totalLatency += totalLatency + lt.count += count +} diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go new file mode 100644 index 0000000000..b60c7d1af8 --- /dev/null +++ b/bitswap/client/internal/session/session_test.go @@ -0,0 +1,607 @@ +package session + +import ( + "context" + "sync" + "testing" + "time" + + bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications" + bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" + bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" + bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +type mockSessionMgr struct { + lk sync.Mutex + removeSession bool + cancels []cid.Cid +} + +func newMockSessionMgr() *mockSessionMgr { + return &mockSessionMgr{} +} + +func (msm *mockSessionMgr) removeSessionCalled() bool { + msm.lk.Lock() + defer msm.lk.Unlock() + return msm.removeSession +} + +func (msm *mockSessionMgr) cancelled() []cid.Cid { + msm.lk.Lock() + defer msm.lk.Unlock() + return msm.cancels +} + +func (msm *mockSessionMgr) RemoveSession(sesid uint64) { + msm.lk.Lock() + defer msm.lk.Unlock() + msm.removeSession = true +} + +func (msm *mockSessionMgr) CancelSessionWants(sid uint64, wants []cid.Cid) { + msm.lk.Lock() + defer msm.lk.Unlock() + msm.cancels = append(msm.cancels, wants...) 
+} + +func newFakeSessionPeerManager() *bsspm.SessionPeerManager { + return bsspm.New(1, newFakePeerTagger()) +} + +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{ + protectedPeers: make(map[peer.ID]map[string]struct{}), + } +} + +type fakePeerTagger struct { + lk sync.Mutex + protectedPeers map[peer.ID]map[string]struct{} +} + +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, val int) {} +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) {} + +func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + tags, ok := fpt.protectedPeers[p] + if !ok { + tags = make(map[string]struct{}) + fpt.protectedPeers[p] = tags + } + tags[tag] = struct{}{} +} + +func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + if tags, ok := fpt.protectedPeers[p]; ok { + delete(tags, tag) + return len(tags) > 0 + } + + return false +} + +func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + return len(fpt.protectedPeers[p]) > 0 +} + +type fakeProviderFinder struct { + findMorePeersRequested chan cid.Cid +} + +func newFakeProviderFinder() *fakeProviderFinder { + return &fakeProviderFinder{ + findMorePeersRequested: make(chan cid.Cid, 1), + } +} + +func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID { + go func() { + select { + case fpf.findMorePeersRequested <- k: + case <-ctx.Done(): + } + }() + + return make(chan peer.ID) +} + +type wantReq struct { + cids []cid.Cid +} + +type fakePeerManager struct { + wantReqs chan wantReq +} + +func newFakePeerManager() *fakePeerManager { + return &fakePeerManager{ + wantReqs: make(chan wantReq, 1), + } +} + +func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} +func (pm *fakePeerManager) UnregisterSession(uint64) {} +func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Cid) { + select { + case pm.wantReqs <- wantReq{cids}: + case <-ctx.Done(): + } +} +func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) {} + +func TestSessionGetBlocks(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + + _, err := session.GetBlocks(ctx, cids) + + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + receivedWantReq := <-fpm.wantReqs + + // Should have registered session's interest in blocks + intSes := sim.FilterSessionInterested(id, cids) + if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { + t.Fatal("did not register session interest in blocks") + } + + // Should have sent out broadcast request for wants + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { + t.Fatal("did not enqueue correct initial number of wants") + } + + // Simulate 
receiving HAVEs from several peers + peers := testutil.GeneratePeers(5) + for i, p := range peers { + blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] + session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) + } + + time.Sleep(10 * time.Millisecond) + + // Verify new peers were recorded + if !testutil.MatchPeersIgnoreOrder(fspm.Peers(), peers) { + t.Fatal("peers not recorded by the peer manager") + } + + // Verify session still wants received blocks + _, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") + } + + // Simulate receiving DONT_HAVE for a CID + session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) + + time.Sleep(10 * time.Millisecond) + + // Verify session still wants received blocks + _, unwanted = sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") + } + + // Simulate receiving block for a CID + session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + time.Sleep(10 * time.Millisecond) + + // Verify session no longer wants received block + wanted, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { + t.Fatal("session wants block that has already been received") + } + if len(wanted) != len(blks)-1 { + t.Fatal("session wants incorrect number of blocks") + } + + // Shut down session + cancel() + + time.Sleep(10 * time.Millisecond) + + // Verify session was removed + if !sm.removeSessionCalled() { + t.Fatal("expected session to be removed") + } +} + +func TestSessionFindMorePeers(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) + defer cancel() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session.SetBaseTickDelay(200 * time.Microsecond) + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // The session should initially broadcast want-haves + select { + case <-fpm.wantReqs: + case <-ctx.Done(): + t.Fatal("Did not make first want request ") + } + + // receive a block to trigger a tick reset + time.Sleep(20 * time.Millisecond) // need to make sure some latency registers + // or there will be no tick set -- time precision on Windows in go is in the + // millisecond range + p := testutil.GeneratePeers(1)[0] + + blk := blks[0] + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, []cid.Cid{}, []cid.Cid{}) + + // The session should now time out waiting for a response and broadcast + // want-haves again + select { + case <-fpm.wantReqs: + case <-ctx.Done(): + t.Fatal("Did not make second want request ") + } + + // The session should keep broadcasting periodically until it receives a response + select { + case receivedWantReq := <-fpm.wantReqs: + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { + t.Fatal("did not rebroadcast whole live list") + } + // Make sure the first block is not included because it 
has already
+ // been received
+ for _, c := range receivedWantReq.cids {
+ if c.Equals(cids[0]) {
+ t.Fatal("should not broadcast block that was already received")
+ }
+ }
+ case <-ctx.Done():
+ t.Fatal("Never rebroadcast want list")
+ }
+
+ // The session should eventually try to find more peers
+ select {
+ case <-fpf.findMorePeersRequested:
+ case <-ctx.Done():
+ t.Fatal("Did not find more peers")
+ }
+}
+
+func TestSessionOnPeersExhausted(t *testing.T) {
+ test.Flaky(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+ defer cancel()
+ fpm := newFakePeerManager()
+ fspm := newFakeSessionPeerManager()
+ fpf := newFakeProviderFinder()
+
+ sim := bssim.New()
+ bpm := bsbpm.New()
+ notif := notifications.New()
+ defer notif.Shutdown()
+ id := testutil.GenerateSessionID()
+ sm := newMockSessionMgr()
+ session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "")
+ blockGenerator := blocksutil.NewBlockGenerator()
+ blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5)
+ var cids []cid.Cid
+ for _, block := range blks {
+ cids = append(cids, block.Cid())
+ }
+ _, err := session.GetBlocks(ctx, cids)
+
+ if err != nil {
+ t.Fatal("error getting blocks")
+ }
+
+ // Wait for initial want request
+ receivedWantReq := <-fpm.wantReqs
+
+ // Should have sent out broadcast request for wants
+ if len(receivedWantReq.cids) != broadcastLiveWantsLimit {
+ t.Fatal("did not enqueue correct initial number of wants")
+ }
+
+ // Signal that all peers have sent DONT_HAVE for two of the wants
+ session.onPeersExhausted(cids[len(cids)-2:])
+
+ // Wait for want request
+ receivedWantReq = <-fpm.wantReqs
+
+ // Should have sent out broadcast request for wants
+ if len(receivedWantReq.cids) != 2 {
+ t.Fatal("did not enqueue correct number of wants")
+ }
+}
+
+func TestSessionFailingToGetFirstBlock(t *testing.T) {
+ test.Flaky(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ fpm := newFakePeerManager()
+ fspm := newFakeSessionPeerManager()
+ fpf := newFakeProviderFinder()
+ sim := bssim.New()
+ bpm := bsbpm.New()
+ notif := notifications.New()
+ defer notif.Shutdown()
+ id := testutil.GenerateSessionID()
+ sm := newMockSessionMgr()
+ session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "")
+ blockGenerator := blocksutil.NewBlockGenerator()
+ blks := blockGenerator.Blocks(4)
+ var cids []cid.Cid
+ for _, block := range blks {
+ cids = append(cids, block.Cid())
+ }
+ startTick := time.Now()
+ _, err := session.GetBlocks(ctx, cids)
+ if err != nil {
+ t.Fatal("error getting blocks")
+ }
+
+ // The session should initially broadcast want-haves
+ select {
+ case <-fpm.wantReqs:
+ case <-ctx.Done():
+ t.Fatal("Did not make first want request ")
+ }
+
+ // Verify a broadcast was made
+ select {
+ case receivedWantReq := <-fpm.wantReqs:
+ if len(receivedWantReq.cids) < len(cids) {
+ t.Fatal("did not rebroadcast whole live list")
+ }
+ case <-ctx.Done():
+ t.Fatal("Never rebroadcast want list")
+ }
+
+ // Wait for a request to find more peers to occur
+ select {
+ case k := <-fpf.findMorePeersRequested:
+ if testutil.IndexOf(blks, k) == -1 {
+ t.Fatal("did not rebroadcast an active want")
+ }
+ case <-ctx.Done():
+ t.Fatal("Did not find more peers")
+ }
+ firstTickLength := time.Since(startTick)
+
+ // Wait for another broadcast to occur
+ select {
+ case receivedWantReq := <-fpm.wantReqs:
+ if len(receivedWantReq.cids) < 
len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // Wait for another broadcast to occur + startTick = time.Now() + select { + case receivedWantReq := <-fpm.wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // Tick should take longer + consecutiveTickLength := time.Since(startTick) + if firstTickLength > consecutiveTickLength { + t.Fatal("Should have increased tick length after first consecutive tick") + } + + // Wait for another broadcast to occur + startTick = time.Now() + select { + case receivedWantReq := <-fpm.wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // Tick should take longer + secondConsecutiveTickLength := time.Since(startTick) + if consecutiveTickLength > secondConsecutiveTickLength { + t.Fatal("Should have increased tick length after first consecutive tick") + } + + // Should not have tried to find peers on consecutive ticks + select { + case <-fpf.findMorePeersRequested: + t.Fatal("Should not have tried to find peers on consecutive ticks") + default: + } + + // Wait for rebroadcast to occur + select { + case k := <-fpf.findMorePeersRequested: + if testutil.IndexOf(blks, k) == -1 { + t.Fatal("did not rebroadcast an active want") + } + case <-ctx.Done(): + t.Fatal("Did not rebroadcast to find more peers") + } +} + +func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { + test.Flaky(t) + + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + + // Create a new session with its own context + sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + session := New(sessctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + + timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer timerCancel() + + // Request a block with a new context + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(1) + getctx, getcancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer getcancel() + + getBlocksCh, err := session.GetBlocks(getctx, []cid.Cid{blks[0].Cid()}) + if err != nil { + t.Fatal("error getting blocks") + } + + // Cancel the session context + sesscancel() + + // Expect the GetBlocks() channel to be closed + select { + case _, ok := <-getBlocksCh: + if ok { + t.Fatal("expected channel to be closed but was not closed") + } + case <-timerCtx.Done(): + t.Fatal("expected channel to be closed before timeout") + } + + time.Sleep(10 * time.Millisecond) + + // Expect RemoveSession to be called + if !sm.removeSessionCalled() { + t.Fatal("expected onShutdown to be called") + } +} + +func TestSessionOnShutdownCalled(t *testing.T) { + test.Flaky(t) + + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + + // Create a new session with its own context + sessctx, sesscancel := 
context.WithTimeout(context.Background(), 100*time.Millisecond) + defer sesscancel() + session := New(sessctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + + // Shutdown the session + session.Shutdown() + + time.Sleep(10 * time.Millisecond) + + // Expect RemoveSession to be called + if !sm.removeSessionCalled() { + t.Fatal("expected onShutdown to be called") + } +} + +func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { + test.Flaky(t) + + ctx, cancelCtx := context.WithTimeout(context.Background(), 20*time.Millisecond) + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(2) + cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} + + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + <-fpm.wantReqs + + // Shut down session + cancelCtx() + + // Simulate receiving block for a CID + peer := testutil.GeneratePeers(1)[0] + session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + time.Sleep(5 * time.Millisecond) + + // If we don't get a panic then the test is considered passing +} diff --git a/bitswap/client/internal/session/sessionwants.go b/bitswap/client/internal/session/sessionwants.go new file mode 100644 index 0000000000..0d4ded013c --- /dev/null +++ b/bitswap/client/internal/session/sessionwants.go @@ -0,0 +1,193 @@ +package session + +import ( + "fmt" + "math/rand" + "time" + + cid "github.com/ipfs/go-cid" +) + +// liveWantsOrder and liveWants will get out of sync as blocks are received. +// This constant is the maximum amount to allow them to be out of sync before +// cleaning up the ordering array. +const liveWantsOrderGCLimit = 32 + +// sessionWants keeps track of which cids are waiting to be sent out, and which +// peers are "live" - ie, we've sent a request but haven't received a block yet +type sessionWants struct { + // The wants that have not yet been sent out + toFetch *cidQueue + // Wants that have been sent but have not received a response + liveWants map[cid.Cid]time.Time + // The order in which wants were requested + liveWantsOrder []cid.Cid + // The maximum number of want-haves to send in a broadcast + broadcastLimit int +} + +func newSessionWants(broadcastLimit int) sessionWants { + return sessionWants{ + toFetch: newCidQueue(), + liveWants: make(map[cid.Cid]time.Time), + broadcastLimit: broadcastLimit, + } +} + +func (sw *sessionWants) String() string { + return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) +} + +// BlocksRequested is called when the client makes a request for blocks +func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { + for _, k := range newWants { + sw.toFetch.Push(k) + } +} + +// GetNextWants is called when the session has not yet discovered peers with +// the blocks that it wants. It moves as many CIDs from the fetch queue to +// the live wants queue as possible (given the broadcast limit). +// Returns the newly live wants. 
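The pending/live split described above is easiest to see in miniature. A toy sketch with strings standing in for CIDs (none of these names exist in the package):

package main

import (
	"fmt"
	"time"
)

// toyWants mirrors the toFetch/liveWants bookkeeping: wants wait in a
// FIFO, move to the live map (with a sent timestamp) up to the broadcast
// limit, and are deleted once the corresponding block arrives.
type toyWants struct {
	pending []string
	live    map[string]time.Time
	limit   int
}

func (w *toyWants) nextWants() []string {
	var moved []string
	for len(w.live) < w.limit && len(w.pending) > 0 {
		c := w.pending[0]
		w.pending = w.pending[1:]
		w.live[c] = time.Now()
		moved = append(moved, c)
	}
	return moved
}

func (w *toyWants) received(c string) {
	delete(w.live, c)
}

func main() {
	w := &toyWants{pending: []string{"a", "b", "c"}, live: map[string]time.Time{}, limit: 2}
	fmt.Println(w.nextWants()) // [a b]; "c" waits until a live want resolves
	w.received("a")
	fmt.Println(w.nextWants()) // [c]
}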
+func (sw *sessionWants) GetNextWants() []cid.Cid { + now := time.Now() + + // Move CIDs from fetch queue to the live wants queue (up to the broadcast + // limit) + currentLiveCount := len(sw.liveWants) + toAdd := sw.broadcastLimit - currentLiveCount + + var live []cid.Cid + for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { + c := sw.toFetch.Pop() + live = append(live, c) + sw.liveWantsOrder = append(sw.liveWantsOrder, c) + sw.liveWants[c] = now + } + + return live +} + +// WantsSent is called when wants are sent to a peer +func (sw *sessionWants) WantsSent(ks []cid.Cid) { + now := time.Now() + for _, c := range ks { + if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { + sw.toFetch.Remove(c) + sw.liveWantsOrder = append(sw.liveWantsOrder, c) + sw.liveWants[c] = now + } + } +} + +// BlocksReceived removes received block CIDs from the live wants list and +// measures latency. It returns the CIDs of blocks that were actually +// wanted (as opposed to duplicates) and the total latency for all incoming blocks. +func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { + wanted := make([]cid.Cid, 0, len(ks)) + totalLatency := time.Duration(0) + if len(ks) == 0 { + return wanted, totalLatency + } + + // Filter for blocks that were actually wanted (as opposed to duplicates) + now := time.Now() + for _, c := range ks { + if sw.isWanted(c) { + wanted = append(wanted, c) + + // Measure latency + sentAt, ok := sw.liveWants[c] + if ok && !sentAt.IsZero() { + totalLatency += now.Sub(sentAt) + } + + // Remove the CID from the live wants / toFetch queue + delete(sw.liveWants, c) + sw.toFetch.Remove(c) + } + } + + // If the live wants ordering array is a long way out of sync with the + // live wants map, clean up the ordering array + if len(sw.liveWantsOrder)-len(sw.liveWants) > liveWantsOrderGCLimit { + cleaned := sw.liveWantsOrder[:0] + for _, c := range sw.liveWantsOrder { + if _, ok := sw.liveWants[c]; ok { + cleaned = append(cleaned, c) + } + } + sw.liveWantsOrder = cleaned + } + + return wanted, totalLatency +} + +// PrepareBroadcast saves the current time for each live want and returns the +// live want CIDs up to the broadcast limit. +func (sw *sessionWants) PrepareBroadcast() []cid.Cid { + now := time.Now() + live := make([]cid.Cid, 0, len(sw.liveWants)) + for _, c := range sw.liveWantsOrder { + if _, ok := sw.liveWants[c]; ok { + // No response was received for the want, so reset the sent time + // to now as we're about to broadcast + sw.liveWants[c] = now + + live = append(live, c) + if len(live) == sw.broadcastLimit { + break + } + } + } + + return live +} + +// CancelPending removes the given CIDs from the fetch queue. 
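The ordering-array cleanup in BlocksReceived above (like cidQueue.Cids) uses Go's in-place filter idiom: re-slice to length zero and append survivors, so no new backing array is allocated. The same idiom in isolation (illustrative helper, not from the package):

package main

import "fmt"

// compactInPlace keeps only the entries still present in live, reusing
// order's backing array via the order[:0] re-slice.
func compactInPlace(order []string, live map[string]bool) []string {
	kept := order[:0]
	for _, c := range order {
		if live[c] {
			kept = append(kept, c)
		}
	}
	return kept
}

func main() {
	live := map[string]bool{"a": true, "c": true}
	fmt.Println(compactInPlace([]string{"a", "b", "c"}, live)) // [a c]
}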
+func (sw *sessionWants) CancelPending(keys []cid.Cid) {
+ for _, k := range keys {
+ sw.toFetch.Remove(k)
+ }
+}
+
+// LiveWants returns a list of live wants
+func (sw *sessionWants) LiveWants() []cid.Cid {
+ live := make([]cid.Cid, 0, len(sw.liveWants))
+ for c := range sw.liveWants {
+ live = append(live, c)
+ }
+
+ return live
+}
+
+// RandomLiveWant returns a randomly selected live want
+func (sw *sessionWants) RandomLiveWant() cid.Cid {
+ if len(sw.liveWants) == 0 {
+ return cid.Cid{}
+ }
+
+ // picking a random live want
+ i := rand.Intn(len(sw.liveWants))
+ for k := range sw.liveWants {
+ if i == 0 {
+ return k
+ }
+ i--
+ }
+ return cid.Cid{}
+}
+
+// HasLiveWants indicates whether there are any live wants
+func (sw *sessionWants) HasLiveWants() bool {
+ return len(sw.liveWants) > 0
+}
+
+// Indicates whether the want is in either of the fetch or live queues
+func (sw *sessionWants) isWanted(c cid.Cid) bool {
+ _, ok := sw.liveWants[c]
+ if !ok {
+ ok = sw.toFetch.Has(c)
+ }
+ return ok
+}
diff --git a/bitswap/client/internal/session/sessionwants_test.go b/bitswap/client/internal/session/sessionwants_test.go
new file mode 100644
index 0000000000..bdb73ebd12
--- /dev/null
+++ b/bitswap/client/internal/session/sessionwants_test.go
@@ -0,0 +1,198 @@
+package session
+
+import (
+ "testing"
+
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
+ cid "github.com/ipfs/go-cid"
+)
+
+func TestEmptySessionWants(t *testing.T) {
+ test.Flaky(t)
+
+ sw := newSessionWants(broadcastLiveWantsLimit)
+
+ // Expect these functions to return nothing on a new sessionWants
+ lws := sw.PrepareBroadcast()
+ if len(lws) > 0 {
+ t.Fatal("expected no broadcast wants")
+ }
+ lws = sw.LiveWants()
+ if len(lws) > 0 {
+ t.Fatal("expected no live wants")
+ }
+ if sw.HasLiveWants() {
+ t.Fatal("expected not to have live wants")
+ }
+ rw := sw.RandomLiveWant()
+ if rw.Defined() {
+ t.Fatal("expected no random want")
+ }
+}
+
+func TestSessionWants(t *testing.T) {
+ test.Flaky(t)
+
+ sw := newSessionWants(5)
+ cids := testutil.GenerateCids(10)
+ others := testutil.GenerateCids(1)
+
+ // Add 10 new wants
+ // toFetch Live
+ // 9876543210
+ sw.BlocksRequested(cids)
+
+ // Get next wants with a limit of 5
+ // The first 5 cids should move into the live queue
+ // toFetch Live
+ // 98765 43210
+ nextw := sw.GetNextWants()
+ if len(nextw) != 5 {
+ t.Fatal("expected 5 next wants")
+ }
+ lws = sw.PrepareBroadcast()
+ if len(lws) != 5 {
+ t.Fatal("expected 5 broadcast wants", len(lws))
+ }
+ lws = sw.LiveWants()
+ if len(lws) != 5 {
+ t.Fatal("expected 5 live wants")
+ }
+ if !sw.HasLiveWants() {
+ t.Fatal("expected to have live wants")
+ }
+ rw := sw.RandomLiveWant()
+ if !rw.Defined() {
+ t.Fatal("expected random want")
+ }
+
+ // Two wanted blocks and one other block are received.
+ // The wanted blocks should be removed from the live wants queue
+ // (the other block CID should be ignored)
+ // toFetch Live
+ // 98765 432__
+ recvdCids := []cid.Cid{cids[0], cids[1], others[0]}
+ sw.BlocksReceived(recvdCids)
+ lws = sw.LiveWants()
+ if len(lws) != 3 {
+ t.Fatal("expected 3 live wants")
+ }
+
+ // Ask for next wants with a limit of 5
+ // Should move 2 wants from toFetch queue to live wants
+ // toFetch Live
+ // 987__ 65432
+ nextw = sw.GetNextWants()
+ if len(nextw) != 2 {
+ t.Fatal("expected 2 next wants")
+ }
+ lws = sw.LiveWants()
+ if len(lws) != 5 {
+ t.Fatal("expected 5 live wants")
+ }
+
+ // One wanted block and one dup block are received.
+ // The wanted block should be removed from the live
+ // wants queue.
+ // toFetch Live
+ // 987 654_2
+ recvdCids = []cid.Cid{cids[0], cids[3]}
+ sw.BlocksReceived(recvdCids)
+ lws = sw.LiveWants()
+ if len(lws) != 4 {
+ t.Fatal("expected 4 live wants")
+ }
+
+ // One block in the toFetch queue should be cancelled
+ // toFetch Live
+ // 9_7 654_2
+ sw.CancelPending([]cid.Cid{cids[8]})
+ lws = sw.LiveWants()
+ if len(lws) != 4 {
+ t.Fatal("expected 4 live wants")
+ }
+}
+
+func TestPrepareBroadcast(t *testing.T) {
+ test.Flaky(t)
+
+ sw := newSessionWants(3)
+ cids := testutil.GenerateCids(10)
+
+ // Add 6 new wants
+ // toFetch Live
+ // 543210
+ sw.BlocksRequested(cids[:6])
+
+ // Get next wants with a limit of 3
+ // The first 3 cids should move into the live queue
+ // toFetch Live
+ // 543 210
+ sw.GetNextWants()
+
+ // Broadcast should contain wants in order
+ for i := 0; i < 10; i++ {
+ ws := sw.PrepareBroadcast()
+ if len(ws) != 3 {
+ t.Fatal("should broadcast all live wants")
+ }
+ for idx, c := range ws {
+ if !c.Equals(cids[idx]) {
+ t.Fatal("broadcast should always return wants in order")
+ }
+ }
+ }
+
+ // One block received
+ // Remove a cid from the live queue
+ sw.BlocksReceived(cids[:1])
+ // toFetch Live
+ // 543 21_
+
+ // Add 4 new wants
+ // toFetch Live
+ // 9876543 21
+ sw.BlocksRequested(cids[6:])
+
+ // 2 Wants sent
+ // toFetch Live
+ // 98765 4321
+ sw.WantsSent(cids[3:5])
+
+ // Broadcast should contain wants in order
+ cids = cids[1:]
+ for i := 0; i < 10; i++ {
+ ws := sw.PrepareBroadcast()
+ if len(ws) != 3 {
+ t.Fatal("should broadcast live wants up to limit", len(ws), len(cids))
+ }
+ for idx, c := range ws {
+ if !c.Equals(cids[idx]) {
+ t.Fatal("broadcast should always return wants in order")
+ }
+ }
+ }
+}
+
+// Test that even after GC broadcast returns correct wants
+func TestPrepareBroadcastAfterGC(t *testing.T) {
+ test.Flaky(t)
+
+ sw := newSessionWants(5)
+ cids := testutil.GenerateCids(liveWantsOrderGCLimit * 2)
+
+ sw.BlocksRequested(cids)
+
+ // Trigger a sessionWants internal GC of the live wants
+ sw.BlocksReceived(cids[:liveWantsOrderGCLimit+1])
+ cids = cids[:liveWantsOrderGCLimit+1]
+
+ // Broadcast should contain wants in order
+ ws := sw.PrepareBroadcast()
+ for i, c := range ws {
+ if !c.Equals(cids[i]) {
+ t.Fatal("broadcast should always return wants in order")
+ }
+ }
+}
diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go
new file mode 100644
index 0000000000..41145fbf6d
--- /dev/null
+++ b/bitswap/client/internal/session/sessionwantsender.go
@@ -0,0 +1,767 @@
+package session
+
+import (
+ "context"
+
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+
+ cid "github.com/ipfs/go-cid"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+const (
+ // Maximum number of changes to accept before blocking
+ changesBufferSize = 128
+ // If the session receives this many DONT_HAVEs in a row from a peer,
+ // it prunes the peer from the session
+ peerDontHaveLimit = 16
+)
+
+// BlockPresence indicates whether a peer has a block.
+// Note that the order is important: we decide which peer to send a want to
+// based on knowing whether the peer has the block. 
E.g. we're more likely to send
+// a want to a peer that has the block than to a peer that doesn't have it,
+// so BPHave > BPDontHave
+type BlockPresence int
+
+const (
+ BPDontHave BlockPresence = iota
+ BPUnknown
+ BPHave
+)
+
+// SessionWantsCanceller provides a method to cancel wants
+type SessionWantsCanceller interface {
+ // Cancel wants for this session
+ CancelSessionWants(sid uint64, wants []cid.Cid)
+}
+
+// update encapsulates a message received by the session
+type update struct {
+ // Which peer sent the update
+ from peer.ID
+ // cids of blocks received
+ ks []cid.Cid
+ // HAVE message
+ haves []cid.Cid
+ // DONT_HAVE message
+ dontHaves []cid.Cid
+}
+
+// peerAvailability indicates a peer's connection state
+type peerAvailability struct {
+ target peer.ID
+ available bool
+}
+
+// change can be new wants, a new message received by the session,
+// or a change in the connection status of a peer
+type change struct {
+ // new wants requested
+ add []cid.Cid
+ // wants cancelled
+ cancel []cid.Cid
+ // new message received by session (blocks / HAVEs / DONT_HAVEs)
+ update update
+ // peer has connected / disconnected
+ availability peerAvailability
+}
+
+type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid)
+type onPeersExhaustedFn func([]cid.Cid)
+
+// sessionWantSender is responsible for sending want-have and want-block to
+// peers. For each want, it sends a single optimistic want-block request to
+// one peer and want-have requests to all other peers in the session.
+// To choose the best peer for the optimistic want-block it maintains a list
+// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and
+// consults the peer response tracker (records which peers sent us blocks).
+type sessionWantSender struct {
+ // The context is used when sending wants
+ ctx context.Context
+ // Called to shutdown the sessionWantSender
+ shutdown func()
+ // The sessionWantSender uses the closed channel to signal when it's
+ // finished shutting down
+ closed chan struct{}
+ // The session ID
+ sessionID uint64
+ // A channel that collects incoming changes (events)
+ changes chan change
+ // Information about each want indexed by CID
+ wants map[cid.Cid]*wantInfo
+ // Keeps track of how many consecutive DONT_HAVEs a peer has sent
+ peerConsecutiveDontHaves map[peer.ID]int
+ // Tracks which peers we have sent want-blocks to
+ swbt *sentWantBlocksTracker
+ // Tracks the number of blocks each peer sent us
+ peerRspTrkr *peerResponseTracker
+ // Sends wants to peers
+ pm PeerManager
+ // Keeps track of peers in the session
+ spm SessionPeerManager
+ // Cancels wants
+ canceller SessionWantsCanceller
+ // Keeps track of which peer has / doesn't have a block
+ bpm *bsbpm.BlockPresenceManager
+ // Called when wants are sent
+ onSend onSendFn
+ // Called when all peers explicitly don't have a block
+ onPeersExhausted onPeersExhaustedFn
+}
+
+func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller,
+ bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ sws := sessionWantSender{
+ ctx: ctx,
+ shutdown: cancel,
+ closed: make(chan struct{}),
+ sessionID: sid,
+ changes: make(chan change, changesBufferSize),
+ wants: make(map[cid.Cid]*wantInfo),
+ peerConsecutiveDontHaves: make(map[peer.ID]int),
+ swbt: newSentWantBlocksTracker(),
+ peerRspTrkr: newPeerResponseTracker(),
+
+ pm: pm,
+ spm: spm,
+ 
canceller: canceller, + bpm: bpm, + onSend: onSend, + onPeersExhausted: onPeersExhausted, + } + + return sws +} + +func (sws *sessionWantSender) ID() uint64 { + return sws.sessionID +} + +// Add is called when new wants are added to the session +func (sws *sessionWantSender) Add(ks []cid.Cid) { + if len(ks) == 0 { + return + } + sws.addChange(change{add: ks}) +} + +// Cancel is called when a request is cancelled +func (sws *sessionWantSender) Cancel(ks []cid.Cid) { + if len(ks) == 0 { + return + } + sws.addChange(change{cancel: ks}) +} + +// Update is called when the session receives a message with incoming blocks +// or HAVE / DONT_HAVE +func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 + if !hasUpdate { + return + } + + sws.addChange(change{ + update: update{from, ks, haves, dontHaves}, + }) +} + +// SignalAvailability is called by the PeerManager to signal that a peer has +// connected / disconnected +func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { + availability := peerAvailability{p, isAvailable} + // Add the change in a non-blocking manner to avoid the possibility of a + // deadlock + sws.addChangeNonBlocking(change{availability: availability}) +} + +// Run is the main loop for processing incoming changes +func (sws *sessionWantSender) Run() { + for { + select { + case ch := <-sws.changes: + sws.onChange([]change{ch}) + case <-sws.ctx.Done(): + // Unregister the session with the PeerManager + sws.pm.UnregisterSession(sws.sessionID) + + // Close the 'closed' channel to signal to Shutdown() that the run + // loop has exited + close(sws.closed) + return + } + } +} + +// Shutdown the sessionWantSender +func (sws *sessionWantSender) Shutdown() { + // Signal to the run loop to stop processing + sws.shutdown() + // Wait for run loop to complete + <-sws.closed +} + +// addChange adds a new change to the queue +func (sws *sessionWantSender) addChange(c change) { + select { + case sws.changes <- c: + case <-sws.ctx.Done(): + } +} + +// addChangeNonBlocking adds a new change to the queue, using a go-routine +// if the change blocks, so as to avoid potential deadlocks +func (sws *sessionWantSender) addChangeNonBlocking(c change) { + select { + case sws.changes <- c: + default: + // changes channel is full, so add change in a go routine instead + go func() { + select { + case sws.changes <- c: + case <-sws.ctx.Done(): + } + }() + } +} + +// collectChanges collects all the changes that have occurred since the last +// invocation of onChange +func (sws *sessionWantSender) collectChanges(changes []change) []change { + for len(changes) < changesBufferSize { + select { + case next := <-sws.changes: + changes = append(changes, next) + default: + return changes + } + } + return changes +} + +// onChange processes the next set of changes +func (sws *sessionWantSender) onChange(changes []change) { + // Several changes may have been recorded since the last time we checked, + // so pop all outstanding changes from the channel + changes = sws.collectChanges(changes) + + // Apply each change + availability := make(map[peer.ID]bool, len(changes)) + cancels := make([]cid.Cid, 0) + var updates []update + for _, chng := range changes { + // Initialize info for new wants + for _, c := range chng.add { + sws.trackWant(c) + } + + // Remove cancelled wants + for _, c := range chng.cancel { + sws.untrackWant(c) + cancels = append(cancels, c) + } + + // Consolidate 
updates and changes to availability + if chng.update.from != "" { + // If the update includes blocks or haves, treat it as signaling that + // the peer is available + if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { + p := chng.update.from + availability[p] = true + + // Register with the PeerManager + sws.pm.RegisterSession(p, sws) + } + + updates = append(updates, chng.update) + } + if chng.availability.target != "" { + availability[chng.availability.target] = chng.availability.available + } + } + + // Update peer availability + newlyAvailable, newlyUnavailable := sws.processAvailability(availability) + + // Update wants + dontHaves := sws.processUpdates(updates) + + // Check if there are any wants for which all peers have indicated they + // don't have the want + sws.checkForExhaustedWants(dontHaves, newlyUnavailable) + + // If there are any cancels, send them + if len(cancels) > 0 { + sws.canceller.CancelSessionWants(sws.sessionID, cancels) + } + + // If there are some connected peers, send any pending wants + if sws.spm.HasPeers() { + sws.sendNextWants(newlyAvailable) + } +} + +// processAvailability updates the want queue with any changes in +// peer availability +// It returns the peers that have become +// - newly available +// - newly unavailable +func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { + var newlyAvailable []peer.ID + var newlyUnavailable []peer.ID + for p, isNowAvailable := range availability { + stateChange := false + if isNowAvailable { + isNewPeer := sws.spm.AddPeer(p) + if isNewPeer { + stateChange = true + newlyAvailable = append(newlyAvailable, p) + } + } else { + wasAvailable := sws.spm.RemovePeer(p) + if wasAvailable { + stateChange = true + newlyUnavailable = append(newlyUnavailable, p) + } + } + + // If the state has changed + if stateChange { + sws.updateWantsPeerAvailability(p, isNowAvailable) + // Reset the count of consecutive DONT_HAVEs received from the + // peer + delete(sws.peerConsecutiveDontHaves, p) + } + } + + return newlyAvailable, newlyUnavailable +} + +// trackWant creates a new entry in the map of CID -> want info +func (sws *sessionWantSender) trackWant(c cid.Cid) { + if _, ok := sws.wants[c]; ok { + return + } + + // Create the want info + wi := newWantInfo(sws.peerRspTrkr) + sws.wants[c] = wi + + // For each available peer, register any information we know about + // whether the peer has the block + for _, p := range sws.spm.Peers() { + sws.updateWantBlockPresence(c, p) + } +} + +// untrackWant removes an entry from the map of CID -> want info +func (sws *sessionWantSender) untrackWant(c cid.Cid) { + delete(sws.wants, c) +} + +// processUpdates processes incoming blocks and HAVE / DONT_HAVEs. +// It returns all DONT_HAVEs. 
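One rule inside processUpdates is easy to miss: a peer's count of consecutive DONT_HAVEs is reset by any block or HAVE, and the peer is only flagged for pruning once a full streak of peerDontHaveLimit has been exceeded. Below is a reduced, self-contained sketch of that counting rule; plain strings stand in for peer.ID, and the type and helper names are illustrative, not part of this package:

```go
package main

import "fmt"

// dontHaveStreaks mirrors the consecutive-DONT_HAVE accounting in
// processUpdates below: a streak that has already reached the limit flags
// the peer for pruning, and any block or HAVE resets the streak.
type dontHaveStreaks struct {
	limit  int
	streak map[string]int // peer -> consecutive DONT_HAVEs
}

// onDontHave records one more DONT_HAVE and reports whether the peer
// should be pruned from the session.
func (d *dontHaveStreaks) onDontHave(p string) bool {
	if d.streak[p] == d.limit {
		return true
	}
	d.streak[p]++
	return false
}

// onBlockOrHave resets the peer's streak, as processUpdates does with
// delete() on peerConsecutiveDontHaves.
func (d *dontHaveStreaks) onBlockOrHave(p string) {
	delete(d.streak, p)
}

func main() {
	d := &dontHaveStreaks{limit: 16, streak: map[string]int{}}
	pruned := false
	for i := 0; i < 17; i++ {
		pruned = d.onDontHave("peerA")
	}
	fmt.Println(pruned) // true: the 17th consecutive DONT_HAVE crosses the limit
	d.onBlockOrHave("peerA")
	fmt.Println(d.onDontHave("peerA")) // false: the HAVE reset the streak
}
```

Note the asymmetry: the check happens before the increment, so a peer is flagged on the first DONT_HAVE after the streak reaches the limit, matching the `== peerDontHaveLimit` comparison in the function below.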
+func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { + // Process received blocks keys + blkCids := cid.NewSet() + for _, upd := range updates { + for _, c := range upd.ks { + blkCids.Add(c) + + // Remove the want + removed := sws.removeWant(c) + if removed != nil { + // Inform the peer tracker that this peer was the first to send + // us the block + sws.peerRspTrkr.receivedBlockFrom(upd.from) + + // Protect the connection to this peer so that we can ensure + // that the connection doesn't get pruned by the connection + // manager + sws.spm.ProtectConnection(upd.from) + } + delete(sws.peerConsecutiveDontHaves, upd.from) + } + } + + // Process received DONT_HAVEs + dontHaves := cid.NewSet() + prunePeers := make(map[peer.ID]struct{}) + for _, upd := range updates { + for _, c := range upd.dontHaves { + // Track the number of consecutive DONT_HAVEs each peer receives + if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { + prunePeers[upd.from] = struct{}{} + } else { + sws.peerConsecutiveDontHaves[upd.from]++ + } + + // If we already received a block for the want, there's no need to + // update block presence etc + if blkCids.Has(c) { + continue + } + + dontHaves.Add(c) + + // Update the block presence for the peer + sws.updateWantBlockPresence(c, upd.from) + + // Check if the DONT_HAVE is in response to a want-block + // (could also be in response to want-have) + if sws.swbt.haveSentWantBlockTo(upd.from, c) { + // If we were waiting for a response from this peer, clear + // sentTo so that we can send the want to another peer + if sentTo, ok := sws.getWantSentTo(c); ok && sentTo == upd.from { + sws.setWantSentTo(c, "") + } + } + } + } + + // Process received HAVEs + for _, upd := range updates { + for _, c := range upd.haves { + // If we haven't already received a block for the want + if !blkCids.Has(c) { + // Update the block presence for the peer + sws.updateWantBlockPresence(c, upd.from) + } + + // Clear the consecutive DONT_HAVE count for the peer + delete(sws.peerConsecutiveDontHaves, upd.from) + delete(prunePeers, upd.from) + } + } + + // If any peers have sent us too many consecutive DONT_HAVEs, remove them + // from the session + for p := range prunePeers { + // Before removing the peer from the session, check if the peer + // sent us a HAVE for a block that we want + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + delete(prunePeers, p) + break + } + } + } + if len(prunePeers) > 0 { + go func() { + for p := range prunePeers { + // Peer doesn't have anything we want, so remove it + log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) + sws.SignalAvailability(p, false) + } + }() + } + + return dontHaves.Keys() +} + +// checkForExhaustedWants checks if there are any wants for which all peers +// have sent a DONT_HAVE. We call these "exhausted" wants. 
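Stripped of the session plumbing, the exhaustion rule the function below applies is a simple quantifier: a want is exhausted when every currently available peer has explicitly said DONT_HAVE for it (the real check delegates to BlockPresenceManager.AllPeersDoNotHaveBlock). A minimal sketch under that reading, with strings standing in for peer and CID types:

```go
package main

import "fmt"

// exhaustedKeys returns the keys for which every available peer has sent an
// explicit DONT_HAVE. With no peers at all nothing qualifies here; the
// function below handles that case separately by broadcasting all wants.
func exhaustedKeys(peers []string, dontHave map[string]map[string]bool, keys []string) []string {
	var out []string
	if len(peers) == 0 {
		return out
	}
	for _, k := range keys {
		all := true
		for _, p := range peers {
			if !dontHave[p][k] {
				all = false
				break
			}
		}
		if all {
			out = append(out, k)
		}
	}
	return out
}

func main() {
	dontHave := map[string]map[string]bool{
		"peerA": {"cid1": true},
		"peerB": {"cid1": true, "cid2": true},
	}
	peers := []string{"peerA", "peerB"}
	fmt.Println(exhaustedKeys(peers, dontHave, []string{"cid1", "cid2"}))
	// [cid1]: peerA has not said DONT_HAVE for cid2, so cid2 is not exhausted
}
```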
+func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) {
+ // If there are no new DONT_HAVEs, and no peers became unavailable, then
+ // we don't need to check for exhausted wants
+ if len(dontHaves) == 0 && len(newlyUnavailable) == 0 {
+ return
+ }
+
+ // We need to check each want for which we just received a DONT_HAVE
+ wants := dontHaves
+
+ // If a peer just became unavailable, then we need to check all wants
+ // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID)
+ if len(newlyUnavailable) > 0 {
+ // Collect all pending wants
+ wants = make([]cid.Cid, 0, len(sws.wants))
+ for c := range sws.wants {
+ wants = append(wants, c)
+ }
+
+ // If the last available peer in the session has become unavailable
+ // then we need to broadcast all pending wants
+ if !sws.spm.HasPeers() {
+ sws.processExhaustedWants(wants)
+ return
+ }
+ }
+
+ // If all available peers for a cid sent a DONT_HAVE, signal to the session
+ // that we've exhausted available peers
+ if len(wants) > 0 {
+ exhausted := sws.bpm.AllPeersDoNotHaveBlock(sws.spm.Peers(), wants)
+ sws.processExhaustedWants(exhausted)
+ }
+}
+
+// processExhaustedWants filters the list so that only those wants that haven't
+// already been marked as exhausted are passed to onPeersExhausted()
+func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) {
+ newlyExhausted := sws.newlyExhausted(exhausted)
+ if len(newlyExhausted) > 0 {
+ sws.onPeersExhausted(newlyExhausted)
+ }
+}
+
+// convenience structs for passing around want-blocks and want-haves for a peer
+type wantSets struct {
+ wantBlocks *cid.Set
+ wantHaves *cid.Set
+}
+
+type allWants map[peer.ID]*wantSets
+
+func (aw allWants) forPeer(p peer.ID) *wantSets {
+ if _, ok := aw[p]; !ok {
+ aw[p] = &wantSets{
+ wantBlocks: cid.NewSet(),
+ wantHaves: cid.NewSet(),
+ }
+ }
+ return aw[p]
+}
+
+// sendNextWants sends wants to peers according to the latest information
+// about which peers have / don't have blocks
+func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) {
+ toSend := make(allWants)
+
+ for c, wi := range sws.wants {
+ // Ensure we send want-haves to any newly available peers
+ for _, p := range newlyAvailable {
+ toSend.forPeer(p).wantHaves.Add(c)
+ }
+
+ // We already sent a want-block to a peer and haven't yet received a
+ // response
+ if wi.sentTo != "" {
+ continue
+ }
+
+ // All the peers have indicated that they don't have the block
+ // corresponding to this want, so we must wait to discover more peers
+ if wi.bestPeer == "" {
+ // TODO: work this out in real time instead of using bestP?
+ continue
+ }
+
+ // Record that we are sending a want-block for this want to the peer
+ sws.setWantSentTo(c, wi.bestPeer)
+
+ // Send a want-block to the chosen peer
+ toSend.forPeer(wi.bestPeer).wantBlocks.Add(c)
+
+ // Send a want-have to each other peer
+ for _, op := range sws.spm.Peers() {
+ if op != wi.bestPeer {
+ toSend.forPeer(op).wantHaves.Add(c)
+ }
+ }
+ }
+
+ // Send any wants we've collected
+ sws.sendWants(toSend)
+}
+
+// sendWants sends want-have and want-blocks to the appropriate peers
+func (sws *sessionWantSender) sendWants(sends allWants) {
+ // For each peer we're sending a request to
+ for p, snd := range sends {
+ // Piggyback some other want-haves onto the request to the peer
+ for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) {
+ snd.wantHaves.Add(c)
+ }
+
+ // Send the wants to the peer.
+ // Note that the PeerManager ensures that we don't send duplicate
+ // want-haves / want-blocks to a peer, and that want-blocks take
+ // precedence over want-haves.
+ wblks := snd.wantBlocks.Keys()
+ whaves := snd.wantHaves.Keys()
+ sws.pm.SendWants(sws.ctx, p, wblks, whaves)
+
+ // Inform the session that we've sent the wants
+ sws.onSend(p, wblks, whaves)
+
+ // Record which peers we sent want-blocks to
+ sws.swbt.addSentWantBlocksTo(p, wblks)
+ }
+}
+
+// getPiggybackWantHaves gets the want-haves that should be piggybacked onto
+// a request that we are making to send want-blocks to a peer
+func (sws *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid {
+ var whs []cid.Cid
+ for c := range sws.wants {
+ // Don't send want-have if we're already sending a want-block
+ // (or have previously)
+ if !wantBlocks.Has(c) && !sws.swbt.haveSentWantBlockTo(p, c) {
+ whs = append(whs, c)
+ }
+ }
+ return whs
+}
+
+// newlyExhausted filters the list of keys for wants that have not already
+// been marked as exhausted (all peers indicated they don't have the block)
+func (sws *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid {
+ var res []cid.Cid
+ for _, c := range ks {
+ if wi, ok := sws.wants[c]; ok {
+ if !wi.exhausted {
+ res = append(res, c)
+ wi.exhausted = true
+ }
+ }
+ }
+ return res
+}
+
+// removeWant is called when the corresponding block is received
+func (sws *sessionWantSender) removeWant(c cid.Cid) *wantInfo {
+ if wi, ok := sws.wants[c]; ok {
+ delete(sws.wants, c)
+ return wi
+ }
+ return nil
+}
+
+// updateWantsPeerAvailability is called when the availability changes for a
+// peer. It updates all the wants accordingly.
+func (sws *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) {
+ for c, wi := range sws.wants {
+ if isNowAvailable {
+ sws.updateWantBlockPresence(c, p)
+ } else {
+ wi.removePeer(p)
+ }
+ }
+}
+
+// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given
+// want / peer
+func (sws *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) {
+ wi, ok := sws.wants[c]
+ if !ok {
+ return
+ }
+
+ // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the
+ // block presence for the peer / cid combination
+ switch {
+ case sws.bpm.PeerHasBlock(p, c):
+ wi.setPeerBlockPresence(p, BPHave)
+ case sws.bpm.PeerDoesNotHaveBlock(p, c):
+ wi.setPeerBlockPresence(p, BPDontHave)
+ default:
+ wi.setPeerBlockPresence(p, BPUnknown)
+ }
+}
+
+// Which peer was the want sent to
+func (sws *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) {
+ if wi, ok := sws.wants[c]; ok {
+ return wi.sentTo, true
+ }
+ return "", false
+}
+
+// Record which peer the want was sent to
+func (sws *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) {
+ if wi, ok := sws.wants[c]; ok {
+ wi.sentTo = p
+ }
+}
+
+// wantInfo keeps track of the information for a want
+type wantInfo struct {
+ // Tracks HAVE / DONT_HAVE sent to us for the want by each peer
+ blockPresence map[peer.ID]BlockPresence
+ // The peer that we've sent a want-block to (cleared when we get a response)
+ sentTo peer.ID
+ // The "best" peer to send the want to next
+ bestPeer peer.ID
+ // Keeps track of how many hits / misses each peer has sent us for wants
+ // in the session
+ peerRspTrkr *peerResponseTracker
+ // true if all known peers have sent a DONT_HAVE for this want
+ exhausted bool
+}
+
+// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo {
+func newWantInfo(prt
*peerResponseTracker) *wantInfo { + return &wantInfo{ + blockPresence: make(map[peer.ID]BlockPresence), + peerRspTrkr: prt, + exhausted: false, + } +} + +// setPeerBlockPresence sets the block presence for the given peer +func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) { + wi.blockPresence[p] = bp + wi.calculateBestPeer() + + // If a peer informed us that it has a block then make sure the want is no + // longer flagged as exhausted (exhausted means no peers have the block) + if bp == BPHave { + wi.exhausted = false + } +} + +// removePeer deletes the given peer from the want info +func (wi *wantInfo) removePeer(p peer.ID) { + // If we were waiting to hear back from the peer that is being removed, + // clear the sentTo field so we no longer wait + if p == wi.sentTo { + wi.sentTo = "" + } + delete(wi.blockPresence, p) + wi.calculateBestPeer() +} + +// calculateBestPeer finds the best peer to send the want to next +func (wi *wantInfo) calculateBestPeer() { + // Recalculate the best peer + bestBP := BPDontHave + bestPeer := peer.ID("") + + // Find the peer with the best block presence, recording how many peers + // share the block presence + countWithBest := 0 + for p, bp := range wi.blockPresence { + if bp > bestBP { + bestBP = bp + bestPeer = p + countWithBest = 1 + } else if bp == bestBP { + countWithBest++ + } + } + wi.bestPeer = bestPeer + + // If no peer has a block presence better than DONT_HAVE, bail out + if bestPeer == "" { + return + } + + // If there was only one peer with the best block presence, we're done + if countWithBest <= 1 { + return + } + + // There were multiple peers with the best block presence, so choose one of + // them to be the best + var peersWithBest []peer.ID + for p, bp := range wi.blockPresence { + if bp == bestBP { + peersWithBest = append(peersWithBest, p) + } + } + wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest) +} diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go new file mode 100644 index 0000000000..97ff788a9d --- /dev/null +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ -0,0 +1,942 @@ +package session + +import ( + "context" + "sync" + "testing" + "time" + + bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager" + bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" + bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +type sentWants struct { + sync.Mutex + p peer.ID + wantHaves *cid.Set + wantBlocks *cid.Set +} + +func (sw *sentWants) add(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + sw.Lock() + defer sw.Unlock() + + for _, c := range wantBlocks { + sw.wantBlocks.Add(c) + } + for _, c := range wantHaves { + if !sw.wantBlocks.Has(c) { + sw.wantHaves.Add(c) + } + } + +} +func (sw *sentWants) wantHavesKeys() []cid.Cid { + sw.Lock() + defer sw.Unlock() + return sw.wantHaves.Keys() +} +func (sw *sentWants) wantBlocksKeys() []cid.Cid { + sw.Lock() + defer sw.Unlock() + return sw.wantBlocks.Keys() +} + +type mockPeerManager struct { + lk sync.Mutex + peerSessions map[peer.ID]bspm.Session + peerSends map[peer.ID]*sentWants +} + +func newMockPeerManager() *mockPeerManager { + return &mockPeerManager{ + peerSessions: make(map[peer.ID]bspm.Session), + peerSends: make(map[peer.ID]*sentWants), + } +} + +func (pm 
*mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) { + pm.lk.Lock() + defer pm.lk.Unlock() + + pm.peerSessions[p] = sess +} + +func (pm *mockPeerManager) has(p peer.ID, sid uint64) bool { + pm.lk.Lock() + defer pm.lk.Unlock() + + if session, ok := pm.peerSessions[p]; ok { + return session.ID() == sid + } + return false +} + +func (*mockPeerManager) UnregisterSession(uint64) {} +func (*mockPeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} +func (*mockPeerManager) SendCancels(context.Context, []cid.Cid) {} + +func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + pm.lk.Lock() + defer pm.lk.Unlock() + + sw, ok := pm.peerSends[p] + if !ok { + sw = &sentWants{p: p, wantHaves: cid.NewSet(), wantBlocks: cid.NewSet()} + pm.peerSends[p] = sw + } + sw.add(wantBlocks, wantHaves) +} + +func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { + time.Sleep(10 * time.Millisecond) + + pm.lk.Lock() + defer pm.lk.Unlock() + nw := make(map[peer.ID]*sentWants) + for p, sentWants := range pm.peerSends { + nw[p] = sentWants + } + return nw +} + +func (pm *mockPeerManager) clearWants() { + pm.lk.Lock() + defer pm.lk.Unlock() + + for p := range pm.peerSends { + delete(pm.peerSends, p) + } +} + +type exhaustedPeers struct { + lk sync.Mutex + ks []cid.Cid +} + +func (ep *exhaustedPeers) onPeersExhausted(ks []cid.Cid) { + ep.lk.Lock() + defer ep.lk.Unlock() + + ep.ks = append(ep.ks, ks...) +} + +func (ep *exhaustedPeers) clear() { + ep.lk.Lock() + defer ep.lk.Unlock() + + ep.ks = nil +} + +func (ep *exhaustedPeers) exhausted() []cid.Cid { + ep.lk.Lock() + defer ep.lk.Unlock() + + return append([]cid.Cid{}, ep.ks...) +} + +func TestSendWants(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(4) + peers := testutil.GeneratePeers(1) + peerA := peers[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + blkCids0 := cids[0:2] + spm.Add(blkCids0) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { + t.Fatal("Wrong keys") + } + if len(sw.wantHavesKeys()) > 0 { + t.Fatal("Expecting no want-haves") + } +} + +func TestSendsWantBlockToOnePeerOnly(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(4) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + blkCids0 := cids[0:2] + spm.Add(blkCids0) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: 
want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerB: HAVE cid0 + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Have not received response from peerA, so should not send want-block to + // peerB. Should have sent + // peerB: want-have cid0, cid1 + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + if sw.wantBlocks.Len() > 0 { + t.Fatal("Expecting no want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantHavesKeys(), blkCids0) { + t.Fatal("Wrong keys") + } +} + +func TestReceiveBlock(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerA: block cid0, DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}) + // peerB: HAVE cid0, cid1 + bpm.ReceiveFrom(peerB, cids, []cid.Cid{}) + spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should have sent + // peerB: want-block cid1 + // (should not have sent want-block for cid0 because block0 has already + // been received) + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + wb := sw.wantBlocksKeys() + if len(wb) != 1 || !wb[0].Equals(cids[1]) { + t.Fatal("Wrong keys", wb) + } +} + +func TestCancelWants(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(4) + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1, cid2 + blkCids := cids[0:3] + spm.Add(blkCids) + + time.Sleep(5 * time.Millisecond) + + // cancel cid0, cid2 + cancelCids := []cid.Cid{cids[0], cids[2]} + spm.Cancel(cancelCids) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Should have sent cancels for cid0, cid2 + sent := swc.cancelled() + if !testutil.MatchKeysIgnoreOrder(sent, cancelCids) { + t.Fatal("Wrong keys") + } +} + +func TestRegisterSessionWithPeerManager(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(2) 
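+ // An update from each peer should register the session with the mock PeerManager.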
+ peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // peerA: HAVE cid0 + spm.Update(peerA, nil, cids[:1], nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect session to have been registered with PeerManager + if !pm.has(peerA, sid) { + t.Fatal("Expected HAVE to register session with PeerManager") + } + + // peerB: block cid1 + spm.Update(peerB, cids[1:], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect session to have been registered with PeerManager + if !pm.has(peerB, sid) { + t.Fatal("Expected HAVE to register session with PeerManager") + } +} + +func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(3) + peerA := peers[0] + peerB := peers[1] + peerC := peers[2] + sid := uint64(1) + pm := newMockPeerManager() + fpt := newFakePeerTagger() + fpm := bsspm.New(1, fpt) + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0 + spm.Add(cids[:1]) + + // peerA: block cid0 + spm.Update(peerA, cids[:1], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer A to be protected as it was first to send the block + if !fpt.isProtected(peerA) { + t.Fatal("Expected first peer to send block to have protected connection") + } + + // peerB: block cid0 + spm.Update(peerB, cids[:1], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer B not to be protected as it was not first to send the block + if fpt.isProtected(peerB) { + t.Fatal("Expected peer not to be protected") + } + + // peerC: block cid1 + spm.Update(peerC, cids[1:], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer C not to be protected as we didn't want the block it sent + if fpt.isProtected(peerC) { + t.Fatal("Expected peer not to be protected") + } +} + +func TestPeerUnavailable(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of 
what's been sent easier) + pm.clearWants() + + // peerB: HAVE cid0 + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should not have sent anything because want-blocks were already sent to + // peer A + sw, ok = peerSends[peerB] + if ok && sw.wantBlocks.Len() > 0 { + t.Fatal("Expected no wants sent to peer") + } + + // peerA becomes unavailable + spm.SignalAvailability(peerA, false) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should now have sent want-block cid0, cid1 to peerB + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { + t.Fatal("Wrong keys") + } +} + +func TestPeersExhausted(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(3) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + ep := exhaustedPeers{} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + + // peerA: HAVE cid0 + bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer A as being available + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + + // peerA: DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}) + + time.Sleep(5 * time.Millisecond) + + // All available peers (peer A) have sent us a DONT_HAVE for cid1, + // so expect that onPeersExhausted() will be called with cid1 + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { + t.Fatal("Wrong keys") + } + + // Clear exhausted cids + ep.clear() + + // peerB: HAVE cid0 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer B as being available + spm.Update(peerB, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + + // peerB: DONT_HAVE cid1, cid2 + bpm.ReceiveFrom(peerB, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) + + // Wait for processing to complete + pm.waitNextWants() + + // All available peers (peer A and peer B) have sent us a DONT_HAVE + // for cid1, but we already called onPeersExhausted with cid1, so it + // should not be called again + if len(ep.exhausted()) > 0 { + t.Fatal("Wrong keys") + } + + // peerA: DONT_HAVE cid2 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[2]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[2]}) + + // Wait for processing to complete + pm.waitNextWants() + + // All available peers (peer A and peer B) have sent us a DONT_HAVE for + // cid2, so expect that onPeersExhausted() will be called with cid2 + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[2]}) { + t.Fatal("Wrong keys") + } +} + +// Tests that when +// - all the peers except one have sent a DONT_HAVE for a CID +// - the remaining peer becomes unavailable +// onPeersExhausted should be sent for that CID +func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := 
newFakeSessionPeerManager()
+ swc := newMockSessionMgr()
+ bpm := bsbpm.New()
+ onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
+
+ ep := exhaustedPeers{}
+ spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted)
+
+ go spm.Run()
+
+ // add cid0, cid1
+ spm.Add(cids)
+
+ // peerA: HAVE cid0
+ bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{})
+ // Note: this also registers peer A as being available
+ spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{})
+ // peerB: HAVE cid0
+ bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{})
+ // Note: this also registers peer B as being available
+ spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{})
+
+ // peerA: DONT_HAVE cid1
+ bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]})
+ spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]})
+
+ time.Sleep(5 * time.Millisecond)
+
+ // peerB: becomes unavailable
+ spm.SignalAvailability(peerB, false)
+
+ time.Sleep(5 * time.Millisecond)
+
+ // All remaining peers (peer A) have sent us a DONT_HAVE for cid1,
+ // so expect that onPeersExhausted() will be called with cid1
+ if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) {
+ t.Fatal("Wrong keys")
+ }
+}
+
+// Tests that when all the peers are removed from the session
+// onPeersExhausted should be called with all outstanding CIDs
+func TestPeersExhaustedAllPeersUnavailable(t *testing.T) {
+ test.Flaky(t)
+
+ cids := testutil.GenerateCids(3)
+ peers := testutil.GeneratePeers(2)
+ peerA := peers[0]
+ peerB := peers[1]
+ sid := uint64(1)
+ pm := newMockPeerManager()
+ fpm := newFakeSessionPeerManager()
+ swc := newMockSessionMgr()
+ bpm := bsbpm.New()
+ onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
+
+ ep := exhaustedPeers{}
+ spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted)
+
+ go spm.Run()
+
+ // add cid0, cid1, cid2
+ spm.Add(cids)
+
+ // peerA: receive block for cid0 (and register peer A with sessionWantSender)
+ spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{})
+ // peerB: HAVE cid0
+ bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{})
+ // Note: this also registers peer B as being available
+ spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{})
+
+ time.Sleep(5 * time.Millisecond)
+
+ // peerA and peerB: become unavailable
+ spm.SignalAvailability(peerA, false)
+ spm.SignalAvailability(peerB, false)
+
+ time.Sleep(5 * time.Millisecond)
+
+ // Expect that onPeersExhausted() will be called with all cids for blocks
+ // that have not been received
+ if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1], cids[2]}) {
+ t.Fatal("Wrong keys")
+ }
+}
+
+func TestConsecutiveDontHaveLimit(t *testing.T) {
+ test.Flaky(t)
+
+ cids := testutil.GenerateCids(peerDontHaveLimit + 10)
+ p := testutil.GeneratePeers(1)[0]
+ sid := uint64(1)
+ pm := newMockPeerManager()
+ fpm := newFakeSessionPeerManager()
+ swc := newMockSessionMgr()
+ bpm := bsbpm.New()
+ onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
+ onPeersExhausted := func([]cid.Cid) {}
+ spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted)
+ defer spm.Shutdown()
+
+ go spm.Run()
+
+ // Add all cids as wants
+ spm.Add(cids)
+
+ // Receive a block from peer (adds it to the session)
+ spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{})
+
+ // Wait for processing to complete
+ time.Sleep(10 * time.Millisecond)
+
+ // Peer should be available
+ if has := fpm.HasPeer(p); !has {
+ t.Fatal("Expected peer to be available")
+ }
+
+ // 
Receive DONT_HAVEs from peer that do not exceed limit + for _, c := range cids[1:peerDontHaveLimit] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[peerDontHaveLimit:] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Session should remove peer + if has := fpm.HasPeer(p); has { + t.Fatal("Expected peer not to be available") + } +} + +func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVE then HAVE then DONT_HAVE from peer, + // where consecutive DONT_HAVEs would have exceeded limit + // (but they are not consecutive) + for _, c := range cids[1:peerDontHaveLimit] { + // DONT_HAVEs + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + for _, c := range cids[peerDontHaveLimit : peerDontHaveLimit+1] { + // HAVEs + bpm.ReceiveFrom(p, []cid.Cid{c}, []cid.Cid{}) + spm.Update(p, []cid.Cid{}, []cid.Cid{c}, []cid.Cid{}) + } + for _, c := range cids[peerDontHaveLimit+1:] { + // DONT_HAVEs + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } +} + +func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+2] { + bpm.ReceiveFrom(p, 
[]cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Session should remove peer + if has := fpm.HasPeer(p); has { + t.Fatal("Expected peer not to be available") + } + + // Receive a HAVE from peer (adds it back into the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + cids2 := testutil.GenerateCids(peerDontHaveLimit + 10) + + // Receive DONT_HAVEs from peer that don't exceed limit + for _, c := range cids2[1:peerDontHaveLimit] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids2[peerDontHaveLimit:] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Session should remove peer + if has := fpm.HasPeer(p); has { + t.Fatal("Expected peer not to be available") + } +} + +func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { + test.Flaky(t) + + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+5] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Peer should still be available because it has a block that we want. 
+ // (We received a HAVE for cid 0 but didn't yet receive the block) + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } +} diff --git a/bitswap/client/internal/session/wantinfo_test.go b/bitswap/client/internal/session/wantinfo_test.go new file mode 100644 index 0000000000..c7348a4a1f --- /dev/null +++ b/bitswap/client/internal/session/wantinfo_test.go @@ -0,0 +1,89 @@ +package session + +import ( + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" +) + +func TestEmptyWantInfo(t *testing.T) { + test.Flaky(t) + + wp := newWantInfo(newPeerResponseTracker()) + + if wp.bestPeer != "" { + t.Fatal("expected no best peer") + } +} + +func TestSetPeerBlockPresence(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} + +func TestSetPeerBlockPresenceBestLower(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPHave) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} + +func TestRemoveThenSetDontHave(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.removePeer(peers[0]) + if wp.bestPeer != "" { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPUnknown) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go new file mode 100644 index 0000000000..78358c0420 --- /dev/null +++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go @@ -0,0 +1,201 @@ +package sessioninterestmanager + +import ( + "sync" + + blocks "github.com/ipfs/boxo/blocks" + + cid "github.com/ipfs/go-cid" +) + +// SessionInterestManager records the CIDs that each session is interested in. +type SessionInterestManager struct { + lk sync.RWMutex + wants map[cid.Cid]map[uint64]bool +} + +// New initializes a new SessionInterestManager. +func New() *SessionInterestManager { + return &SessionInterestManager{ + // Map of cids -> sessions -> bool + // + // The boolean indicates whether the session still wants the block + // or is just interested in receiving messages about it. + // + // Note that once the block is received the session no longer wants + // the block, but still wants to receive messages from peers who have + // the block as they may have other blocks the session is interested in. 
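+ // (RecordSessionInterest below creates the inner per-key maps lazily.)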
+ wants: make(map[cid.Cid]map[uint64]bool),
+ }
+}
+
+// When the client asks the session for blocks, the session calls
+// RecordSessionInterest() with those cids.
+func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) {
+ sim.lk.Lock()
+ defer sim.lk.Unlock()
+
+ // For each key
+ for _, c := range ks {
+ // Record that the session wants the blocks
+ if want, ok := sim.wants[c]; ok {
+ want[ses] = true
+ } else {
+ sim.wants[c] = map[uint64]bool{ses: true}
+ }
+ }
+}
+
+// When the session shuts down it calls RemoveSession().
+// Returns the keys that no session is interested in any more.
+func (sim *SessionInterestManager) RemoveSession(ses uint64) []cid.Cid {
+ sim.lk.Lock()
+ defer sim.lk.Unlock()
+
+ // The keys that no session is interested in
+ deletedKs := make([]cid.Cid, 0)
+
+ // For each known key
+ for c := range sim.wants {
+ // Remove the session from the list of sessions that want the key
+ delete(sim.wants[c], ses)
+
+ // If there are no more sessions that want the key
+ if len(sim.wants[c]) == 0 {
+ // Clean up the list memory
+ delete(sim.wants, c)
+ // Add the key to the list of keys that no session is interested in
+ deletedKs = append(deletedKs, c)
+ }
+ }
+
+ return deletedKs
+}
+
+// When the session receives blocks, it calls RemoveSessionWants().
+func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, ks []cid.Cid) {
+ sim.lk.Lock()
+ defer sim.lk.Unlock()
+
+ // For each key
+ for _, c := range ks {
+ // If the session wanted the block
+ if wanted, ok := sim.wants[c][ses]; ok && wanted {
+ // Mark the block as unwanted
+ sim.wants[c][ses] = false
+ }
+ }
+}
+
+// When a request is cancelled, the session calls RemoveSessionInterested().
+// Returns the keys that no session is interested in any more.
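Taken together, RecordSessionInterest, RemoveSessionWants and the removal paths implement a small state machine over the two-level map. Before the cancellation path below, here is a compressed, self-contained sketch of those semantics, using uint64 session IDs and strings in place of CIDs; the helper names are mine, not the package's API:

```go
package main

import "fmt"

// interestMap condenses SessionInterestManager's bookkeeping: per key, each
// session maps to true while it still wants the block and to false once the
// block has arrived but messages about the key remain interesting.
type interestMap map[string]map[uint64]bool

// record marks a session as wanting a key (RecordSessionInterest).
func (m interestMap) record(ses uint64, key string) {
	if m[key] == nil {
		m[key] = map[uint64]bool{}
	}
	m[key][ses] = true
}

// received flips want to interest-only, as RemoveSessionWants does.
func (m interestMap) received(ses uint64, key string) {
	if wanted, ok := m[key][ses]; ok && wanted {
		m[key][ses] = false
	}
}

// cancel drops the session from a key and reports whether the key is now
// orphaned, the per-key rule RemoveSessionInterested applies below.
func (m interestMap) cancel(ses uint64, key string) (orphaned bool) {
	if _, ok := m[key]; !ok {
		return false
	}
	delete(m[key], ses)
	if len(m[key]) == 0 {
		delete(m, key)
		return true
	}
	return false
}

func main() {
	m := interestMap{}
	m.record(1, "Qm1")
	m.record(2, "Qm1")
	m.received(1, "Qm1")            // session 1 got the block, stays interested
	fmt.Println(m.cancel(1, "Qm1")) // false: session 2 is still interested
	fmt.Println(m.cancel(2, "Qm1")) // true: no session cares about Qm1 now
}
```

The true/false distinction is what lets SplitWantedUnwanted later in this file discard blocks that no session wants while still notifying sessions that remain interested in messages about them.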
+func (sim *SessionInterestManager) RemoveSessionInterested(ses uint64, ks []cid.Cid) []cid.Cid { + sim.lk.Lock() + defer sim.lk.Unlock() + + // The keys that no session is interested in + deletedKs := make([]cid.Cid, 0, len(ks)) + + // For each key + for _, c := range ks { + // If there is a list of sessions that want the key + if _, ok := sim.wants[c]; ok { + // Remove the session from the list of sessions that want the key + delete(sim.wants[c], ses) + + // If there are no more sessions that want the key + if len(sim.wants[c]) == 0 { + // Clean up the list memory + delete(sim.wants, c) + // Add the key to the list of keys that no session is interested in + deletedKs = append(deletedKs, c) + } + } + } + + return deletedKs +} + +// The session calls FilterSessionInterested() to filter the sets of keys for +// those that the session is interested in +func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { + sim.lk.RLock() + defer sim.lk.RUnlock() + + // For each set of keys + kres := make([][]cid.Cid, len(ksets)) + for i, ks := range ksets { + // The set of keys that at least one session is interested in + has := make([]cid.Cid, 0, len(ks)) + + // For each key in the list + for _, c := range ks { + // If there is a session that's interested, add the key to the set + if _, ok := sim.wants[c][ses]; ok { + has = append(has, c) + } + } + kres[i] = has + } + return kres +} + +// When bitswap receives blocks it calls SplitWantedUnwanted() to discard +// unwanted blocks +func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { + sim.lk.RLock() + defer sim.lk.RUnlock() + + // Get the wanted block keys as a set + wantedKs := cid.NewSet() + for _, b := range blks { + c := b.Cid() + // For each session that is interested in the key + for ses := range sim.wants[c] { + // If the session wants the key (rather than just being interested) + if wanted, ok := sim.wants[c][ses]; ok && wanted { + // Add the key to the set + wantedKs.Add(c) + } + } + } + + // Separate the blocks into wanted and unwanted + wantedBlks := make([]blocks.Block, 0, len(blks)) + notWantedBlks := make([]blocks.Block, 0) + for _, b := range blks { + if wantedKs.Has(b.Cid()) { + wantedBlks = append(wantedBlks, b) + } else { + notWantedBlks = append(notWantedBlks, b) + } + } + return wantedBlks, notWantedBlks +} + +// When the SessionManager receives a message it calls InterestedSessions() to +// find out which sessions are interested in the message. +func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { + sim.lk.RLock() + defer sim.lk.RUnlock() + + ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) + ks = append(ks, blks...) + ks = append(ks, haves...) + ks = append(ks, dontHaves...) 
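+ // ks now holds the union of block, HAVE and DONT_HAVE keys from the message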
+ + // Create a set of sessions that are interested in the keys + sesSet := make(map[uint64]struct{}) + for _, c := range ks { + for s := range sim.wants[c] { + sesSet[s] = struct{}{} + } + } + + // Convert the set into a list + ses := make([]uint64, 0, len(sesSet)) + for s := range sesSet { + ses = append(ses, s) + } + return ses +} diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go new file mode 100644 index 0000000000..85857b9ba1 --- /dev/null +++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -0,0 +1,231 @@ +package sessioninterestmanager + +import ( + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" +) + +func TestEmpty(t *testing.T) { + test.Flaky(t) + + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(2) + res := sim.FilterSessionInterested(ses, cids) + if len(res) != 1 || len(res[0]) > 0 { + t.Fatal("Expected no interest") + } + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) > 0 { + t.Fatal("Expected no interest") + } +} + +func TestBasic(t *testing.T) { + test.Flaky(t) + + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + if len(sim.InterestedSessions(cids1, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + + sim.RecordSessionInterest(ses2, cids2) + res = sim.FilterSessionInterested(ses2, cids1[:1]) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + res = sim.FilterSessionInterested(ses2, cids2) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + + if len(sim.InterestedSessions(cids1[:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids1[1:], []cid.Cid{}, []cid.Cid{})) != 2 { + t.Fatal("Expected 2 sessions") + } +} + +func TestInterestedSessions(t *testing.T) { + test.Flaky(t) + + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(3) + sim.RecordSessionInterest(ses, cids[0:2]) + + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids[0:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, cids, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, cids[0:1], []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids)) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids[0:1])) != 1 { + t.Fatal("Expected 1 session") + } +} + +func TestRemoveSession(t *testing.T) { + test.Flaky(t) + + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + sim.RecordSessionInterest(ses2, cids2) + sim.RemoveSession(ses1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + + res = 
sim.FilterSessionInterested(ses2, cids1, cids2)
+	if len(res) != 2 {
+		t.Fatal("unexpected results size")
+	}
+	if len(res[0]) != 1 {
+		t.Fatal("Expected 1 key")
+	}
+	if len(res[1]) != 2 {
+		t.Fatal("Expected 2 keys")
+	}
+}
+
+func TestRemoveSessionInterested(t *testing.T) {
+	test.Flaky(t)
+
+	sim := New()
+
+	ses1 := uint64(1)
+	ses2 := uint64(2)
+	cids1 := testutil.GenerateCids(2)
+	cids2 := append(testutil.GenerateCids(1), cids1[1])
+	sim.RecordSessionInterest(ses1, cids1)
+	sim.RecordSessionInterest(ses2, cids2)
+
+	res := sim.RemoveSessionInterested(ses1, []cid.Cid{cids1[0]})
+	if len(res) != 1 {
+		t.Fatal("Expected one key to be left with no interested session")
+	}
+
+	interested := sim.FilterSessionInterested(ses1, cids1)
+	if len(interested) != 1 || len(interested[0]) != 1 {
+		t.Fatal("Expected ses1 still interested in one cid")
+	}
+
+	res = sim.RemoveSessionInterested(ses1, cids1)
+	if len(res) != 0 {
+		t.Fatal("Expected no keys without interest (ses2 is still interested in one cid)")
+	}
+
+	interested = sim.FilterSessionInterested(ses1, cids1)
+	if len(interested) != 1 || len(interested[0]) != 0 {
+		t.Fatal("Expected ses1 to have no remaining interest")
+	}
+
+	interested = sim.FilterSessionInterested(ses2, cids1)
+	if len(interested) != 1 || len(interested[0]) != 1 {
+		t.Fatal("Expected ses2 to still be interested in one key")
+	}
+}
+
+func TestSplitWantedUnwanted(t *testing.T) {
+	test.Flaky(t)
+
+	blks := testutil.GenerateBlocksOfSize(3, 1024)
+	sim := New()
+	ses1 := uint64(1)
+	ses2 := uint64(2)
+
+	var cids []cid.Cid
+	for _, b := range blks {
+		cids = append(cids, b.Cid())
+	}
+
+	// ses1:
+	// ses2:
+	wanted, unwanted := sim.SplitWantedUnwanted(blks)
+	if len(wanted) > 0 {
+		t.Fatal("Expected no blocks")
+	}
+	if len(unwanted) != 3 {
+		t.Fatal("Expected 3 blocks")
+	}
+
+	// ses1: 0 1
+	// ses2:
+	sim.RecordSessionInterest(ses1, cids[0:2])
+	wanted, unwanted = sim.SplitWantedUnwanted(blks)
+	if len(wanted) != 2 {
+		t.Fatal("Expected 2 blocks")
+	}
+	if len(unwanted) != 1 {
+		t.Fatal("Expected 1 block")
+	}
+
+	// ses1: 1
+	// ses2: 1 2
+	sim.RecordSessionInterest(ses2, cids[1:])
+	sim.RemoveSessionWants(ses1, cids[:1])
+
+	wanted, unwanted = sim.SplitWantedUnwanted(blks)
+	if len(wanted) != 2 {
+		t.Fatal("Expected 2 blocks")
+	}
+	if len(unwanted) != 1 {
+		t.Fatal("Expected 1 block")
+	}
+
+	// ses1:
+	// ses2: 1 2
+	sim.RemoveSessionWants(ses1, cids[1:2])
+
+	wanted, unwanted = sim.SplitWantedUnwanted(blks)
+	if len(wanted) != 2 {
+		t.Fatal("Expected 2 blocks")
+	}
+	if len(unwanted) != 1 {
+		t.Fatal("Expected 1 block")
+	}
+
+	// ses1:
+	// ses2: 2
+	sim.RemoveSessionWants(ses2, cids[1:2])
+
+	wanted, unwanted = sim.SplitWantedUnwanted(blks)
+	if len(wanted) != 1 {
+		t.Fatal("Expected 1 block")
+	}
+	if len(unwanted) != 2 {
+		t.Fatal("Expected 2 blocks")
+	}
+}
diff --git a/bitswap/client/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go
new file mode 100644
index 0000000000..38e490a2e1
--- /dev/null
+++ b/bitswap/client/internal/sessionmanager/sessionmanager.go
@@ -0,0 +1,196 @@
+package sessionmanager
+
+import (
+	"context"
+	"strconv"
+	"sync"
+	"time"
+
+	cid "github.com/ipfs/go-cid"
+	delay "github.com/ipfs/go-ipfs-delay"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/ipfs/boxo/bitswap/client/internal"
+	bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+	notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+	bssession "github.com/ipfs/boxo/bitswap/client/internal/session"
"github.com/ipfs/boxo/bitswap/client/internal/session" + bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" + exchange "github.com/ipfs/boxo/exchange" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// Session is a session that is managed by the session manager +type Session interface { + exchange.Fetcher + ID() uint64 + ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) + Shutdown() +} + +// SessionFactory generates a new session for the SessionManager to track. +type SessionFactory func( + ctx context.Context, + sm bssession.SessionManager, + id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) Session + +// PeerManagerFactory generates a new peer manager for a session. +type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager + +// SessionManager is responsible for creating, managing, and dispatching to +// sessions. +type SessionManager struct { + ctx context.Context + sessionFactory SessionFactory + sessionInterestManager *bssim.SessionInterestManager + peerManagerFactory PeerManagerFactory + blockPresenceManager *bsbpm.BlockPresenceManager + peerManager bssession.PeerManager + notif notifications.PubSub + + // Sessions + sessLk sync.RWMutex + sessions map[uint64]Session + + // Session Index + sessIDLk sync.Mutex + sessID uint64 + + self peer.ID +} + +// New creates a new SessionManager. +func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, + blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { + + return &SessionManager{ + ctx: ctx, + sessionFactory: sessionFactory, + sessionInterestManager: sessionInterestManager, + peerManagerFactory: peerManagerFactory, + blockPresenceManager: blockPresenceManager, + peerManager: peerManager, + notif: notif, + sessions: make(map[uint64]Session), + self: self, + } +} + +// NewSession initializes a session with the given context, and adds to the +// session manager. 
+func (sm *SessionManager) NewSession(ctx context.Context, + provSearchDelay time.Duration, + rebroadcastDelay delay.D) exchange.Fetcher { + id := sm.GetNextSessionID() + + ctx, span := internal.StartSpan(ctx, "SessionManager.NewSession", trace.WithAttributes(attribute.String("ID", strconv.FormatUint(id, 10)))) + defer span.End() + + pm := sm.peerManagerFactory(ctx, id) + session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) + + sm.sessLk.Lock() + if sm.sessions != nil { // check if SessionManager was shutdown + sm.sessions[id] = session + } + sm.sessLk.Unlock() + + return session +} + +func (sm *SessionManager) Shutdown() { + sm.sessLk.Lock() + + sessions := make([]Session, 0, len(sm.sessions)) + for _, ses := range sm.sessions { + sessions = append(sessions, ses) + } + + // Ensure that if Shutdown() is called twice we only shut down + // the sessions once + sm.sessions = nil + + sm.sessLk.Unlock() + + for _, ses := range sessions { + ses.Shutdown() + } +} + +func (sm *SessionManager) RemoveSession(sesid uint64) { + // Remove session from SessionInterestManager - returns the keys that no + // session is interested in anymore. + cancelKs := sm.sessionInterestManager.RemoveSession(sesid) + + // Cancel keys that no session is interested in anymore + sm.cancelWants(cancelKs) + + sm.sessLk.Lock() + defer sm.sessLk.Unlock() + + // Clean up session + if sm.sessions != nil { // check if SessionManager was shutdown + delete(sm.sessions, sesid) + } +} + +// GetNextSessionID returns the next sequential identifier for a session. +func (sm *SessionManager) GetNextSessionID() uint64 { + sm.sessIDLk.Lock() + defer sm.sessIDLk.Unlock() + + sm.sessID++ + return sm.sessID +} + +// ReceiveFrom is called when a new message is received +func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Record block presence for HAVE / DONT_HAVE + sm.blockPresenceManager.ReceiveFrom(p, haves, dontHaves) + + // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs + for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { + sm.sessLk.RLock() + if sm.sessions == nil { // check if SessionManager was shutdown + sm.sessLk.RUnlock() + return + } + sess, ok := sm.sessions[id] + sm.sessLk.RUnlock() + + if ok { + sess.ReceiveFrom(p, blks, haves, dontHaves) + } + } + + // Send CANCEL to all peers with want-have / want-block + sm.peerManager.SendCancels(ctx, blks) +} + +// CancelSessionWants is called when a session cancels wants because a call to +// GetBlocks() is cancelled +func (sm *SessionManager) CancelSessionWants(sesid uint64, wants []cid.Cid) { + // Remove session's interest in the given blocks - returns the keys that no + // session is interested in anymore. + cancelKs := sm.sessionInterestManager.RemoveSessionInterested(sesid, wants) + sm.cancelWants(cancelKs) +} + +func (sm *SessionManager) cancelWants(wants []cid.Cid) { + // Free up block presence tracking for keys that no session is interested + // in anymore + sm.blockPresenceManager.RemoveKeys(wants) + + // Send CANCEL to all peers for blocks that no session is interested in + // anymore. + // Note: use bitswap context because session context may already be Done. 
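+	// sm.ctx is the long-lived context passed to New, not any single
+	// session's context.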
+ sm.peerManager.SendCancels(sm.ctx, wants) +} diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go new file mode 100644 index 0000000000..98df7e8009 --- /dev/null +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -0,0 +1,268 @@ +package sessionmanager + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications" + bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" + bssession "github.com/ipfs/boxo/bitswap/client/internal/session" + bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" + "github.com/ipfs/boxo/bitswap/internal/testutil" + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + delay "github.com/ipfs/go-ipfs-delay" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +type fakeSession struct { + ks []cid.Cid + wantBlocks []cid.Cid + wantHaves []cid.Cid + id uint64 + pm *fakeSesPeerManager + sm bssession.SessionManager + notif notifications.PubSub +} + +func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { + return nil, nil +} +func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { + return nil, nil +} +func (fs *fakeSession) ID() uint64 { + return fs.id +} +func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + fs.ks = append(fs.ks, ks...) + fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) + fs.wantHaves = append(fs.wantHaves, wantHaves...) +} +func (fs *fakeSession) Shutdown() { + fs.sm.RemoveSession(fs.id) +} + +type fakeSesPeerManager struct { +} + +func (*fakeSesPeerManager) Peers() []peer.ID { return nil } +func (*fakeSesPeerManager) PeersDiscovered() bool { return false } +func (*fakeSesPeerManager) Shutdown() {} +func (*fakeSesPeerManager) AddPeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) HasPeers() bool { return false } +func (*fakeSesPeerManager) ProtectConnection(peer.ID) {} + +type fakePeerManager struct { + lk sync.Mutex + cancels []cid.Cid +} + +func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} +func (*fakePeerManager) UnregisterSession(uint64) {} +func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} +func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { + fpm.lk.Lock() + defer fpm.lk.Unlock() + fpm.cancels = append(fpm.cancels, cancels...) 
+} +func (fpm *fakePeerManager) cancelled() []cid.Cid { + fpm.lk.Lock() + defer fpm.lk.Unlock() + return fpm.cancels +} + +func sessionFactory(ctx context.Context, + sm bssession.SessionManager, + id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) Session { + fs := &fakeSession{ + id: id, + pm: sprm.(*fakeSesPeerManager), + sm: sm, + notif: notif, + } + go func() { + <-ctx.Done() + sm.RemoveSession(fs.id) + }() + return fs +} + +func peerManagerFactory(ctx context.Context, id uint64) bssession.SessionPeerManager { + return &fakeSesPeerManager{} +} + +func TestReceiveFrom(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + if len(firstSession.ks) == 0 || + len(secondSession.ks) > 0 || + len(thirdSession.ks) == 0 { + t.Fatal("should have received blocks but didn't") + } + + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) + if len(firstSession.wantBlocks) == 0 || + len(secondSession.wantBlocks) > 0 || + len(thirdSession.wantBlocks) == 0 { + t.Fatal("should have received want-blocks but didn't") + } + + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) + if len(firstSession.wantHaves) == 0 || + len(secondSession.wantHaves) > 0 || + len(thirdSession.wantHaves) == 0 { + t.Fatal("should have received want-haves but didn't") + } + + if len(pm.cancelled()) != 1 { + t.Fatal("should have sent cancel for received blocks") + } +} + +func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sm.Shutdown() + + // wait for sessions to get removed + time.Sleep(10 * time.Millisecond) + + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + 
if len(firstSession.ks) > 0 || + len(secondSession.ks) > 0 || + len(thirdSession.ks) > 0 { + t.Fatal("received blocks for sessions after manager is shutdown") + } +} + +func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sessionCtx, sessionCancel := context.WithCancel(ctx) + secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sessionCancel() + + // wait for sessions to get removed + time.Sleep(10 * time.Millisecond) + + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + if len(firstSession.ks) == 0 || + len(secondSession.ks) > 0 || + len(thirdSession.ks) == 0 { + t.Fatal("received blocks for sessions that are canceled") + } +} + +func TestShutdown(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + cids := []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), cids) + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, cids) + + if !bpm.HasKey(block.Cid()) { + t.Fatal("expected cid to be added to block presence manager") + } + + sm.Shutdown() + + // wait for cleanup + time.Sleep(10 * time.Millisecond) + + if bpm.HasKey(block.Cid()) { + t.Fatal("expected cid to be removed from block presence manager") + } + if !testutil.MatchKeysIgnoreOrder(pm.cancelled(), cids) { + t.Fatal("expected cancels to be sent") + } +} diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go new file mode 100644 index 0000000000..35784d7b77 --- /dev/null +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go @@ -0,0 +1,150 @@ +package sessionpeermanager + +import ( + "fmt" + "sync" + + logging "github.com/ipfs/go-log" + + peer "github.com/libp2p/go-libp2p/core/peer" +) + +var log = logging.Logger("bs:sprmgr") + +const ( + // Connection Manager tag value for session peers. Indicates to connection + // manager that it should keep the connection to the peer. + sessionPeerTagValue = 5 +) + +// PeerTagger is an interface for tagging peers with metadata +type PeerTagger interface { + TagPeer(peer.ID, string, int) + UntagPeer(p peer.ID, tag string) + Protect(peer.ID, string) + Unprotect(peer.ID, string) bool +} + +// SessionPeerManager keeps track of peers for a session, and takes care of +// ConnectionManager tagging. 
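+//
+// An illustrative construction, assuming connmgr is any value implementing
+// PeerTagger (for example a libp2p connection manager):
+//
+//	spm := New(sessionID, connmgr)
+//	spm.AddPeer(p) // tags p so the connection manager prefers to keep it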
+type SessionPeerManager struct { + tagger PeerTagger + tag string + + id uint64 + plk sync.RWMutex + peers map[peer.ID]struct{} + peersDiscovered bool +} + +// New creates a new SessionPeerManager +func New(id uint64, tagger PeerTagger) *SessionPeerManager { + return &SessionPeerManager{ + id: id, + tag: fmt.Sprint("bs-ses-", id), + tagger: tagger, + peers: make(map[peer.ID]struct{}), + } +} + +// AddPeer adds the peer to the SessionPeerManager. +// Returns true if the peer is a new peer, false if it already existed. +func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { + spm.plk.Lock() + defer spm.plk.Unlock() + + // Check if the peer is a new peer + if _, ok := spm.peers[p]; ok { + return false + } + + spm.peers[p] = struct{}{} + spm.peersDiscovered = true + + // Tag the peer with the ConnectionManager so it doesn't discard the + // connection + spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) + + log.Debugw("Bitswap: Added peer to session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) + return true +} + +// Protect connection to this peer from being pruned by the connection manager +func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { + spm.plk.Lock() + defer spm.plk.Unlock() + + if _, ok := spm.peers[p]; !ok { + return + } + + spm.tagger.Protect(p, spm.tag) +} + +// RemovePeer removes the peer from the SessionPeerManager. +// Returns true if the peer was removed, false if it did not exist. +func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { + spm.plk.Lock() + defer spm.plk.Unlock() + + if _, ok := spm.peers[p]; !ok { + return false + } + + delete(spm.peers, p) + spm.tagger.UntagPeer(p, spm.tag) + spm.tagger.Unprotect(p, spm.tag) + + log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) + return true +} + +// PeersDiscovered indicates whether peers have been discovered yet. +// Returns true once a peer has been discovered by the session (even if all +// peers are later removed from the session). 
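+//
+// This lets callers distinguish "no peers found yet" from "peers were found
+// but have since been removed".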
+func (spm *SessionPeerManager) PeersDiscovered() bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + return spm.peersDiscovered +} + +func (spm *SessionPeerManager) Peers() []peer.ID { + spm.plk.RLock() + defer spm.plk.RUnlock() + + peers := make([]peer.ID, 0, len(spm.peers)) + for p := range spm.peers { + peers = append(peers, p) + } + + return peers +} + +func (spm *SessionPeerManager) HasPeers() bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + return len(spm.peers) > 0 +} + +func (spm *SessionPeerManager) HasPeer(p peer.ID) bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + _, ok := spm.peers[p] + return ok +} + +// Shutdown untags all the peers +func (spm *SessionPeerManager) Shutdown() { + spm.plk.Lock() + defer spm.plk.Unlock() + + // Untag the peers with the ConnectionManager so that it can release + // connections to those peers + for p := range spm.peers { + spm.tagger.UntagPeer(p, spm.tag) + spm.tagger.Unprotect(p, spm.tag) + } +} diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go new file mode 100644 index 0000000000..fc1d7274d2 --- /dev/null +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go @@ -0,0 +1,321 @@ +package sessionpeermanager + +import ( + "sync" + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +type fakePeerTagger struct { + lk sync.Mutex + taggedPeers []peer.ID + protectedPeers map[peer.ID]map[string]struct{} + wait sync.WaitGroup +} + +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{ + protectedPeers: make(map[peer.ID]map[string]struct{}), + } +} + +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { + fpt.wait.Add(1) + + fpt.lk.Lock() + defer fpt.lk.Unlock() + fpt.taggedPeers = append(fpt.taggedPeers, p) +} + +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + defer fpt.wait.Done() + + fpt.lk.Lock() + defer fpt.lk.Unlock() + for i := 0; i < len(fpt.taggedPeers); i++ { + if fpt.taggedPeers[i] == p { + fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] + fpt.taggedPeers = fpt.taggedPeers[:len(fpt.taggedPeers)-1] + return + } + } +} + +func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + tags, ok := fpt.protectedPeers[p] + if !ok { + tags = make(map[string]struct{}) + fpt.protectedPeers[p] = tags + } + tags[tag] = struct{}{} +} + +func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + if tags, ok := fpt.protectedPeers[p]; ok { + delete(tags, tag) + if len(tags) == 0 { + delete(fpt.protectedPeers, p) + } + return len(tags) > 0 + } + + return false +} + +func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + return len(fpt.protectedPeers[p]) > 0 +} + +func TestAddPeers(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + isNew := spm.AddPeer(peers[0]) + if !isNew { + t.Fatal("Expected peer to be new") + } + + isNew = spm.AddPeer(peers[0]) + if isNew { + t.Fatal("Expected peer to no longer be new") + } + + isNew = spm.AddPeer(peers[1]) + if !isNew { + t.Fatal("Expected peer to be new") + } +} + +func TestRemovePeers(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + existed := spm.RemovePeer(peers[0]) + 
if existed { + t.Fatal("Expected peer not to exist") + } + + spm.AddPeer(peers[0]) + spm.AddPeer(peers[1]) + + existed = spm.RemovePeer(peers[0]) + if !existed { + t.Fatal("Expected peer to exist") + } + existed = spm.RemovePeer(peers[1]) + if !existed { + t.Fatal("Expected peer to exist") + } + existed = spm.RemovePeer(peers[0]) + if existed { + t.Fatal("Expected peer not to have existed") + } +} + +func TestHasPeers(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if spm.HasPeers() { + t.Fatal("Expected not to have peers yet") + } + + spm.AddPeer(peers[0]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") + } + + spm.AddPeer(peers[1]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") + } + + spm.RemovePeer(peers[0]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") + } + + spm.RemovePeer(peers[1]) + if spm.HasPeers() { + t.Fatal("Expected to no longer have peers") + } +} + +func TestHasPeer(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if spm.HasPeer(peers[0]) { + t.Fatal("Expected not to have peer yet") + } + + spm.AddPeer(peers[0]) + if !spm.HasPeer(peers[0]) { + t.Fatal("Expected to have peer") + } + + spm.AddPeer(peers[1]) + if !spm.HasPeer(peers[1]) { + t.Fatal("Expected to have peer") + } + + spm.RemovePeer(peers[0]) + if spm.HasPeer(peers[0]) { + t.Fatal("Expected not to have peer") + } + + if !spm.HasPeer(peers[1]) { + t.Fatal("Expected to have peer") + } +} + +func TestPeers(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if len(spm.Peers()) > 0 { + t.Fatal("Expected not to have peers yet") + } + + spm.AddPeer(peers[0]) + if len(spm.Peers()) != 1 { + t.Fatal("Expected to have one peer") + } + + spm.AddPeer(peers[1]) + if len(spm.Peers()) != 2 { + t.Fatal("Expected to have two peers") + } + + spm.RemovePeer(peers[0]) + if len(spm.Peers()) != 1 { + t.Fatal("Expected to have one peer") + } +} + +func TestPeersDiscovered(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if spm.PeersDiscovered() { + t.Fatal("Expected not to have discovered peers yet") + } + + spm.AddPeer(peers[0]) + if !spm.PeersDiscovered() { + t.Fatal("Expected to have discovered peers") + } + + spm.RemovePeer(peers[0]) + if !spm.PeersDiscovered() { + t.Fatal("Expected to still have discovered peers") + } +} + +func TestPeerTagging(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + fpt := &fakePeerTagger{} + spm := New(1, fpt) + + spm.AddPeer(peers[0]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have tagged one peer") + } + + spm.AddPeer(peers[0]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have tagged one peer") + } + + spm.AddPeer(peers[1]) + if len(fpt.taggedPeers) != 2 { + t.Fatal("Expected to have tagged two peers") + } + + spm.RemovePeer(peers[1]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have untagged peer") + } +} + +func TestProtectConnection(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(1) + peerA := peers[0] + fpt := newFakePeerTagger() + spm := New(1, fpt) + + // Should not protect connection if peer hasn't been added yet + spm.ProtectConnection(peerA) + if fpt.isProtected(peerA) { + t.Fatal("Expected peer not to be protected") + } + + // Once peer is added, should be able to protect connection + spm.AddPeer(peerA) + spm.ProtectConnection(peerA) + if 
!fpt.isProtected(peerA) { + t.Fatal("Expected peer to be protected") + } + + // Removing peer should unprotect connection + spm.RemovePeer(peerA) + if fpt.isProtected(peerA) { + t.Fatal("Expected peer to be unprotected") + } +} + +func TestShutdown(t *testing.T) { + test.Flaky(t) + + peers := testutil.GeneratePeers(2) + fpt := newFakePeerTagger() + spm := New(1, fpt) + + spm.AddPeer(peers[0]) + spm.AddPeer(peers[1]) + if len(fpt.taggedPeers) != 2 { + t.Fatal("Expected to have tagged two peers") + } + + spm.ProtectConnection(peers[0]) + if !fpt.isProtected(peers[0]) { + t.Fatal("Expected peer to be protected") + } + + spm.Shutdown() + + if len(fpt.taggedPeers) != 0 { + t.Fatal("Expected to have untagged all peers") + } + if len(fpt.protectedPeers) != 0 { + t.Fatal("Expected to have unprotected all peers") + } +} diff --git a/bitswap/client/internal/tracing.go b/bitswap/client/internal/tracing.go new file mode 100644 index 0000000000..aa1f7992ff --- /dev/null +++ b/bitswap/client/internal/tracing.go @@ -0,0 +1,13 @@ +package internal + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) +} diff --git a/bitswap/client/stat.go b/bitswap/client/stat.go new file mode 100644 index 0000000000..013afec676 --- /dev/null +++ b/bitswap/client/stat.go @@ -0,0 +1,30 @@ +package client + +import ( + cid "github.com/ipfs/go-cid" +) + +// Stat is a struct that provides various statistics on bitswap operations +type Stat struct { + Wantlist []cid.Cid + BlocksReceived uint64 + DataReceived uint64 + DupBlksReceived uint64 + DupDataReceived uint64 + MessagesReceived uint64 +} + +// Stat returns aggregated statistics about bitswap operations +func (bs *Client) Stat() (st Stat, err error) { + bs.counterLk.Lock() + c := bs.counters + st.BlocksReceived = c.blocksRecvd + st.DupBlksReceived = c.dupBlocksRecvd + st.DupDataReceived = c.dupDataRecvd + st.DataReceived = c.dataRecvd + st.MessagesReceived = c.messagesRecvd + bs.counterLk.Unlock() + st.Wantlist = bs.GetWantlist() + + return st, nil +} diff --git a/bitswap/client/wantlist/wantlist.go b/bitswap/client/wantlist/wantlist.go new file mode 100644 index 0000000000..6cb71eeccc --- /dev/null +++ b/bitswap/client/wantlist/wantlist.go @@ -0,0 +1,142 @@ +// Package wantlist implements an object for bitswap that contains the keys +// that a given peer wants. +package wantlist + +import ( + "sort" + + pb "github.com/ipfs/boxo/bitswap/message/pb" + + cid "github.com/ipfs/go-cid" +) + +// Wantlist is a raw list of wanted blocks and their priorities +type Wantlist struct { + set map[cid.Cid]Entry + + // Re-computing this can get expensive so we memoize it. + cached []Entry +} + +// Entry is an entry in a want list, consisting of a cid and its priority +type Entry struct { + Cid cid.Cid + Priority int32 + WantType pb.Message_Wantlist_WantType +} + +// NewRefEntry creates a new reference tracked wantlist entry. 
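+// The returned entry always has want type pb.Message_Wantlist_Block.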
+func NewRefEntry(c cid.Cid, p int32) Entry {
+	return Entry{
+		Cid:      c,
+		Priority: p,
+		WantType: pb.Message_Wantlist_Block,
+	}
+}
+
+type entrySlice []Entry
+
+func (es entrySlice) Len() int           { return len(es) }
+func (es entrySlice) Swap(i, j int)      { es[i], es[j] = es[j], es[i] }
+func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority }
+
+// New generates a new raw Wantlist
+func New() *Wantlist {
+	return &Wantlist{
+		set: make(map[cid.Cid]Entry),
+	}
+}
+
+// Len returns the number of entries in a wantlist.
+func (w *Wantlist) Len() int {
+	return len(w.set)
+}
+
+// Add adds an entry to the wantlist for the given CID, priority and want
+// type, if not already present. Adding a want-have does not override an
+// existing want-block.
+func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) bool {
+	e, ok := w.set[c]
+
+	// Adding want-have should not override want-block
+	if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) {
+		return false
+	}
+
+	w.put(c, Entry{
+		Cid:      c,
+		Priority: priority,
+		WantType: wantType,
+	})
+
+	return true
+}
+
+// Remove removes the given cid from the wantlist.
+func (w *Wantlist) Remove(c cid.Cid) bool {
+	_, ok := w.set[c]
+	if !ok {
+		return false
+	}
+
+	w.delete(c)
+	return true
+}
+
+// RemoveType removes the given cid from the wantlist, respecting the want
+// type: RemoveType with want-have will not remove an existing want-block.
+func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool {
+	e, ok := w.set[c]
+	if !ok {
+		return false
+	}
+
+	// Removing want-have should not remove want-block
+	if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have {
+		return false
+	}
+
+	w.delete(c)
+	return true
+}
+
+func (w *Wantlist) delete(c cid.Cid) {
+	delete(w.set, c)
+	w.cached = nil
+}
+
+func (w *Wantlist) put(c cid.Cid, e Entry) {
+	w.cached = nil
+	w.set[c] = e
+}
+
+// Contains returns the entry, if present, for the given CID, plus whether it
+// was present.
+func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) {
+	e, ok := w.set[c]
+	return e, ok
+}
+
+// Entries returns all wantlist entries for a want list, sorted by priority.
+//
+// DO NOT MODIFY. The returned list is cached.
+func (w *Wantlist) Entries() []Entry {
+	if w.cached != nil {
+		return w.cached
+	}
+	es := make([]Entry, 0, len(w.set))
+	for _, e := range w.set {
+		es = append(es, e)
+	}
+	sort.Sort(entrySlice(es))
+	w.cached = es
+	return es[0:len(es):len(es)]
+}
+
+// Absorb absorbs all the entries in other into this wantlist.
+func (w *Wantlist) Absorb(other *Wantlist) {
+	// Invalidate the cache up-front to avoid doing any work trying to keep it up-to-date.
+ w.cached = nil + + for _, e := range other.Entries() { + w.Add(e.Cid, e.Priority, e.WantType) + } +} diff --git a/bitswap/client/wantlist/wantlist_test.go b/bitswap/client/wantlist/wantlist_test.go new file mode 100644 index 0000000000..829af50a6f --- /dev/null +++ b/bitswap/client/wantlist/wantlist_test.go @@ -0,0 +1,256 @@ +package wantlist + +import ( + "testing" + + pb "github.com/ipfs/boxo/bitswap/message/pb" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +var testcids []cid.Cid + +func init() { + strs := []string{ + "QmQL8LqkEgYXaDHdNYCG2mmpow7Sp8Z8Kt3QS688vyBeC7", + "QmcBDsdjgSXU7BP4A4V8LJCXENE5xVwnhrhRGVTJr9YCVj", + "QmQakgd2wDxc3uUF4orGdEm28zUT9Mmimp5pyPG2SFS9Gj", + } + for _, s := range strs { + c, err := cid.Decode(s) + if err != nil { + panic(err) + } + testcids = append(testcids, c) + } + +} + +type wli interface { + Contains(cid.Cid) (Entry, bool) +} + +func assertHasCid(t *testing.T, w wli, c cid.Cid) { + e, ok := w.Contains(c) + if !ok { + t.Fatal("expected to have ", c) + } + if !e.Cid.Equals(c) { + t.Fatal("returned entry had wrong cid value") + } +} + +func TestBasicWantlist(t *testing.T) { + test.Flaky(t) + + wl := New() + + if !wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) { + t.Fatal("expected true") + } + assertHasCid(t, wl, testcids[0]) + if !wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { + t.Fatal("expected true") + } + assertHasCid(t, wl, testcids[0]) + assertHasCid(t, wl, testcids[1]) + + if wl.Len() != 2 { + t.Fatal("should have had two items") + } + + if wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { + t.Fatal("add shouldnt report success on second add") + } + assertHasCid(t, wl, testcids[0]) + assertHasCid(t, wl, testcids[1]) + + if wl.Len() != 2 { + t.Fatal("should have had two items") + } + + if !wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) { + t.Fatal("should have gotten true") + } + + assertHasCid(t, wl, testcids[1]) + if _, has := wl.Contains(testcids[0]); has { + t.Fatal("shouldnt have this cid") + } +} + +func TestAddHaveThenBlock(t *testing.T) { + test.Flaky(t) + + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddBlockThenHave(t *testing.T) { + test.Flaky(t) + + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveBlock(t *testing.T) { + test.Flaky(t) + + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveHave(t *testing.T) { + test.Flaky(t) + + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func 
TestAddHaveThenRemoveAny(t *testing.T) {
+	test.Flaky(t)
+
+	wl := New()
+
+	wl.Add(testcids[0], 5, pb.Message_Wantlist_Have)
+	wl.Remove(testcids[0])
+
+	_, ok := wl.Contains(testcids[0])
+	if ok {
+		t.Fatal("expected not to have ", testcids[0])
+	}
+}
+
+func TestAddBlockThenRemoveAny(t *testing.T) {
+	test.Flaky(t)
+
+	wl := New()
+
+	wl.Add(testcids[0], 5, pb.Message_Wantlist_Block)
+	wl.Remove(testcids[0])
+
+	_, ok := wl.Contains(testcids[0])
+	if ok {
+		t.Fatal("expected not to have ", testcids[0])
+	}
+}
+
+func TestAbsorb(t *testing.T) {
+	test.Flaky(t)
+
+	wl := New()
+	wl.Add(testcids[0], 5, pb.Message_Wantlist_Block)
+	wl.Add(testcids[1], 4, pb.Message_Wantlist_Have)
+	wl.Add(testcids[2], 3, pb.Message_Wantlist_Have)
+
+	wl2 := New()
+	wl2.Add(testcids[0], 2, pb.Message_Wantlist_Have)
+	wl2.Add(testcids[1], 1, pb.Message_Wantlist_Block)
+
+	wl.Absorb(wl2)
+
+	e, ok := wl.Contains(testcids[0])
+	if !ok {
+		t.Fatal("expected to have ", testcids[0])
+	}
+	if e.Priority != 5 {
+		t.Fatal("expected priority 5")
+	}
+	if e.WantType != pb.Message_Wantlist_Block {
+		t.Fatal("expected type ", pb.Message_Wantlist_Block)
+	}
+
+	e, ok = wl.Contains(testcids[1])
+	if !ok {
+		t.Fatal("expected to have ", testcids[1])
+	}
+	if e.Priority != 1 {
+		t.Fatal("expected priority 1")
+	}
+	if e.WantType != pb.Message_Wantlist_Block {
+		t.Fatal("expected type ", pb.Message_Wantlist_Block)
+	}
+
+	e, ok = wl.Contains(testcids[2])
+	if !ok {
+		t.Fatal("expected to have ", testcids[2])
+	}
+	if e.Priority != 3 {
+		t.Fatal("expected priority 3")
+	}
+	if e.WantType != pb.Message_Wantlist_Have {
+		t.Fatal("expected type ", pb.Message_Wantlist_Have)
+	}
+}
+
+func TestSortEntries(t *testing.T) {
+	test.Flaky(t)
+
+	wl := New()
+
+	wl.Add(testcids[0], 3, pb.Message_Wantlist_Block)
+	wl.Add(testcids[1], 5, pb.Message_Wantlist_Have)
+	wl.Add(testcids[2], 4, pb.Message_Wantlist_Have)
+
+	entries := wl.Entries()
+	if !entries[0].Cid.Equals(testcids[1]) ||
+		!entries[1].Cid.Equals(testcids[2]) ||
+		!entries[2].Cid.Equals(testcids[0]) {
+		t.Fatal("wrong order")
+	}
+}
+
+// Test adding and removing interleaved with checking entries to make sure we clear the cache.
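+// (Entries memoizes its result in w.cached, so both delete and put must
+// invalidate it; this test exercises those paths.)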
+func TestCache(t *testing.T) {
+	test.Flaky(t)
+
+	wl := New()
+
+	wl.Add(testcids[0], 3, pb.Message_Wantlist_Block)
+	require.Len(t, wl.Entries(), 1)
+
+	wl.Add(testcids[1], 3, pb.Message_Wantlist_Block)
+	require.Len(t, wl.Entries(), 2)
+
+	wl.Remove(testcids[1])
+	require.Len(t, wl.Entries(), 1)
+}
diff --git a/bitswap/decision/forward.go b/bitswap/decision/forward.go
new file mode 100644
index 0000000000..c7e7b42f43
--- /dev/null
+++ b/bitswap/decision/forward.go
@@ -0,0 +1,12 @@
+package decision
+
+import "github.com/ipfs/boxo/bitswap/server"
+
+type (
+	// Deprecated: use server.Receipt instead
+	Receipt = server.Receipt
+	// Deprecated: use server.ScoreLedger instead
+	ScoreLedger = server.ScoreLedger
+	// Deprecated: use server.ScorePeerFunc instead
+	ScorePeerFunc = server.ScorePeerFunc
+)
diff --git a/bitswap/forward.go b/bitswap/forward.go
new file mode 100644
index 0000000000..d6d657b2b6
--- /dev/null
+++ b/bitswap/forward.go
@@ -0,0 +1,17 @@
+package bitswap
+
+import (
+	"github.com/ipfs/boxo/bitswap/server"
+	"github.com/ipfs/boxo/bitswap/tracer"
+)
+
+type (
+	// Deprecated: use server.PeerBlockRequestFilter instead
+	PeerBlockRequestFilter = server.PeerBlockRequestFilter
+	// Deprecated: use server.TaskComparator instead
+	TaskComparator = server.TaskComparator
+	// Deprecated: use server.TaskInfo instead
+	TaskInfo = server.TaskInfo
+	// Deprecated: use tracer.Tracer instead
+	Tracer = tracer.Tracer
+)
diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go
new file mode 100644
index 0000000000..f9494a0da6
--- /dev/null
+++ b/bitswap/internal/defaults/defaults.go
@@ -0,0 +1,36 @@
+package defaults
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+const (
+	// these requests take at _least_ two minutes at the moment.
+	ProvideTimeout  = time.Minute * 3
+	ProvSearchDelay = time.Second
+
+	// Number of concurrent workers in the decision engine that process requests to the blockstore
+	BitswapEngineBlockstoreWorkerCount = 128
+	// the total number of simultaneous threads sending outgoing messages
+	BitswapTaskWorkerCount = 8
+	// how many worker threads to start for decision engine task workers
+	BitswapEngineTaskWorkerCount = 8
+	// the total amount of bytes that a peer should have outstanding; it is utilized by the decision engine
+	BitswapMaxOutstandingBytesPerPeer = 1 << 20
+	// the target size in bytes of each outgoing bitswap message
+	BitswapEngineTargetMessageSize = 16 * 1024
+	// HasBlockBufferSize is the buffer size of the channel for new blocks
+	// that need to be provided. They should get pulled over by the
+	// provideCollector even before they are actually provided.
+	// TODO: Does this need to be this large given that?
+	HasBlockBufferSize = 256
+
+	// Maximum size of the wantlist we are willing to keep in memory.
+	MaxQueuedWantlistEntiresPerPeer = 1024
+
+	// Copied from github.com/ipfs/go-verifcid#maximumHashLength
+	// FIXME: expose this in go-verifcid.
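+	// A binary CID is at most four varints (version, multicodec, multihash
+	// code, and digest length) followed by the digest itself, hence the
+	// varint headroom in MaximumAllowedCid below.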
+	MaximumHashLength = 128
+	MaximumAllowedCid = binary.MaxVarintLen64*4 + MaximumHashLength
+)
diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go
new file mode 100644
index 0000000000..493d01f239
--- /dev/null
+++ b/bitswap/internal/testutil/testutil.go
@@ -0,0 +1,140 @@
+package testutil
+
+import (
+	"crypto/rand"
+	"fmt"
+
+	"github.com/ipfs/boxo/bitswap/client/wantlist"
+	bsmsg "github.com/ipfs/boxo/bitswap/message"
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
+	peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+var blockGenerator = blocksutil.NewBlockGenerator()
+var prioritySeq int32
+
+// GenerateBlocksOfSize generates a series of blocks of the given byte size
+func GenerateBlocksOfSize(n int, size int64) []blocks.Block {
+	generatedBlocks := make([]blocks.Block, 0, n)
+	for i := 0; i < n; i++ {
+		// rand.Read never errors
+		buf := make([]byte, size)
+		rand.Read(buf)
+		b := blocks.NewBlock(buf)
+		generatedBlocks = append(generatedBlocks, b)
+	}
+	return generatedBlocks
+}
+
+// GenerateCids produces n content identifiers.
+func GenerateCids(n int) []cid.Cid {
+	cids := make([]cid.Cid, 0, n)
+	for i := 0; i < n; i++ {
+		c := blockGenerator.Next().Cid()
+		cids = append(cids, c)
+	}
+	return cids
+}
+
+// GenerateMessageEntries makes fake bitswap message entries.
+func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry {
+	bsmsgs := make([]bsmsg.Entry, 0, n)
+	for i := 0; i < n; i++ {
+		prioritySeq++
+		msg := bsmsg.Entry{
+			Entry:  wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq),
+			Cancel: isCancel,
+		}
+		bsmsgs = append(bsmsgs, msg)
+	}
+	return bsmsgs
+}
+
+var peerSeq int
+
+// GeneratePeers creates n peer ids.
+func GeneratePeers(n int) []peer.ID {
+	peerIds := make([]peer.ID, 0, n)
+	for i := 0; i < n; i++ {
+		peerSeq++
+		p := peer.ID(fmt.Sprint(peerSeq))
+		peerIds = append(peerIds, p)
+	}
+	return peerIds
+}
+
+var nextSession uint64
+
+// GenerateSessionID makes a unique session identifier.
+func GenerateSessionID() uint64 {
+	nextSession++
+	return nextSession
+}
+
+// ContainsPeer returns true if a peer is found in a list of peers.
+func ContainsPeer(peers []peer.ID, p peer.ID) bool {
+	for _, n := range peers {
+		if p == n {
+			return true
+		}
+	}
+	return false
+}
+
+// IndexOf returns the index of a given cid in an array of blocks
+func IndexOf(blks []blocks.Block, c cid.Cid) int {
+	for i, n := range blks {
+		if n.Cid() == c {
+			return i
+		}
+	}
+	return -1
+}
+
+// ContainsBlock returns true if a block is found in a list of blocks.
+func ContainsBlock(blks []blocks.Block, block blocks.Block) bool {
+	return IndexOf(blks, block.Cid()) != -1
+}
+
+// ContainsKey returns true if a key is found in a list of CIDs.
+func ContainsKey(ks []cid.Cid, c cid.Cid) bool {
+	for _, k := range ks {
+		if c == k {
+			return true
+		}
+	}
+	return false
+}
+
+// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if
+// they're in a different order)
+func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool {
+	if len(ks1) != len(ks2) {
+		return false
+	}
+
+	for _, k := range ks1 {
+		if !ContainsKey(ks2, k) {
+			return false
+		}
+	}
+	return true
+}
+
+// MatchPeersIgnoreOrder returns true if the lists of peers match (even if
+// they're in a different order)
+func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool {
+	if len(ps1) != len(ps2) {
+		return false
+	}
+
+	for _, p := range ps1 {
+		if !ContainsPeer(ps2, p) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/bitswap/internal/testutil/testutil_test.go b/bitswap/internal/testutil/testutil_test.go
new file mode 100644
index 0000000000..e2b6699862
--- /dev/null
+++ b/bitswap/internal/testutil/testutil_test.go
@@ -0,0 +1,16 @@
+package testutil
+
+import (
+	"testing"
+
+	blocks "github.com/ipfs/boxo/blocks"
+)
+
+func TestGenerateBlocksOfSize(t *testing.T) {
+	for _, b1 := range GenerateBlocksOfSize(10, 100) {
+		b2 := blocks.NewBlock(b1.RawData())
+		if b2.Cid() != b1.Cid() {
+			t.Fatal("block CIDs mismatch")
+		}
+	}
+}
diff --git a/bitswap/internal/tracing.go b/bitswap/internal/tracing.go
new file mode 100644
index 0000000000..aa1f7992ff
--- /dev/null
+++ b/bitswap/internal/tracing.go
@@ -0,0 +1,13 @@
+package internal
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...)
+}
diff --git a/bitswap/message/message.go b/bitswap/message/message.go
new file mode 100644
index 0000000000..51195f4aa1
--- /dev/null
+++ b/bitswap/message/message.go
@@ -0,0 +1,500 @@
+package message
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/ipfs/boxo/bitswap/client/wantlist"
+	pb "github.com/ipfs/boxo/bitswap/message/pb"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	pool "github.com/libp2p/go-buffer-pool"
+	msgio "github.com/libp2p/go-msgio"
+
+	u "github.com/ipfs/boxo/util"
+	"github.com/libp2p/go-libp2p/core/network"
+)
+
+// BitSwapMessage is the basic interface for building, encoding, and decoding
+// messages sent on the BitSwap protocol.
+type BitSwapMessage interface {
+	// Wantlist returns a slice of unique keys that represent data wanted by
+	// the sender.
+	Wantlist() []Entry
+
+	// Blocks returns a slice of unique blocks.
+	Blocks() []blocks.Block
+	// BlockPresences returns the list of HAVE / DONT_HAVE in the message
+	BlockPresences() []BlockPresence
+	// Haves returns the Cids for each HAVE
+	Haves() []cid.Cid
+	// DontHaves returns the Cids for each DONT_HAVE
+	DontHaves() []cid.Cid
+	// PendingBytes returns the number of outstanding bytes of data that the
+	// engine has yet to send to the client (because they didn't fit in this
+	// message)
+	PendingBytes() int32
+
+	// AddEntry adds an entry to the Wantlist.
+	AddEntry(key cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int
+
+	// Cancel adds a CANCEL for the given CID to the message
+	// Returns the size of the CANCEL entry in the protobuf
+	Cancel(key cid.Cid) int
+
+	// Remove removes any entries for the given CID.
Useful when the want + // status for the CID changes when preparing a message. + Remove(key cid.Cid) + + // Empty indicates whether the message has any information + Empty() bool + // Size returns the size of the message in bytes + Size() int + + // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set + Full() bool + + // AddBlock adds a block to the message + AddBlock(blocks.Block) + // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message + AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) + // AddHave adds a HAVE for the given Cid to the message + AddHave(cid.Cid) + // AddDontHave adds a DONT_HAVE for the given Cid to the message + AddDontHave(cid.Cid) + // SetPendingBytes sets the number of bytes of data that are yet to be sent + // to the client (because they didn't fit in this message) + SetPendingBytes(int32) + Exportable + + Loggable() map[string]interface{} + + // Reset the values in the message back to defaults, so it can be reused + Reset(bool) + + // Clone the message fields + Clone() BitSwapMessage +} + +// Exportable is an interface for structures than can be +// encoded in a bitswap protobuf. +type Exportable interface { + // Note that older Bitswap versions use a different wire format, so we need + // to convert the message to the appropriate format depending on which + // version of the protocol the remote peer supports. + ToProtoV0() *pb.Message + ToProtoV1() *pb.Message + ToNetV0(w io.Writer) error + ToNetV1(w io.Writer) error +} + +// BlockPresence represents a HAVE / DONT_HAVE for a given Cid +type BlockPresence struct { + Cid cid.Cid + Type pb.Message_BlockPresenceType +} + +// Entry is a wantlist entry in a Bitswap message, with flags indicating +// - whether message is a cancel +// - whether requester wants a DONT_HAVE message +// - whether requester wants a HAVE message (instead of the block) +type Entry struct { + wantlist.Entry + Cancel bool + SendDontHave bool +} + +// Get the size of the entry on the wire +func (e *Entry) Size() int { + epb := e.ToPB() + return epb.Size() +} + +// Get the entry in protobuf form +func (e *Entry) ToPB() pb.Message_Wantlist_Entry { + return pb.Message_Wantlist_Entry{ + Block: pb.Cid{Cid: e.Cid}, + Priority: int32(e.Priority), + Cancel: e.Cancel, + WantType: e.WantType, + SendDontHave: e.SendDontHave, + } +} + +var MaxEntrySize = maxEntrySize() + +func maxEntrySize() int { + var maxInt32 int32 = (1 << 31) - 1 + + c := cid.NewCidV0(u.Hash([]byte("cid"))) + e := Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: maxInt32, + WantType: pb.Message_Wantlist_Have, + }, + SendDontHave: true, // true takes up more space than false + Cancel: true, + } + return e.Size() +} + +type impl struct { + full bool + wantlist map[cid.Cid]*Entry + blocks map[cid.Cid]blocks.Block + blockPresences map[cid.Cid]pb.Message_BlockPresenceType + pendingBytes int32 +} + +// New returns a new, empty bitswap message +func New(full bool) BitSwapMessage { + return newMsg(full) +} + +func newMsg(full bool) *impl { + return &impl{ + full: full, + wantlist: make(map[cid.Cid]*Entry), + blocks: make(map[cid.Cid]blocks.Block), + blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), + } +} + +// Clone the message fields +func (m *impl) Clone() BitSwapMessage { + msg := newMsg(m.full) + for k := range m.wantlist { + msg.wantlist[k] = m.wantlist[k] + } + for k := range m.blocks { + msg.blocks[k] = m.blocks[k] + } + for k := range m.blockPresences { + msg.blockPresences[k] = m.blockPresences[k] + } + 
msg.pendingBytes = m.pendingBytes + return msg +} + +// Reset the values in the message back to defaults, so it can be reused +func (m *impl) Reset(full bool) { + m.full = full + for k := range m.wantlist { + delete(m.wantlist, k) + } + for k := range m.blocks { + delete(m.blocks, k) + } + for k := range m.blockPresences { + delete(m.blockPresences, k) + } + m.pendingBytes = 0 +} + +var errCidMissing = errors.New("missing cid") + +func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { + m := newMsg(pbm.Wantlist.Full) + for _, e := range pbm.Wantlist.Entries { + if !e.Block.Cid.Defined() { + return nil, errCidMissing + } + m.addEntry(e.Block.Cid, e.Priority, e.Cancel, e.WantType, e.SendDontHave) + } + + // deprecated + for _, d := range pbm.Blocks { + // CIDv0, sha256, protobuf only + b := blocks.NewBlock(d) + m.AddBlock(b) + } + // + + for _, b := range pbm.GetPayload() { + pref, err := cid.PrefixFromBytes(b.GetPrefix()) + if err != nil { + return nil, err + } + + c, err := pref.Sum(b.GetData()) + if err != nil { + return nil, err + } + + blk, err := blocks.NewBlockWithCid(b.GetData(), c) + if err != nil { + return nil, err + } + + m.AddBlock(blk) + } + + for _, bi := range pbm.GetBlockPresences() { + if !bi.Cid.Cid.Defined() { + return nil, errCidMissing + } + m.AddBlockPresence(bi.Cid.Cid, bi.Type) + } + + m.pendingBytes = pbm.PendingBytes + + return m, nil +} + +func (m *impl) Full() bool { + return m.full +} + +func (m *impl) Empty() bool { + return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 +} + +func (m *impl) Wantlist() []Entry { + out := make([]Entry, 0, len(m.wantlist)) + for _, e := range m.wantlist { + out = append(out, *e) + } + return out +} + +func (m *impl) Blocks() []blocks.Block { + bs := make([]blocks.Block, 0, len(m.blocks)) + for _, block := range m.blocks { + bs = append(bs, block) + } + return bs +} + +func (m *impl) BlockPresences() []BlockPresence { + bps := make([]BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + bps = append(bps, BlockPresence{c, t}) + } + return bps +} + +func (m *impl) Haves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_Have) +} + +func (m *impl) DontHaves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_DontHave) +} + +func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { + cids := make([]cid.Cid, 0, len(m.blockPresences)) + for c, bpt := range m.blockPresences { + if bpt == t { + cids = append(cids, c) + } + } + return cids +} + +func (m *impl) PendingBytes() int32 { + return m.pendingBytes +} + +func (m *impl) SetPendingBytes(pendingBytes int32) { + m.pendingBytes = pendingBytes +} + +func (m *impl) Remove(k cid.Cid) { + delete(m.wantlist, k) +} + +func (m *impl) Cancel(k cid.Cid) int { + return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) +} + +func (m *impl) AddEntry(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { + return m.addEntry(k, priority, false, wantType, sendDontHave) +} + +func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { + e, exists := m.wantlist[c] + if exists { + // Only change priority if want is of the same type + if e.WantType == wantType { + e.Priority = priority + } + // Only change from "dont cancel" to "do cancel" + if cancel { + e.Cancel = cancel + } + // Only change from "dont send" to "do send" DONT_HAVE + if sendDontHave { + e.SendDontHave = sendDontHave + } + // 
want-block overrides existing want-have
+		if wantType == pb.Message_Wantlist_Block && e.WantType == pb.Message_Wantlist_Have {
+			e.WantType = wantType
+		}
+		m.wantlist[c] = e
+		return 0
+	}
+
+	e = &Entry{
+		Entry: wantlist.Entry{
+			Cid:      c,
+			Priority: priority,
+			WantType: wantType,
+		},
+		SendDontHave: sendDontHave,
+		Cancel:       cancel,
+	}
+	m.wantlist[c] = e
+
+	return e.Size()
+}
+
+func (m *impl) AddBlock(b blocks.Block) {
+	delete(m.blockPresences, b.Cid())
+	m.blocks[b.Cid()] = b
+}
+
+func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) {
+	if _, ok := m.blocks[c]; ok {
+		return
+	}
+	m.blockPresences[c] = t
+}
+
+func (m *impl) AddHave(c cid.Cid) {
+	m.AddBlockPresence(c, pb.Message_Have)
+}
+
+func (m *impl) AddDontHave(c cid.Cid) {
+	m.AddBlockPresence(c, pb.Message_DontHave)
+}
+
+func (m *impl) Size() int {
+	size := 0
+	for _, block := range m.blocks {
+		size += len(block.RawData())
+	}
+	for c := range m.blockPresences {
+		size += BlockPresenceSize(c)
+	}
+	for _, e := range m.wantlist {
+		size += e.Size()
+	}
+
+	return size
+}
+
+func BlockPresenceSize(c cid.Cid) int {
+	return (&pb.Message_BlockPresence{
+		Cid:  pb.Cid{Cid: c},
+		Type: pb.Message_Have,
+	}).Size()
+}
+
+// FromNet generates a new BitSwapMessage from incoming data on an io.Reader.
+func FromNet(r io.Reader) (BitSwapMessage, error) {
+	reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax)
+	return FromMsgReader(reader)
+}
+
+// FromMsgReader generates a new BitSwapMessage from a gogo-protobuf msgio reader.
+func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) {
+	msg, err := r.ReadMsg()
+	if err != nil {
+		return nil, err
+	}
+
+	var pb pb.Message
+	err = pb.Unmarshal(msg)
+	r.ReleaseMsg(msg)
+	if err != nil {
+		return nil, err
+	}
+
+	return newMessageFromProto(pb)
+}
+
+func (m *impl) ToProtoV0() *pb.Message {
+	pbm := new(pb.Message)
+	pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist))
+	for _, e := range m.wantlist {
+		pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB())
+	}
+	pbm.Wantlist.Full = m.full
+
+	blocks := m.Blocks()
+	pbm.Blocks = make([][]byte, 0, len(blocks))
+	for _, b := range blocks {
+		pbm.Blocks = append(pbm.Blocks, b.RawData())
+	}
+	return pbm
+}
+
+func (m *impl) ToProtoV1() *pb.Message {
+	pbm := new(pb.Message)
+	pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist))
+	for _, e := range m.wantlist {
+		pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB())
+	}
+	pbm.Wantlist.Full = m.full
+
+	blocks := m.Blocks()
+	pbm.Payload = make([]pb.Message_Block, 0, len(blocks))
+	for _, b := range blocks {
+		pbm.Payload = append(pbm.Payload, pb.Message_Block{
+			Data:   b.RawData(),
+			Prefix: b.Cid().Prefix().Bytes(),
+		})
+	}
+
+	pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences))
+	for c, t := range m.blockPresences {
+		pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{
+			Cid:  pb.Cid{Cid: c},
+			Type: t,
+		})
+	}
+
+	pbm.PendingBytes = m.PendingBytes()
+
+	return pbm
+}
+
+func (m *impl) ToNetV0(w io.Writer) error {
+	return write(w, m.ToProtoV0())
+}
+
+func (m *impl) ToNetV1(w io.Writer) error {
+	return write(w, m.ToProtoV1())
+}
+
+func write(w io.Writer, m *pb.Message) error {
+	size := m.Size()
+
+	buf := pool.Get(size + binary.MaxVarintLen64)
+	defer pool.Put(buf)
+
+	n := binary.PutUvarint(buf, uint64(size))
+
+	written, err := m.MarshalTo(buf[n:])
+	if err != nil {
+		return err
+	}
+	n += written
+
+	_, err = w.Write(buf[:n])
+	return err
+}
+
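+// An illustrative round trip (a sketch, not part of this file's API): a
+// message encoded with ToNetV1 can be decoded with FromNet, since both use
+// the same varint-delimited framing. c is a placeholder cid.Cid:
+//
+//	msg := New(false)
+//	msg.AddEntry(c, 1, pb.Message_Wantlist_Block, true)
+//	var buf bytes.Buffer
+//	if err := msg.ToNetV1(&buf); err != nil {
+//		// handle error
+//	}
+//	decoded, err := FromNet(&buf)
+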
+func (m *impl) Loggable() map[string]interface{} { + blocks := make([]string, 0, len(m.blocks)) + for _, v := range m.blocks { + blocks = append(blocks, v.Cid().String()) + } + return map[string]interface{}{ + "blocks": blocks, + "wants": m.Wantlist(), + } +} diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go new file mode 100644 index 0000000000..b191c28be0 --- /dev/null +++ b/bitswap/message/message_test.go @@ -0,0 +1,311 @@ +package message + +import ( + "bytes" + "testing" + + "github.com/ipfs/boxo/bitswap/client/wantlist" + pb "github.com/ipfs/boxo/bitswap/message/pb" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + + blocks "github.com/ipfs/boxo/blocks" + u "github.com/ipfs/boxo/util" + cid "github.com/ipfs/go-cid" +) + +func mkFakeCid(s string) cid.Cid { + return cid.NewCidV0(u.Hash([]byte(s))) +} + +func TestAppendWanted(t *testing.T) { + str := mkFakeCid("foo") + m := New(true) + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) + + if !wantlistContains(&m.ToProtoV0().Wantlist, str) { + t.Fail() + } +} + +func TestNewMessageFromProto(t *testing.T) { + str := mkFakeCid("a_key") + protoMessage := new(pb.Message) + protoMessage.Wantlist.Entries = []pb.Message_Wantlist_Entry{ + {Block: pb.Cid{Cid: str}}, + } + if !wantlistContains(&protoMessage.Wantlist, str) { + t.Fail() + } + m, err := newMessageFromProto(*protoMessage) + if err != nil { + t.Fatal(err) + } + + if !wantlistContains(&m.ToProtoV0().Wantlist, str) { + t.Fail() + } +} + +func TestAppendBlock(t *testing.T) { + + strs := make([]string, 0, 2) + strs = append(strs, "Celeritas") + strs = append(strs, "Incendia") + + m := New(true) + for _, str := range strs { + block := blocks.NewBlock([]byte(str)) + m.AddBlock(block) + } + + // assert strings are in proto message + for _, blockbytes := range m.ToProtoV0().GetBlocks() { + s := bytes.NewBuffer(blockbytes).String() + if !contains(strs, s) { + t.Fail() + } + } +} + +func TestWantlist(t *testing.T) { + keystrs := []cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} + m := New(true) + for _, s := range keystrs { + m.AddEntry(s, 1, pb.Message_Wantlist_Block, true) + } + exported := m.Wantlist() + + for _, k := range exported { + present := false + for _, s := range keystrs { + + if s.Equals(k.Cid) { + present = true + } + } + if !present { + t.Logf("%v isn't in original list", k.Cid) + t.Fail() + } + } +} + +func TestCopyProtoByValue(t *testing.T) { + str := mkFakeCid("foo") + m := New(true) + protoBeforeAppend := m.ToProtoV0() + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) + if wantlistContains(&protoBeforeAppend.Wantlist, str) { + t.Fail() + } +} + +func TestToNetFromNetPreservesWantList(t *testing.T) { + original := New(true) + original.AddEntry(mkFakeCid("M"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("B"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("D"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("T"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("F"), 1, pb.Message_Wantlist_Block, true) + + buf := new(bytes.Buffer) + if err := original.ToNetV1(buf); err != nil { + t.Fatal(err) + } + + copied, err := FromNet(buf) + if err != nil { + t.Fatal(err) + } + + if !copied.Full() { + t.Fatal("fullness attribute got dropped on marshal") + } + + keys := make(map[cid.Cid]bool) + for _, k := range copied.Wantlist() { + keys[k.Cid] = true + } + + for _, k := range original.Wantlist() { + if _, ok := keys[k.Cid]; !ok { + t.Fatalf("Key 
Missing: \"%v\"", k) + } + } +} + +func TestToAndFromNetMessage(t *testing.T) { + + original := New(true) + original.AddBlock(blocks.NewBlock([]byte("W"))) + original.AddBlock(blocks.NewBlock([]byte("E"))) + original.AddBlock(blocks.NewBlock([]byte("F"))) + original.AddBlock(blocks.NewBlock([]byte("M"))) + + buf := new(bytes.Buffer) + if err := original.ToNetV1(buf); err != nil { + t.Fatal(err) + } + + m2, err := FromNet(buf) + if err != nil { + t.Fatal(err) + } + + keys := make(map[cid.Cid]bool) + for _, b := range m2.Blocks() { + keys[b.Cid()] = true + } + + for _, b := range original.Blocks() { + if _, ok := keys[b.Cid()]; !ok { + t.Fail() + } + } +} + +func wantlistContains(wantlist *pb.Message_Wantlist, c cid.Cid) bool { + for _, e := range wantlist.GetEntries() { + if e.Block.Cid.Defined() && c.Equals(e.Block.Cid) { + return true + } + } + return false +} + +func contains(strs []string, x string) bool { + for _, s := range strs { + if s == x { + return true + } + } + return false +} + +func TestDuplicates(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New(true) + + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) + if len(msg.Wantlist()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + + msg.AddBlock(b) + msg.AddBlock(b) + if len(msg.Blocks()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + + b2 := blocks.NewBlock([]byte("bar")) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + if len(msg.Haves()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } +} + +func TestBlockPresences(t *testing.T) { + b1 := blocks.NewBlock([]byte("foo")) + b2 := blocks.NewBlock([]byte("bar")) + msg := New(true) + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.Haves()) != 1 || !msg.Haves()[0].Equals(b1.Cid()) { + t.Fatal("Expected HAVE") + } + if len(msg.DontHaves()) != 1 || !msg.DontHaves()[0].Equals(b2.Cid()) { + t.Fatal("Expected DONT_HAVE") + } + + msg.AddBlock(b1) + if len(msg.Haves()) != 0 { + t.Fatal("Expected block to overwrite HAVE") + } + + msg.AddBlock(b2) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected block to overwrite DONT_HAVE") + } + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + if len(msg.Haves()) != 0 { + t.Fatal("Expected HAVE not to overwrite block") + } + + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected DONT_HAVE not to overwrite block") + } +} + +func TestAddWantlistEntry(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New(true) + + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Have, false) + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + entries := msg.Wantlist() + if len(entries) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + e := entries[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-block should override want-have") + } + if e.SendDontHave != true { + t.Fatal("true SendDontHave should override false SendDontHave") + } + if e.Priority != 1 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + e = msg.Wantlist()[0] + if e.Priority != 2 { + t.Fatal("priority should be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 3, pb.Message_Wantlist_Have, false) + e = msg.Wantlist()[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-have 
should not override want-block") + } + if e.SendDontHave != true { + t.Fatal("false SendDontHave should not override true SendDontHave") + } + if e.Priority != 2 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.Cancel(b.Cid()) + e = msg.Wantlist()[0] + if !e.Cancel { + t.Fatal("cancel should override want") + } + + msg.AddEntry(b.Cid(), 10, pb.Message_Wantlist_Block, true) + if !e.Cancel { + t.Fatal("want should not override cancel") + } +} + +func TestEntrySize(t *testing.T) { + blockGenerator := blocksutil.NewBlockGenerator() + c := blockGenerator.Next().Cid() + e := Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: 10, + WantType: pb.Message_Wantlist_Have, + }, + SendDontHave: true, + Cancel: false, + } + epb := e.ToPB() + if e.Size() != epb.Size() { + t.Fatal("entry size calculation incorrect", e.Size(), epb.Size()) + } +} diff --git a/bitswap/message/pb/Makefile b/bitswap/message/pb/Makefile new file mode 100644 index 0000000000..df34e54b01 --- /dev/null +++ b/bitswap/message/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/bitswap/message/pb/cid.go b/bitswap/message/pb/cid.go new file mode 100644 index 0000000000..46ab0d5074 --- /dev/null +++ b/bitswap/message/pb/cid.go @@ -0,0 +1,44 @@ +package bitswap_message_pb + +import ( + "github.com/ipfs/go-cid" +) + +// NOTE: Don't "embed" the cid, wrap it like we're doing here. Otherwise, gogo +// will try to use the Bytes() function. + +// Cid is a custom type for CIDs in protobufs, that allows us to avoid +// reallocating. +type Cid struct { + Cid cid.Cid +} + +func (c Cid) Marshal() ([]byte, error) { + return c.Cid.Bytes(), nil +} + +func (c *Cid) MarshalTo(data []byte) (int, error) { + // intentionally using KeyString here to avoid allocating. + return copy(data[:c.Size()], c.Cid.KeyString()), nil +} + +func (c *Cid) Unmarshal(data []byte) (err error) { + c.Cid, err = cid.Cast(data) + return err +} + +func (c *Cid) Size() int { + return len(c.Cid.KeyString()) +} + +func (c Cid) MarshalJSON() ([]byte, error) { + return c.Cid.MarshalJSON() +} + +func (c *Cid) UnmarshalJSON(data []byte) error { + return c.Cid.UnmarshalJSON(data) +} + +func (c Cid) Equal(other Cid) bool { + return c.Cid.Equals(other.Cid) +} diff --git a/bitswap/message/pb/cid_test.go b/bitswap/message/pb/cid_test.go new file mode 100644 index 0000000000..490e6b9970 --- /dev/null +++ b/bitswap/message/pb/cid_test.go @@ -0,0 +1,32 @@ +package bitswap_message_pb_test + +import ( + "bytes" + "testing" + + u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-cid" + + pb "github.com/ipfs/boxo/bitswap/message/pb" +) + +func TestCID(t *testing.T) { + var expected = [...]byte{ + 10, 34, 18, 32, 195, 171, + 143, 241, 55, 32, 232, 173, + 144, 71, 221, 57, 70, 107, + 60, 137, 116, 229, 146, 194, + 250, 56, 61, 74, 57, 96, + 113, 76, 174, 240, 196, 242, + } + + c := cid.NewCidV0(u.Hash([]byte("foobar"))) + msg := pb.Message_BlockPresence{Cid: pb.Cid{Cid: c}} + actual, err := msg.Marshal() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actual, expected[:]) { + t.Fatal("failed to correctly encode custom CID type") + } +} diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go new file mode 100644 index 0000000000..ef98a0a9fa --- /dev/null +++ b/bitswap/message/pb/message.pb.go @@ -0,0 +1,1569 @@ +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: message.proto + +package bitswap_message_pb + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Message_BlockPresenceType int32 + +const ( + Message_Have Message_BlockPresenceType = 0 + Message_DontHave Message_BlockPresenceType = 1 +) + +var Message_BlockPresenceType_name = map[int32]string{ + 0: "Have", + 1: "DontHave", +} + +var Message_BlockPresenceType_value = map[string]int32{ + "Have": 0, + "DontHave": 1, +} + +func (x Message_BlockPresenceType) String() string { + return proto.EnumName(Message_BlockPresenceType_name, int32(x)) +} + +func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} +} + +type Message_Wantlist_WantType int32 + +const ( + Message_Wantlist_Block Message_Wantlist_WantType = 0 + Message_Wantlist_Have Message_Wantlist_WantType = 1 +) + +var Message_Wantlist_WantType_name = map[int32]string{ + 0: "Block", + 1: "Have", +} + +var Message_Wantlist_WantType_value = map[string]int32{ + "Block": 0, + "Have": 1, +} + +func (x Message_Wantlist_WantType) String() string { + return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) +} + +func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} +} + +type Message struct { + Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` + BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" json:"blockPresences"` + PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetWantlist() Message_Wantlist { + if m != nil { + return m.Wantlist + } + return Message_Wantlist{} +} + +func (m *Message) GetBlocks() [][]byte { + if m != nil { + return m.Blocks + } + return nil +} + +func (m *Message) 
GetPayload() []Message_Block { + if m != nil { + return m.Payload + } + return nil +} + +func (m *Message) GetBlockPresences() []Message_BlockPresence { + if m != nil { + return m.BlockPresences + } + return nil +} + +func (m *Message) GetPendingBytes() int32 { + if m != nil { + return m.PendingBytes + } + return 0 +} + +type Message_Wantlist struct { + Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` + Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` +} + +func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } +func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } +func (*Message_Wantlist) ProtoMessage() {} +func (*Message_Wantlist) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} +} +func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Wantlist) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist.Merge(m, src) +} +func (m *Message_Wantlist) XXX_Size() int { + return m.Size() +} +func (m *Message_Wantlist) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo + +func (m *Message_Wantlist) GetEntries() []Message_Wantlist_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *Message_Wantlist) GetFull() bool { + if m != nil { + return m.Full + } + return false +} + +type Message_Wantlist_Entry struct { + Block Cid `protobuf:"bytes,1,opt,name=block,proto3,customtype=Cid" json:"block"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` + SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` +} + +func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } +func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } +func (*Message_Wantlist_Entry) ProtoMessage() {} +func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} +} +func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) +} +func (m *Message_Wantlist_Entry) XXX_Size() int { + return m.Size() +} +func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo + +func (m *Message_Wantlist_Entry) 
GetPriority() int32 { + if m != nil { + return m.Priority + } + return 0 +} + +func (m *Message_Wantlist_Entry) GetCancel() bool { + if m != nil { + return m.Cancel + } + return false +} + +func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { + if m != nil { + return m.WantType + } + return Message_Wantlist_Block +} + +func (m *Message_Wantlist_Entry) GetSendDontHave() bool { + if m != nil { + return m.SendDontHave + } + return false +} + +type Message_Block struct { + Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Message_Block) Reset() { *m = Message_Block{} } +func (m *Message_Block) String() string { return proto.CompactTextString(m) } +func (*Message_Block) ProtoMessage() {} +func (*Message_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} +} +func (m *Message_Block) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Block.Merge(m, src) +} +func (m *Message_Block) XXX_Size() int { + return m.Size() +} +func (m *Message_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Block proto.InternalMessageInfo + +func (m *Message_Block) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +func (m *Message_Block) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type Message_BlockPresence struct { + Cid Cid `protobuf:"bytes,1,opt,name=cid,proto3,customtype=Cid" json:"cid"` + Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` +} + +func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } +func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } +func (*Message_BlockPresence) ProtoMessage() {} +func (*Message_BlockPresence) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} +} +func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_BlockPresence.Merge(m, src) +} +func (m *Message_BlockPresence) XXX_Size() int { + return m.Size() +} +func (m *Message_BlockPresence) XXX_DiscardUnknown() { + xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo + +func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { + if m != nil { + return m.Type + } + return Message_Have +} + +func init() { + proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", Message_BlockPresenceType_name, Message_BlockPresenceType_value) + 
proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) + proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") + proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") + proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") + proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") + proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") +} + +func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } + +var fileDescriptor_33c57e4bae7b9afd = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x33, 0x4d, 0xd2, 0xc6, 0xd3, 0xee, 0x52, 0xe7, 0x42, 0x42, 0xc0, 0x34, 0x5b, 0xbc, + 0x88, 0xca, 0x66, 0xa1, 0xfb, 0x04, 0x5b, 0xff, 0xa0, 0x82, 0x20, 0x83, 0xd0, 0xeb, 0xfc, 0x99, + 0xd6, 0xc1, 0x6c, 0x12, 0x33, 0x53, 0xd7, 0xbe, 0x85, 0x8f, 0xb5, 0x37, 0xc2, 0x5e, 0x8a, 0xca, + 0x22, 0xed, 0x8b, 0x48, 0x4e, 0xa6, 0x85, 0xba, 0xe2, 0xee, 0xdd, 0x9c, 0x33, 0xe7, 0xfb, 0x65, + 0xbe, 0xef, 0x10, 0x38, 0x38, 0xe7, 0x52, 0xc6, 0x0b, 0x1e, 0x55, 0x75, 0xa9, 0x4a, 0x4a, 0x13, + 0xa1, 0xe4, 0x45, 0x5c, 0x45, 0xbb, 0x76, 0xe2, 0x1d, 0x2f, 0x84, 0xfa, 0xb0, 0x4c, 0xa2, 0xb4, + 0x3c, 0x3f, 0x59, 0x94, 0x8b, 0xf2, 0x04, 0x47, 0x93, 0xe5, 0x1c, 0x2b, 0x2c, 0xf0, 0xd4, 0x22, + 0xc6, 0xbf, 0xba, 0xd0, 0x7b, 0xdb, 0xaa, 0xe9, 0x4b, 0x70, 0x2e, 0xe2, 0x42, 0xe5, 0x42, 0x2a, + 0x97, 0x04, 0x24, 0xec, 0x4f, 0x1e, 0x45, 0x37, 0xbf, 0x10, 0xe9, 0xf1, 0x68, 0xa6, 0x67, 0xa7, + 0xd6, 0xe5, 0xf5, 0xc8, 0x60, 0x3b, 0x2d, 0x7d, 0x00, 0xdd, 0x24, 0x2f, 0xd3, 0x8f, 0xd2, 0xed, + 0x04, 0x66, 0x38, 0x60, 0xba, 0xa2, 0x67, 0xd0, 0xab, 0xe2, 0x55, 0x5e, 0xc6, 0x99, 0x6b, 0x06, + 0x66, 0xd8, 0x9f, 0x1c, 0xfd, 0x0f, 0x3f, 0x6d, 0x44, 0x9a, 0xbd, 0xd5, 0xd1, 0x19, 0x1c, 0x22, + 0xec, 0x5d, 0xcd, 0x25, 0x2f, 0x52, 0x2e, 0x5d, 0x0b, 0x49, 0x8f, 0x6f, 0x25, 0x6d, 0x15, 0x9a, + 0xf8, 0x17, 0x86, 0x8e, 0x61, 0x50, 0xf1, 0x22, 0x13, 0xc5, 0x62, 0xba, 0x52, 0x5c, 0xba, 0x76, + 0x40, 0x42, 0x9b, 0xed, 0xf5, 0xbc, 0x9f, 0x1d, 0x70, 0xb6, 0xa6, 0xe9, 0x1b, 0xe8, 0xf1, 0x42, + 0xd5, 0x82, 0x4b, 0x97, 0xe0, 0x13, 0x9e, 0xdc, 0x25, 0xab, 0xe8, 0x45, 0xa1, 0xea, 0xd5, 0xd6, + 0x95, 0x06, 0x50, 0x0a, 0xd6, 0x7c, 0x99, 0xe7, 0x6e, 0x27, 0x20, 0xa1, 0xc3, 0xf0, 0xec, 0x7d, + 0x23, 0x60, 0xe3, 0x30, 0x3d, 0x02, 0x1b, 0x1f, 0x8b, 0x3b, 0x19, 0x4c, 0xfb, 0x8d, 0xf6, 0xc7, + 0xf5, 0xc8, 0x7c, 0x26, 0x32, 0xd6, 0xde, 0x50, 0x0f, 0x9c, 0xaa, 0x16, 0x65, 0x2d, 0xd4, 0x0a, + 0x21, 0x36, 0xdb, 0xd5, 0xcd, 0x36, 0xd2, 0xb8, 0x48, 0x79, 0xee, 0x9a, 0x88, 0xd7, 0x15, 0x7d, + 0xdd, 0x6e, 0xfb, 0xfd, 0xaa, 0xe2, 0xae, 0x15, 0x90, 0xf0, 0x70, 0x72, 0x7c, 0x27, 0x07, 0x33, + 0x2d, 0x62, 0x3b, 0x79, 0x13, 0x9e, 0xe4, 0x45, 0xf6, 0xbc, 0x2c, 0xd4, 0xab, 0xf8, 0x33, 0xc7, + 0xf0, 0x1c, 0xb6, 0xd7, 0x1b, 0x8f, 0xda, 0xec, 0x70, 0xfe, 0x1e, 0xd8, 0xb8, 0x93, 0xa1, 0x41, + 0x1d, 0xb0, 0x9a, 0xeb, 0x21, 0xf1, 0x4e, 0x75, 0xb3, 0x79, 0x70, 0x55, 0xf3, 0xb9, 0xf8, 0xd2, + 0x1a, 0x66, 0xba, 0x6a, 0x52, 0xca, 0x62, 0x15, 0xa3, 0xc1, 0x01, 0xc3, 0xb3, 0xf7, 0x09, 0x0e, + 0xf6, 0xb6, 0x4b, 0x1f, 0x82, 0x99, 0x8a, 0xec, 0x5f, 0x51, 0x35, 0x7d, 0x7a, 0x06, 0x96, 0x6a, + 0x0c, 0x77, 0x6e, 0x37, 0xbc, 0xc7, 0x45, 0xc3, 0x28, 0x1d, 0x3f, 0x85, 0xfb, 0x37, 0xae, 0x76, + 0x36, 0x0c, 0x3a, 0x00, 0x67, 0xeb, 0x79, 0x48, 0xa6, 0xee, 
0xe5, 0xda, 0x27, 0x57, 0x6b, 0x9f, + 0xfc, 0x5e, 0xfb, 0xe4, 0xeb, 0xc6, 0x37, 0xae, 0x36, 0xbe, 0xf1, 0x7d, 0xe3, 0x1b, 0x49, 0x17, + 0xff, 0xbf, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x8a, 0xaf, 0x83, 0xd3, 0x03, 0x00, + 0x00, +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PendingBytes != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) + i-- + dAtA[i] = 0x28 + } + if len(m.BlockPresences) > 0 { + for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Payload) > 0 { + for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Wantlist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Wantlist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Full { + i-- + if m.Full { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SendDontHave { + i-- + if m.SendDontHave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.WantType != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) + i-- + dAtA[i] = 0x20 + } + if m.Cancel { + i-- + if m.Cancel { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Priority != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x10 + } + { + size := m.Block.Size() + i -= size + if _, err := m.Block.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Message_Block) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + { + size := m.Cid.Size() + i -= size + if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { + offset -= sovMessage(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Wantlist.Size() + n += 1 + l + sovMessage(uint64(l)) + if len(m.Blocks) > 0 { + for _, b := range m.Blocks { + l = len(b) + n += 1 + l + sovMessage(uint64(l)) + } + } + if len(m.Payload) > 0 { + for _, e := range m.Payload { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if len(m.BlockPresences) > 0 { + for _, e := range m.BlockPresences { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.PendingBytes != 0 { + n += 1 + sovMessage(uint64(m.PendingBytes)) + } + return n +} + +func (m *Message_Wantlist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.Full { + n += 2 + } + return n +} + +func (m *Message_Wantlist_Entry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Block.Size() + n += 1 + l + sovMessage(uint64(l)) + if m.Priority != 0 { + n += 1 + sovMessage(uint64(m.Priority)) + } + if m.Cancel { + n += 2 + } + if m.WantType != 0 { + n += 1 + sovMessage(uint64(m.WantType)) + } + if m.SendDontHave { + n += 2 + } + return n +} + +func (m *Message_Block) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + return n +} + +func (m *Message_BlockPresence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Cid.Size() + n += 1 + l + sovMessage(uint64(l)) + if m.Type != 0 { + n += 1 + sovMessage(uint64(m.Type)) + } + return n +} + +func sovMessage(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessage(x uint64) (n int) { + return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Wantlist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, make([]byte, postIndex-iNdEx)) + copy(m.Blocks[len(m.Blocks)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload, Message_Block{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) + if err := m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) + } + m.PendingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PendingBytes |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Wantlist: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Wantlist: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Message_Wantlist_Entry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Full", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Full = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cancel", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Cancel = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) + } + m.WantType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SendDontHave = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) + if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Message_BlockPresenceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessage + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessage + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") +) diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto new file mode 100644 index 0000000000..e6c271cc2f --- /dev/null +++ b/bitswap/message/pb/message.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package bitswap.message.pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message Message { + + message Wantlist { + enum WantType { + Block = 0; + Have = 1; + } + + message Entry { + bytes block = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) + int32 priority = 2; // the priority (normalized). default to 1 + bool cancel = 3; // whether this revokes an entry + WantType wantType = 4; // Note: defaults to enum 0, ie Block + bool sendDontHave = 5; // Note: defaults to false + } + + repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries + bool full = 2; // whether this is the full wantlist. 
default to false + } + + message Block { + bytes prefix = 1; // CID prefix (cid version, multicodec, and multihash prefix (type + length)) + bytes data = 2; + } + + enum BlockPresenceType { + Have = 0; + DontHave = 1; + } + message BlockPresence { + bytes cid = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; + BlockPresenceType type = 2; + } + + Wantlist wantlist = 1 [(gogoproto.nullable) = false]; + repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 + repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 + repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; + int32 pendingBytes = 5; +} diff --git a/bitswap/metrics/metrics.go b/bitswap/metrics/metrics.go new file mode 100644 index 0000000000..b719237276 --- /dev/null +++ b/bitswap/metrics/metrics.go @@ -0,0 +1,46 @@ +package metrics + +import ( + "context" + + "github.com/ipfs/go-metrics-interface" +) + +var ( + // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} + + timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} +) + +func DupHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks received").Histogram(metricsBuckets) +} + +func AllHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks received").Histogram(metricsBuckets) +} + +func SentHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) +} + +func SendTimeHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) +} + +func PendingEngineGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() +} + +func ActiveEngineGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() +} + +func PendingBlocksGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() +} + +func ActiveBlocksGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() +} diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go new file mode 100644 index 0000000000..88337fce36 --- /dev/null +++ b/bitswap/network/connecteventmanager.go @@ -0,0 +1,218 @@ +package network + +import ( + "sync" + + "github.com/libp2p/go-libp2p/core/peer" +) + +type ConnectionListener interface { + PeerConnected(peer.ID) + PeerDisconnected(peer.ID) +} + +type state byte + +const ( + stateDisconnected = iota + stateResponsive + stateUnresponsive +) + +type connectEventManager struct { + connListeners []ConnectionListener + lk sync.RWMutex + cond sync.Cond + peers map[peer.ID]*peerState + + changeQueue []peer.ID + stop bool + done chan struct{} +} + +type peerState struct { + newState, curState state + pending bool +} + +func newConnectEventManager(connListeners ...ConnectionListener) *connectEventManager { + evtManager := &connectEventManager{ + connListeners: connListeners, + peers: 
make(map[peer.ID]*peerState), + done: make(chan struct{}), + } + evtManager.cond = sync.Cond{L: &evtManager.lk} + return evtManager +} + +func (c *connectEventManager) Start() { + go c.worker() +} + +func (c *connectEventManager) Stop() { + c.lk.Lock() + c.stop = true + c.lk.Unlock() + c.cond.Broadcast() + + <-c.done +} + +func (c *connectEventManager) getState(p peer.ID) state { + if state, ok := c.peers[p]; ok { + return state.newState + } else { + return stateDisconnected + } +} + +func (c *connectEventManager) setState(p peer.ID, newState state) { + state, ok := c.peers[p] + if !ok { + state = new(peerState) + c.peers[p] = state + } + state.newState = newState + if !state.pending && state.newState != state.curState { + state.pending = true + c.changeQueue = append(c.changeQueue, p) + c.cond.Broadcast() + } +} + +// Waits for a change to be enqueued, or for the event manager to be stopped. Returns false if the +// connect event manager has been stopped. +func (c *connectEventManager) waitChange() bool { + for !c.stop && len(c.changeQueue) == 0 { + c.cond.Wait() + } + return !c.stop +} + +func (c *connectEventManager) worker() { + c.lk.Lock() + defer c.lk.Unlock() + defer close(c.done) + + for c.waitChange() { + pid := c.changeQueue[0] + c.changeQueue[0] = peer.ID("") // free the peer ID (slicing won't do that) + c.changeQueue = c.changeQueue[1:] + + state, ok := c.peers[pid] + // If we've disconnected and forgotten, continue. + if !ok { + // This shouldn't be possible because _this_ thread is responsible for + // removing peers from this map, and we shouldn't get duplicate entries in + // the change queue. + log.Error("a change was enqueued for a peer we're not tracking") + continue + } + + // Record the fact that this "state" is no longer in the queue. + state.pending = false + + // Then, if there's nothing to do, continue. + if state.curState == state.newState { + continue + } + + // Or record the state update, then apply it. + oldState := state.curState + state.curState = state.newState + + switch state.newState { + case stateDisconnected: + delete(c.peers, pid) + fallthrough + case stateUnresponsive: + // Only trigger a disconnect event if the peer was responsive. + // We could be transitioning from unresponsive to disconnected. + if oldState == stateResponsive { + c.lk.Unlock() + for _, v := range c.connListeners { + v.PeerDisconnected(pid) + } + c.lk.Lock() + } + case stateResponsive: + c.lk.Unlock() + for _, v := range c.connListeners { + v.PeerConnected(pid) + } + c.lk.Lock() + } + } +} + +// Called whenever we receive a new connection. May be called many times. +func (c *connectEventManager) Connected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + // !responsive -> responsive + + if c.getState(p) == stateResponsive { + return + } + c.setState(p, stateResponsive) +} + +// Called when we drop the final connection to a peer. +func (c *connectEventManager) Disconnected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + // !disconnected -> disconnected + + if c.getState(p) == stateDisconnected { + return + } + + c.setState(p, stateDisconnected) +} + +// Called whenever a peer is unresponsive. +func (c *connectEventManager) MarkUnresponsive(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + // responsive -> unresponsive + + if c.getState(p) != stateResponsive { + return + } + + c.setState(p, stateUnresponsive) +} + +// Called whenever we receive a message from a peer. +// +// - When we're connected to the peer, this will mark the peer as responsive (from unresponsive). 
+// - When not connected, we ignore this call. Unfortunately, a peer may disconnect before we process the "on message" event, so we can't treat this as evidence of a connection. +func (c *connectEventManager) OnMessage(p peer.ID) { + c.lk.RLock() + unresponsive := c.getState(p) == stateUnresponsive + c.lk.RUnlock() + + // Only continue if both connected and unresponsive. + if !unresponsive { + return + } + + // unresponsive -> responsive + + // We need to make a modification, so take a write lock now. + c.lk.Lock() + defer c.lk.Unlock() + + // Note: the state may have changed between releasing the read lock + // and taking the write lock, so check again. + if c.getState(p) != stateUnresponsive { + return + } + + c.setState(p, stateResponsive) +} diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go new file mode 100644 index 0000000000..e3904ee555 --- /dev/null +++ b/bitswap/network/connecteventmanager_test.go @@ -0,0 +1,175 @@ +package network + +import ( + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +type mockConnEvent struct { + connected bool + peer peer.ID +} + +type mockConnListener struct { + sync.Mutex + events []mockConnEvent +} + +func newMockConnListener() *mockConnListener { + return new(mockConnListener) +} + +func (cl *mockConnListener) PeerConnected(p peer.ID) { + cl.Lock() + defer cl.Unlock() + cl.events = append(cl.events, mockConnEvent{connected: true, peer: p}) +} + +func (cl *mockConnListener) PeerDisconnected(p peer.ID) { + cl.Lock() + defer cl.Unlock() + cl.events = append(cl.events, mockConnEvent{connected: false, peer: p}) +} + +func wait(t *testing.T, c *connectEventManager) { + require.Eventually(t, func() bool { + c.lk.RLock() + defer c.lk.RUnlock() + return len(c.changeQueue) == 0 + }, time.Second, time.Millisecond, "connection event manager never processed events") +} + +func TestConnectEventManagerConnectDisconnect(t *testing.T) { + test.Flaky(t) + + connListener := newMockConnListener() + peers := testutil.GeneratePeers(2) + cem := newConnectEventManager(connListener) + cem.Start() + t.Cleanup(cem.Stop) + + var expectedEvents []mockConnEvent + + // Connect A twice, should only see one event + cem.Connected(peers[0]) + cem.Connected(peers[0]) + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: peers[0], + connected: true, + }) + + // Flush the event queue. + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) + + // Block up the event loop. + connListener.Lock() + cem.Connected(peers[1]) + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: peers[1], + connected: true, + }) + + // We don't expect this to show up. + cem.Disconnected(peers[0]) + cem.Connected(peers[0]) + + connListener.Unlock() + + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) +} + +func TestConnectEventManagerMarkUnresponsive(t *testing.T) { + test.Flaky(t) + + connListener := newMockConnListener() + p := testutil.GeneratePeers(1)[0] + cem := newConnectEventManager(connListener) + cem.Start() + t.Cleanup(cem.Stop) + + var expectedEvents []mockConnEvent + + // Don't mark as connected when we receive a message (could have been delayed). + cem.OnMessage(p) + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) + + // Handle connected event. 
+	cem.Connected(p)
+	wait(t, cem)
+
+	expectedEvents = append(expectedEvents, mockConnEvent{
+		peer:      p,
+		connected: true,
+	})
+	require.Equal(t, expectedEvents, connListener.events)
+
+	// Becomes unresponsive.
+	cem.MarkUnresponsive(p)
+	wait(t, cem)
+
+	expectedEvents = append(expectedEvents, mockConnEvent{
+		peer:      p,
+		connected: false,
+	})
+	require.Equal(t, expectedEvents, connListener.events)
+
+	// We have a new connection, mark them responsive.
+	cem.Connected(p)
+	wait(t, cem)
+	expectedEvents = append(expectedEvents, mockConnEvent{
+		peer:      p,
+		connected: true,
+	})
+	require.Equal(t, expectedEvents, connListener.events)
+
+	// No duplicate event.
+	cem.OnMessage(p)
+	wait(t, cem)
+	require.Equal(t, expectedEvents, connListener.events)
+}
+
+func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t *testing.T) {
+	test.Flaky(t)
+
+	connListener := newMockConnListener()
+	p := testutil.GeneratePeers(1)[0]
+	cem := newConnectEventManager(connListener)
+	cem.Start()
+	t.Cleanup(cem.Stop)
+
+	var expectedEvents []mockConnEvent
+
+	// Handle connected event.
+	cem.Connected(p)
+	wait(t, cem)
+
+	expectedEvents = append(expectedEvents, mockConnEvent{
+		peer:      p,
+		connected: true,
+	})
+	require.Equal(t, expectedEvents, connListener.events)
+
+	// Becomes unresponsive.
+	cem.MarkUnresponsive(p)
+	wait(t, cem)
+
+	expectedEvents = append(expectedEvents, mockConnEvent{
+		peer:      p,
+		connected: false,
+	})
+	require.Equal(t, expectedEvents, connListener.events)
+
+	cem.Disconnected(p)
+	wait(t, cem)
+	require.Empty(t, cem.peers) // all disconnected
+	require.Equal(t, expectedEvents, connListener.events)
+}
diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go
new file mode 100644
index 0000000000..962bc25882
--- /dev/null
+++ b/bitswap/network/interface.go
@@ -0,0 +1,111 @@
+package network
+
+import (
+	"context"
+	"time"
+
+	bsmsg "github.com/ipfs/boxo/bitswap/message"
+	"github.com/ipfs/boxo/bitswap/network/internal"
+
+	cid "github.com/ipfs/go-cid"
+
+	"github.com/libp2p/go-libp2p/core/connmgr"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
+)
+
+var (
+	// ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol
+	ProtocolBitswapNoVers = internal.ProtocolBitswapNoVers
+	// ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol
+	ProtocolBitswapOneZero = internal.ProtocolBitswapOneZero
+	// ProtocolBitswapOneOne is the prefix for version 1.1.0
+	ProtocolBitswapOneOne = internal.ProtocolBitswapOneOne
+	// ProtocolBitswap is the current version of the bitswap protocol: 1.2.0
+	ProtocolBitswap = internal.ProtocolBitswap
+)
+
+// BitSwapNetwork provides network connectivity for BitSwap sessions.
+type BitSwapNetwork interface {
+	Self() peer.ID
+
+	// SendMessage sends a BitSwap message to a peer.
+	SendMessage(
+		context.Context,
+		peer.ID,
+		bsmsg.BitSwapMessage) error
+
+	// Start registers the Receiver and starts handling new messages, connectivity events, etc.
+	Start(...Receiver)
+	// Stop stops the network service.
+	Stop()
+
+	ConnectTo(context.Context, peer.ID) error
+	DisconnectFrom(context.Context, peer.ID) error
+
+	NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error)
+
+	ConnectionManager() connmgr.ConnManager
+
+	Stats() Stats
+
+	Routing
+
+	Pinger
+}
+
+// MessageSender is an interface for sending a series of messages over the
+// bitswap network.
+type MessageSender interface {
+	SendMsg(context.Context, bsmsg.BitSwapMessage) error
+	Close() error
+	Reset() error
+	// Indicates whether the remote peer supports HAVE / DONT_HAVE messages
+	SupportsHave() bool
+}
+
+type MessageSenderOpts struct {
+	MaxRetries       int
+	SendTimeout      time.Duration
+	SendErrorBackoff time.Duration
+}
+
+// Receiver is an interface that can receive messages from the BitSwapNetwork.
+type Receiver interface {
+	ReceiveMessage(
+		ctx context.Context,
+		sender peer.ID,
+		incoming bsmsg.BitSwapMessage)
+
+	ReceiveError(error)
+
+	// Connected/Disconnected warns bitswap about peer connections.
+	PeerConnected(peer.ID)
+	PeerDisconnected(peer.ID)
+}
+
+// Routing is an interface for providing keys and finding providers on a
+// bitswap network.
+type Routing interface {
+	// FindProvidersAsync returns a channel of providers for the given key.
+	FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID
+
+	// Provide provides the key to the network.
+	Provide(context.Context, cid.Cid) error
+}
+
+// Pinger is an interface to ping a peer and get the average latency of all pings
+type Pinger interface {
+	// Ping a peer
+	Ping(context.Context, peer.ID) ping.Result
+	// Get the average latency of all pings
+	Latency(peer.ID) time.Duration
+}
+
+// Stats is a container for statistics about the bitswap network.
+// The numbers inside are specific to bitswap, and not any other protocols
+// using the same underlying network.
+type Stats struct {
+	MessagesSent  uint64
+	MessagesRecvd uint64
+}
diff --git a/bitswap/network/internal/default.go b/bitswap/network/internal/default.go
new file mode 100644
index 0000000000..13f4936a85
--- /dev/null
+++ b/bitswap/network/internal/default.go
@@ -0,0 +1,23 @@
+package internal
+
+import (
+	"github.com/libp2p/go-libp2p/core/protocol"
+)
+
+var (
+	// ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol
+	ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap"
+	// ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol
+	ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0"
+	// ProtocolBitswapOneOne is the prefix for version 1.1.0
+	ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0"
+	// ProtocolBitswap is the current version of the bitswap protocol: 1.2.0
+	ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0"
+)
+
+var DefaultProtocols = []protocol.ID{
+	ProtocolBitswap,
+	ProtocolBitswapOneOne,
+	ProtocolBitswapOneZero,
+	ProtocolBitswapNoVers,
+}
diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go
new file mode 100644
index 0000000000..00eb76ba4e
--- /dev/null
+++ b/bitswap/network/ipfs_impl.go
@@ -0,0 +1,472 @@
+package network
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+	"time"
+
+	bsmsg "github.com/ipfs/boxo/bitswap/message"
+	"github.com/ipfs/boxo/bitswap/network/internal"
+
+	cid "github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log"
+	"github.com/libp2p/go-libp2p/core/connmgr"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	peerstore "github.com/libp2p/go-libp2p/core/peerstore"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	"github.com/libp2p/go-libp2p/core/routing"
+	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
+	msgio "github.com/libp2p/go-msgio"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/multiformats/go-multistream"
+)
+
+var log = logging.Logger("bitswap_network")
+
+var connectTimeout = time.Second * 5
+
+var maxSendTimeout = 2 * time.Minute
+var minSendTimeout = 10 * time.Second
+var sendLatency = 2 * time.Second
+var minSendRate = (100 * 1000) / 8 // 100kbit/s
+
+// NewFromIpfsHost returns a BitSwapNetwork supported by the underlying IPFS host.
+func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork {
+	s := processSettings(opts...)
+
+	bitswapNetwork := impl{
+		host:    host,
+		routing: r,
+
+		protocolBitswapNoVers:  s.ProtocolPrefix + ProtocolBitswapNoVers,
+		protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero,
+		protocolBitswapOneOne:  s.ProtocolPrefix + ProtocolBitswapOneOne,
+		protocolBitswap:        s.ProtocolPrefix + ProtocolBitswap,
+
+		supportedProtocols: s.SupportedProtocols,
+	}
+
+	return &bitswapNetwork
+}
+
+func processSettings(opts ...NetOpt) Settings {
+	s := Settings{SupportedProtocols: append([]protocol.ID(nil), internal.DefaultProtocols...)}
+	for _, opt := range opts {
+		opt(&s)
+	}
+	for i, proto := range s.SupportedProtocols {
+		s.SupportedProtocols[i] = s.ProtocolPrefix + proto
+	}
+	return s
+}
+
+// impl transforms the ipfs network interface, which sends and receives
+// NetMessage objects, into the bitswap network interface.
+type impl struct {
+	// NOTE: Stats must be at the top of the heap allocation to ensure 64bit
+	// alignment.
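+	// (This matters because sync/atomic's 64-bit operations panic on
+	// misaligned addresses on 32-bit platforms, and only the first word of
+	// an allocated struct is guaranteed to be 64-bit aligned; see the
+	// "Bugs" note in the sync/atomic documentation.)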
+ stats Stats + + host host.Host + routing routing.ContentRouting + connectEvtMgr *connectEventManager + + protocolBitswapNoVers protocol.ID + protocolBitswapOneZero protocol.ID + protocolBitswapOneOne protocol.ID + protocolBitswap protocol.ID + + supportedProtocols []protocol.ID + + // inbound messages from the network are forwarded to the receiver + receivers []Receiver +} + +type streamMessageSender struct { + to peer.ID + stream network.Stream + connected bool + bsnet *impl + opts *MessageSenderOpts +} + +// Open a stream to the remote peer +func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { + if s.connected { + return s.stream, nil + } + + tctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) + defer cancel() + + if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { + return nil, err + } + + stream, err := s.bsnet.newStreamToPeer(tctx, s.to) + if err != nil { + return nil, err + } + + s.stream = stream + s.connected = true + return s.stream, nil +} + +// Reset the stream +func (s *streamMessageSender) Reset() error { + if s.stream != nil { + err := s.stream.Reset() + s.connected = false + return err + } + return nil +} + +// Close the stream +func (s *streamMessageSender) Close() error { + return s.stream.Close() +} + +// Indicates whether the peer supports HAVE / DONT_HAVE messages +func (s *streamMessageSender) SupportsHave() bool { + return s.bsnet.SupportsHave(s.stream.Protocol()) +} + +// Send a message to the peer, attempting multiple times +func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + return s.multiAttempt(ctx, func() error { + return s.send(ctx, msg) + }) +} + +// Perform a function with multiple attempts, and a timeout +func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { + // Try to call the function repeatedly + var err error + for i := 0; i < s.opts.MaxRetries; i++ { + if err = fn(); err == nil { + // Attempt was successful + return nil + } + + // Attempt failed + + // If the sender has been closed or the context cancelled, just bail out + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Protocol is not supported, so no need to try multiple times + if errors.Is(err, multistream.ErrNotSupported[protocol.ID]{}) { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + return err + } + + // Failed to send so reset stream and try again + _ = s.Reset() + + // Failed too many times so mark the peer as unresponsive and return an error + if i == s.opts.MaxRetries-1 { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(s.opts.SendErrorBackoff): + // wait a short time in case disconnect notifications are still propagating + log.Infof("send message to %s failed but context was not Done: %s", s.to, err) + } + } + return err +} + +// Send a message to the peer +func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { + start := time.Now() + stream, err := s.Connect(ctx) + if err != nil { + log.Infof("failed to open stream to %s: %s", s.to, err) + return err + } + + // The send timeout includes the time required to connect + // (although usually we will already have connected - we only need to + // connect after a failed attempt to send) + timeout := s.opts.SendTimeout - time.Since(start) + if err = s.bsnet.msgToStream(ctx, stream, msg, timeout); err != nil { + log.Infof("failed to send message to %s: %s", s.to, err) + return err + 
}
+
+	return nil
+}
+
+func (bsnet *impl) Self() peer.ID {
+	return bsnet.host.ID()
+}
+
+func (bsnet *impl) Ping(ctx context.Context, p peer.ID) ping.Result {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	res := <-ping.Ping(ctx, bsnet.host, p)
+	return res
+}
+
+func (bsnet *impl) Latency(p peer.ID) time.Duration {
+	return bsnet.host.Peerstore().LatencyEWMA(p)
+}
+
+// Indicates whether the given protocol supports HAVE / DONT_HAVE messages
+func (bsnet *impl) SupportsHave(proto protocol.ID) bool {
+	switch proto {
+	case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers:
+		return false
+	}
+	return true
+}
+
+func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	if dl, ok := ctx.Deadline(); ok && dl.Before(deadline) {
+		deadline = dl
+	}
+
+	if err := s.SetWriteDeadline(deadline); err != nil {
+		log.Warnf("error setting deadline: %s", err)
+	}
+
+	// Older Bitswap versions use a slightly different wire format so we need
+	// to convert the message to the appropriate format depending on the remote
+	// peer's Bitswap version.
+	switch s.Protocol() {
+	case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap:
+		if err := msg.ToNetV1(s); err != nil {
+			log.Debugf("error: %s", err)
+			return err
+		}
+	case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers:
+		if err := msg.ToNetV0(s); err != nil {
+			log.Debugf("error: %s", err)
+			return err
+		}
+	default:
+		return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol())
+	}
+
+	atomic.AddUint64(&bsnet.stats.MessagesSent, 1)
+
+	if err := s.SetWriteDeadline(time.Time{}); err != nil {
+		log.Warnf("error resetting deadline: %s", err)
+	}
+	return nil
+}
+
+func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) {
+	opts = setDefaultOpts(opts)
+
+	sender := &streamMessageSender{
+		to:    p,
+		bsnet: bsnet,
+		opts:  opts,
+	}
+
+	err := sender.multiAttempt(ctx, func() error {
+		_, err := sender.Connect(ctx)
+		return err
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts {
+	copy := *opts
+	if opts.MaxRetries == 0 {
+		copy.MaxRetries = 3
+	}
+	if opts.SendTimeout == 0 {
+		copy.SendTimeout = maxSendTimeout
+	}
+	if opts.SendErrorBackoff == 0 {
+		copy.SendErrorBackoff = 100 * time.Millisecond
+	}
+	return &copy
+}
+
+func sendTimeout(size int) time.Duration {
+	timeout := sendLatency
+	timeout += time.Duration((uint64(time.Second) * uint64(size)) / uint64(minSendRate))
+	if timeout > maxSendTimeout {
+		timeout = maxSendTimeout
+	} else if timeout < minSendTimeout {
+		timeout = minSendTimeout
+	}
+	return timeout
+}
+
+func (bsnet *impl) SendMessage(
+	ctx context.Context,
+	p peer.ID,
+	outgoing bsmsg.BitSwapMessage) error {
+
+	tctx, cancel := context.WithTimeout(ctx, connectTimeout)
+	defer cancel()
+
+	s, err := bsnet.newStreamToPeer(tctx, p)
+	if err != nil {
+		return err
+	}
+
+	timeout := sendTimeout(outgoing.Size())
+	if err = bsnet.msgToStream(ctx, s, outgoing, timeout); err != nil {
+		_ = s.Reset()
+		return err
+	}
+
+	return s.Close()
+}
+
+func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) {
+	return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...)
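+	// (NewStream negotiates the stream protocol via multistream-select,
+	// trying supportedProtocols in the order given, so the newest mutually
+	// supported Bitswap version wins.)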
+} + +func (bsnet *impl) Start(r ...Receiver) { + bsnet.receivers = r + { + connectionListeners := make([]ConnectionListener, len(r)) + for i, v := range r { + connectionListeners[i] = v + } + bsnet.connectEvtMgr = newConnectEventManager(connectionListeners...) + } + for _, proto := range bsnet.supportedProtocols { + bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) + } + bsnet.host.Network().Notify((*netNotifiee)(bsnet)) + bsnet.connectEvtMgr.Start() + +} + +func (bsnet *impl) Stop() { + bsnet.connectEvtMgr.Stop() + bsnet.host.Network().StopNotify((*netNotifiee)(bsnet)) +} + +func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { + return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) +} + +func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { + return bsnet.host.Network().ClosePeer(p) +} + +// FindProvidersAsync returns a channel of providers for the given key. +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + out := make(chan peer.ID, max) + go func() { + defer close(out) + providers := bsnet.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + if info.ID == bsnet.host.ID() { + continue // ignore self as provider + } + bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) + select { + case <-ctx.Done(): + return + case out <- info.ID: + } + } + }() + return out +} + +// Provide provides the key to the network +func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { + return bsnet.routing.Provide(ctx, k, true) +} + +// handleNewStream receives a new stream from the network. +func (bsnet *impl) handleNewStream(s network.Stream) { + defer s.Close() + + if len(bsnet.receivers) == 0 { + _ = s.Reset() + return + } + + reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) + for { + received, err := bsmsg.FromMsgReader(reader) + if err != nil { + if err != io.EOF { + _ = s.Reset() + for _, v := range bsnet.receivers { + v.ReceiveError(err) + } + log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) + } + return + } + + p := s.Conn().RemotePeer() + ctx := context.Background() + log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) + bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) + atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) + for _, v := range bsnet.receivers { + v.ReceiveMessage(ctx, p, received) + } + } +} + +func (bsnet *impl) ConnectionManager() connmgr.ConnManager { + return bsnet.host.ConnManager() +} + +func (bsnet *impl) Stats() Stats { + return Stats{ + MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), + MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), + } +} + +type netNotifiee impl + +func (nn *netNotifiee) impl() *impl { + return (*impl)(nn) +} + +func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { + // ignore transient connections + if v.Stat().Transient { + return + } + + nn.impl().connectEvtMgr.Connected(v.RemotePeer()) +} +func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { + // Only record a "disconnect" when we actually disconnect. 
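+	// (libp2p delivers one Disconnected notification per closed connection,
+	// and a peer can have several connections open at once, so only treat
+	// this as a real disconnect once no connections remain.)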
+	if n.Connectedness(v.RemotePeer()) == network.Connected {
+		return
+	}
+
+	nn.impl().connectEvtMgr.Disconnected(v.RemotePeer())
+}
+func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {}
+func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {}
+func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr)         {}
+func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr)    {}
diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go
new file mode 100644
index 0000000000..61b00baa35
--- /dev/null
+++ b/bitswap/network/ipfs_impl_test.go
@@ -0,0 +1,682 @@
+package network_test
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	bsmsg "github.com/ipfs/boxo/bitswap/message"
+	pb "github.com/ipfs/boxo/bitswap/message/pb"
+	bsnet "github.com/ipfs/boxo/bitswap/network"
+	"github.com/ipfs/boxo/bitswap/network/internal"
+	tn "github.com/ipfs/boxo/bitswap/testnet"
+	"github.com/ipfs/boxo/internal/test"
+	mockrouting "github.com/ipfs/boxo/routing/mock"
+	ds "github.com/ipfs/go-datastore"
+	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
+	tnet "github.com/libp2p/go-libp2p-testing/net"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+	"github.com/multiformats/go-multistream"
+)
+
+// receiver implements the Receiver interface for receiving messages from the
+// BitSwapNetwork.
+type receiver struct {
+	peers           map[peer.ID]struct{}
+	messageReceived chan struct{}
+	connectionEvent chan bool
+	lastMessage     bsmsg.BitSwapMessage
+	lastSender      peer.ID
+	listener        network.Notifiee
+}
+
+func newReceiver() *receiver {
+	return &receiver{
+		peers:           make(map[peer.ID]struct{}),
+		messageReceived: make(chan struct{}),
+		// Avoid blocking. 100 is good enough for tests.
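+		// (The PeerConnected/PeerDisconnected callbacks below feed this
+		// channel and run on the connect event manager's worker goroutine,
+		// so the buffer keeps tests from stalling that worker.)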
+		connectionEvent: make(chan bool, 100),
+	}
+}
+
+func (r *receiver) ReceiveMessage(
+	ctx context.Context,
+	sender peer.ID,
+	incoming bsmsg.BitSwapMessage) {
+	r.lastSender = sender
+	r.lastMessage = incoming
+	select {
+	case <-ctx.Done():
+	case r.messageReceived <- struct{}{}:
+	}
+}
+
+func (r *receiver) ReceiveError(err error) {
+}
+
+func (r *receiver) PeerConnected(p peer.ID) {
+	r.peers[p] = struct{}{}
+	r.connectionEvent <- true
+}
+
+func (r *receiver) PeerDisconnected(p peer.ID) {
+	delete(r.peers, p)
+	r.connectionEvent <- false
+}
+
+var errMockNetErr = fmt.Errorf("network err")
+
+type ErrStream struct {
+	network.Stream
+	lk        sync.Mutex
+	err       error
+	timingOut bool
+	closed    bool
+}
+
+type ErrHost struct {
+	host.Host
+	lk        sync.Mutex
+	err       error
+	timingOut bool
+	streams   []*ErrStream
+}
+
+func (es *ErrStream) Write(b []byte) (int, error) {
+	es.lk.Lock()
+	defer es.lk.Unlock()
+
+	if es.err != nil {
+		return 0, es.err
+	}
+	if es.timingOut {
+		return 0, context.DeadlineExceeded
+	}
+	return es.Stream.Write(b)
+}
+
+func (es *ErrStream) Close() error {
+	es.lk.Lock()
+	es.closed = true
+	es.lk.Unlock()
+
+	return es.Stream.Close()
+}
+
+func (eh *ErrHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
+	eh.lk.Lock()
+	defer eh.lk.Unlock()
+
+	if eh.err != nil {
+		return eh.err
+	}
+	if eh.timingOut {
+		return context.DeadlineExceeded
+	}
+	return eh.Host.Connect(ctx, pi)
+}
+
+func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) {
+	eh.lk.Lock()
+	defer eh.lk.Unlock()
+
+	if eh.err != nil {
+		return nil, errMockNetErr
+	}
+	if eh.timingOut {
+		return nil, context.DeadlineExceeded
+	}
+	stream, err := eh.Host.NewStream(ctx, p, pids...)
+	estrm := &ErrStream{Stream: stream, err: eh.err, timingOut: eh.timingOut}
+
+	eh.streams = append(eh.streams, estrm)
+	return estrm, err
+}
+
+func (eh *ErrHost) setError(err error) {
+	eh.lk.Lock()
+	defer eh.lk.Unlock()
+
+	eh.err = err
+	for _, s := range eh.streams {
+		s.lk.Lock()
+		s.err = err
+		s.lk.Unlock()
+	}
+}
+
+func (eh *ErrHost) setTimeoutState(timingOut bool) {
+	eh.lk.Lock()
+	defer eh.lk.Unlock()
+
+	eh.timingOut = timingOut
+	for _, s := range eh.streams {
+		s.lk.Lock()
+		s.timingOut = timingOut
+		s.lk.Unlock()
+	}
+}
+
+func TestMessageSendAndReceive(t *testing.T) {
+	test.Flaky(t)
+
+	// create network
+	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	defer cancel()
+	mn := mocknet.New()
+	defer mn.Close()
+	mr := mockrouting.NewServer()
+	streamNet, err := tn.StreamNet(ctx, mn, mr)
+	if err != nil {
+		t.Fatal("Unable to setup network")
+	}
+	p1 := tnet.RandIdentityOrFatal(t)
+	p2 := tnet.RandIdentityOrFatal(t)
+
+	bsnet1 := streamNet.Adapter(p1)
+	bsnet2 := streamNet.Adapter(p2)
+	r1 := newReceiver()
+	r2 := newReceiver()
+	bsnet1.Start(r1)
+	t.Cleanup(bsnet1.Stop)
+	bsnet2.Start(r2)
+	t.Cleanup(bsnet2.Stop)
+
+	err = mn.LinkAll()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = bsnet1.ConnectTo(ctx, p2.ID())
+	if err != nil {
+		t.Fatal(err)
+	}
+	select {
+	case <-ctx.Done():
+		t.Fatal("did not connect peer")
+	case <-r1.connectionEvent:
+	}
+	err = bsnet2.ConnectTo(ctx, p1.ID())
+	if err != nil {
+		t.Fatal(err)
+	}
+	select {
+	case <-ctx.Done():
+		t.Fatal("did not connect peer")
+	case <-r2.connectionEvent:
+	}
+	if _, ok := r1.peers[p2.ID()]; !ok {
+		t.Fatal("did not connect to correct peer")
+	}
+	if _, ok := r2.peers[p1.ID()]; !ok {
+		t.Fatal("did not connect to correct peer")
+	}
+	blockGenerator :=
blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + block2 := blockGenerator.Next() + sent := bsmsg.New(false) + sent.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + sent.AddBlock(block2) + + err = bsnet1.SendMessage(ctx, p2.ID(), sent) + if err != nil { + t.Fatal(err) + } + + select { + case <-ctx.Done(): + t.Fatal("did not receive message sent") + case <-r2.messageReceived: + } + + sender := r2.lastSender + if sender != p1.ID() { + t.Fatal("received message from wrong node") + } + + received := r2.lastMessage + + sentWants := sent.Wantlist() + if len(sentWants) != 1 { + t.Fatal("Did not add want to sent message") + } + sentWant := sentWants[0] + receivedWants := received.Wantlist() + if len(receivedWants) != 1 { + t.Fatal("Did not add want to received message") + } + receivedWant := receivedWants[0] + if receivedWant.Cid != sentWant.Cid || + receivedWant.Priority != sentWant.Priority || + receivedWant.Cancel != sentWant.Cancel { + t.Fatal("Sent message wants did not match received message wants") + } + sentBlocks := sent.Blocks() + if len(sentBlocks) != 1 { + t.Fatal("Did not add block to sent message") + } + sentBlock := sentBlocks[0] + receivedBlocks := received.Blocks() + if len(receivedBlocks) != 1 { + t.Fatal("Did not add response to received message") + } + receivedBlock := receivedBlocks[0] + if receivedBlock.Cid() != sentBlock.Cid() { + t.Fatal("Sent message blocks did not match received message blocks") + } +} + +func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *receiver, p2 tnet.Identity, r2 *receiver) (*ErrHost, bsnet.BitSwapNetwork, *ErrHost, bsnet.BitSwapNetwork, bsmsg.BitSwapMessage) { + // create network + mn := mocknet.New() + defer mn.Close() + mr := mockrouting.NewServer() + + // Host 1 + h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) + if err != nil { + t.Fatal(err) + } + eh1 := &ErrHost{Host: h1} + routing1 := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) + bsnet1 := bsnet.NewFromIpfsHost(eh1, routing1) + bsnet1.Start(r1) + t.Cleanup(bsnet1.Stop) + if r1.listener != nil { + eh1.Network().Notify(r1.listener) + } + + // Host 2 + h2, err := mn.AddPeer(p2.PrivateKey(), p2.Address()) + if err != nil { + t.Fatal(err) + } + eh2 := &ErrHost{Host: h2} + routing2 := mr.ClientWithDatastore(context.TODO(), p2, ds.NewMapDatastore()) + bsnet2 := bsnet.NewFromIpfsHost(eh2, routing2) + bsnet2.Start(r2) + t.Cleanup(bsnet2.Stop) + if r2.listener != nil { + eh2.Network().Notify(r2.listener) + } + + // Networking + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + isConnected := <-r1.connectionEvent + if !isConnected { + t.Fatal("Expected connect event") + } + + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } + + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + msg := bsmsg.New(false) + msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + + return eh1, bsnet1, eh2, bsnet2, msg +} + +func TestMessageResendAfterError(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + + testSendErrorBackoff := 100 * time.Millisecond + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + 
MaxRetries:       3,
+		SendTimeout:      100 * time.Millisecond,
+		SendErrorBackoff: testSendErrorBackoff,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ms.Close()
+
+	// Return an error from the networking layer the next time we try to send
+	// a message
+	eh.setError(errMockNetErr)
+
+	go func() {
+		time.Sleep(testSendErrorBackoff / 2)
+		// Stop throwing errors so that the following attempt to send succeeds
+		eh.setError(nil)
+	}()
+
+	// Send message with retries, first one should fail, then subsequent
+	// message should succeed
+	err = ms.SendMsg(ctx, msg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-ctx.Done():
+		t.Fatal("did not receive message sent")
+	case <-r2.messageReceived:
+	}
+}
+
+func TestMessageSendTimeout(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	p1 := tnet.RandIdentityOrFatal(t)
+	r1 := newReceiver()
+	p2 := tnet.RandIdentityOrFatal(t)
+	r2 := newReceiver()
+
+	eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2)
+
+	ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{
+		MaxRetries:       3,
+		SendTimeout:      100 * time.Millisecond,
+		SendErrorBackoff: 100 * time.Millisecond,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ms.Close()
+
+	// Return a DeadlineExceeded error from the networking layer the next time we try to
+	// send a message
+	eh.setTimeoutState(true)
+
+	// Send message with retries, all attempts should fail
+	err = ms.SendMsg(ctx, msg)
+	if err == nil {
+		t.Fatal("Expected error from SendMsg")
+	}
+
+	select {
+	case <-time.After(500 * time.Millisecond):
+		t.Fatal("Did not receive disconnect event")
+	case isConnected := <-r1.connectionEvent:
+		if isConnected {
+			t.Fatal("Expected disconnect event (got connect event)")
+		}
+	}
+}
+
+func TestMessageSendNotSupportedResponse(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	p1 := tnet.RandIdentityOrFatal(t)
+	r1 := newReceiver()
+	p2 := tnet.RandIdentityOrFatal(t)
+	r2 := newReceiver()
+
+	eh, bsnet1, _, _, _ := prepareNetwork(t, ctx, p1, r1, p2, r2)
+
+	eh.setError(multistream.ErrNotSupported[protocol.ID]{})
+	ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{
+		MaxRetries:       3,
+		SendTimeout:      100 * time.Millisecond,
+		SendErrorBackoff: 100 * time.Millisecond,
+	})
+	if err == nil {
+		ms.Close()
+		t.Fatal("Expected ErrNotSupported")
+	}
+
+	select {
+	case <-time.After(500 * time.Millisecond):
+		t.Fatal("Did not receive disconnect event")
+	case isConnected := <-r1.connectionEvent:
+		if isConnected {
+			t.Fatal("Expected disconnect event (got connect event)")
+		}
+	}
+}
+
+func TestSupportsHave(t *testing.T) {
+	test.Flaky(t)
+
+	ctx := context.Background()
+	mn := mocknet.New()
+	defer mn.Close()
+	mr := mockrouting.NewServer()
+	streamNet, err := tn.StreamNet(ctx, mn, mr)
+	if err != nil {
+		t.Fatalf("Unable to setup network: %s", err)
+	}
+
+	type testCase struct {
+		proto           protocol.ID
+		expSupportsHave bool
+	}
+
+	testCases := []testCase{
+		{bsnet.ProtocolBitswap, true},
+		{bsnet.ProtocolBitswapOneOne, false},
+		{bsnet.ProtocolBitswapOneZero, false},
+		{bsnet.ProtocolBitswapNoVers, false},
+	}
+
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("%s-%v", tc.proto, tc.expSupportsHave), func(t *testing.T) {
+			p1 := tnet.RandIdentityOrFatal(t)
+			bsnet1 := streamNet.Adapter(p1)
+			bsnet1.Start(newReceiver())
+			t.Cleanup(bsnet1.Stop)
+
+			p2 := tnet.RandIdentityOrFatal(t)
+			bsnet2 :=
streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) + bsnet2.Start(newReceiver()) + t.Cleanup(bsnet2.Stop) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + + senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) + if err != nil { + t.Fatal(err) + } + defer senderCurrent.Close() + + if senderCurrent.SupportsHave() != tc.expSupportsHave { + t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) + } + }) + } +} + +func testNetworkCounters(t *testing.T, n1 int, n2 int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + h1, bsnet1, h2, bsnet2, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + + for n := 0; n < n1; n++ { + ctx, cancel := context.WithTimeout(ctx, time.Second) + err := bsnet1.SendMessage(ctx, p2.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p2 did not receive message sent") + case <-r2.messageReceived: + for j := 0; j < 2; j++ { + err := bsnet2.SendMessage(ctx, p1.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p1 did not receive message sent") + case <-r1.messageReceived: + } + } + } + cancel() + } + + if n2 > 0 { + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) + if err != nil { + t.Fatal(err) + } + defer ms.Close() + for n := 0; n < n2; n++ { + ctx, cancel := context.WithTimeout(ctx, time.Second) + err = ms.SendMsg(ctx, msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p2 did not receive message sent") + case <-r2.messageReceived: + for j := 0; j < 2; j++ { + err := bsnet2.SendMessage(ctx, p1.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p1 did not receive message sent") + case <-r1.messageReceived: + } + } + } + cancel() + } + ms.Close() + } + + // Wait until all streams are closed and MessagesRecvd counters + // updated. 
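+	// (MessagesRecvd is only incremented in handleNewStream as messages
+	// arrive, so the test polls the mock hosts' stream states instead of
+	// sleeping for a fixed interval.)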
+ ctxto, cancelto := context.WithTimeout(ctx, 5*time.Second) + defer cancelto() + ctxwait, cancelwait := context.WithCancel(ctx) + go func() { + // Wait until all streams are closed + throttler := time.NewTicker(time.Millisecond * 5) + defer throttler.Stop() + for { + h1.lk.Lock() + var done bool + for _, s := range h1.streams { + s.lk.Lock() + closed := s.closed + closed = closed || s.err != nil + s.lk.Unlock() + if closed { + continue + } + pid := s.Protocol() + for _, v := range internal.DefaultProtocols { + if pid == v { + goto ElseH1 + } + } + } + done = true + ElseH1: + h1.lk.Unlock() + if done { + break + } + select { + case <-ctxto.Done(): + return + case <-throttler.C: + } + } + + for { + h2.lk.Lock() + var done bool + for _, s := range h2.streams { + s.lk.Lock() + closed := s.closed + closed = closed || s.err != nil + s.lk.Unlock() + if closed { + continue + } + pid := s.Protocol() + for _, v := range internal.DefaultProtocols { + if pid == v { + goto ElseH2 + } + } + } + done = true + ElseH2: + h2.lk.Unlock() + if done { + break + } + select { + case <-ctxto.Done(): + return + case <-throttler.C: + } + } + + cancelwait() + }() + + select { + case <-ctxto.Done(): + t.Fatal("network streams closing timed out") + case <-ctxwait.Done(): + } + + if bsnet1.Stats().MessagesSent != uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d sent messages, got %d", n1+n2, bsnet1.Stats().MessagesSent)) + } + + if bsnet2.Stats().MessagesRecvd != uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d received messages, got %d", n1+n2, bsnet2.Stats().MessagesRecvd)) + } + + if bsnet1.Stats().MessagesRecvd != 2*uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d received reply messages, got %d", 2*(n1+n2), bsnet1.Stats().MessagesRecvd)) + } +} + +func TestNetworkCounters(t *testing.T) { + test.Flaky(t) + + for n := 0; n < 11; n++ { + testNetworkCounters(t, 10-n, n) + } +} diff --git a/bitswap/network/ipfs_impl_timeout_test.go b/bitswap/network/ipfs_impl_timeout_test.go new file mode 100644 index 0000000000..178c2fb69d --- /dev/null +++ b/bitswap/network/ipfs_impl_timeout_test.go @@ -0,0 +1,27 @@ +package network + +import ( + "testing" + "time" + + "github.com/ipfs/boxo/internal/test" + "github.com/stretchr/testify/require" +) + +func TestSendTimeout(t *testing.T) { + test.Flaky(t) + + require.Equal(t, minSendTimeout, sendTimeout(0)) + require.Equal(t, maxSendTimeout, sendTimeout(1<<30)) + + // Check a 1MiB block (very large) + oneMiB := uint64(1 << 20) + hundredKbit := uint64(100 * 1000) + hundredKB := hundredKbit / 8 + expectedTime := sendLatency + time.Duration(oneMiB*uint64(time.Second)/hundredKB) + actualTime := sendTimeout(int(oneMiB)) + require.Equal(t, expectedTime, actualTime) + + // Check a 256KiB block (expected) + require.InDelta(t, 25*time.Second, sendTimeout(256<<10), float64(5*time.Second)) +} diff --git a/bitswap/network/options.go b/bitswap/network/options.go new file mode 100644 index 0000000000..10d02e5e96 --- /dev/null +++ b/bitswap/network/options.go @@ -0,0 +1,22 @@ +package network + +import "github.com/libp2p/go-libp2p/core/protocol" + +type NetOpt func(*Settings) + +type Settings struct { + ProtocolPrefix protocol.ID + SupportedProtocols []protocol.ID +} + +func Prefix(prefix protocol.ID) NetOpt { + return func(settings *Settings) { + settings.ProtocolPrefix = prefix + } +} + +func SupportedProtocols(protos []protocol.ID) NetOpt { + return func(settings *Settings) { + settings.SupportedProtocols = protos + } +} diff --git a/bitswap/options.go b/bitswap/options.go new file mode 100644 
index 0000000000..da759dfe28
--- /dev/null
+++ b/bitswap/options.go
@@ -0,0 +1,89 @@
+package bitswap
+
+import (
+	"time"
+
+	"github.com/ipfs/boxo/bitswap/client"
+	"github.com/ipfs/boxo/bitswap/server"
+	"github.com/ipfs/boxo/bitswap/tracer"
+	delay "github.com/ipfs/go-ipfs-delay"
+)
+
+type option func(*Bitswap)
+
+// Option wraps a server.Option, a client.Option, or a func(*Bitswap) in a
+// struct to gain strong type checking.
+type Option struct {
+	v interface{}
+}
+
+func EngineBlockstoreWorkerCount(count int) Option {
+	return Option{server.EngineBlockstoreWorkerCount(count)}
+}
+
+func EngineTaskWorkerCount(count int) Option {
+	return Option{server.EngineTaskWorkerCount(count)}
+}
+
+func MaxOutstandingBytesPerPeer(count int) Option {
+	return Option{server.MaxOutstandingBytesPerPeer(count)}
+}
+
+func MaxQueuedWantlistEntriesPerPeer(count uint) Option {
+	return Option{server.MaxQueuedWantlistEntriesPerPeer(count)}
+}
+
+// MaxCidSize only affects the server.
+// If it is 0 no limit is applied.
+func MaxCidSize(n uint) Option {
+	return Option{server.MaxCidSize(n)}
+}
+
+func TaskWorkerCount(count int) Option {
+	return Option{server.TaskWorkerCount(count)}
+}
+
+func ProvideEnabled(enabled bool) Option {
+	return Option{server.ProvideEnabled(enabled)}
+}
+
+func SetSendDontHaves(send bool) Option {
+	return Option{server.SetSendDontHaves(send)}
+}
+
+func WithPeerBlockRequestFilter(pbrf server.PeerBlockRequestFilter) Option {
+	return Option{server.WithPeerBlockRequestFilter(pbrf)}
+}
+
+func WithScoreLedger(scoreLedger server.ScoreLedger) Option {
+	return Option{server.WithScoreLedger(scoreLedger)}
+}
+
+func WithTargetMessageSize(tms int) Option {
+	return Option{server.WithTargetMessageSize(tms)}
+}
+
+func WithTaskComparator(comparator server.TaskComparator) Option {
+	return Option{server.WithTaskComparator(comparator)}
+}
+
+func ProviderSearchDelay(newProvSearchDelay time.Duration) Option {
+	return Option{client.ProviderSearchDelay(newProvSearchDelay)}
+}
+
+func RebroadcastDelay(newRebroadcastDelay delay.D) Option {
+	return Option{client.RebroadcastDelay(newRebroadcastDelay)}
+}
+
+func SetSimulateDontHavesOnTimeout(send bool) Option {
+	return Option{client.SetSimulateDontHavesOnTimeout(send)}
+}
+
+func WithTracer(tap tracer.Tracer) Option {
+	// Only trace the server, both receive the same messages anyway
+	return Option{
+		option(func(bs *Bitswap) {
+			bs.tracer = tap
+		}),
+	}
+}
diff --git a/bitswap/sendOnlyTracer.go b/bitswap/sendOnlyTracer.go
new file mode 100644
index 0000000000..ad03e2922b
--- /dev/null
+++ b/bitswap/sendOnlyTracer.go
@@ -0,0 +1,20 @@
+package bitswap
+
+import (
+	"github.com/ipfs/boxo/bitswap/message"
+	"github.com/ipfs/boxo/bitswap/tracer"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+type sendOnlyTracer interface {
+	MessageSent(peer.ID, message.BitSwapMessage)
+}
+
+var _ tracer.Tracer = nopReceiveTracer{}
+
+// We only need to trace sends, because receives are already traced in the
+// polyfill object; tracing them here as well would record them twice.
+type nopReceiveTracer struct {
+	sendOnlyTracer
+}
+
+func (nopReceiveTracer) MessageReceived(peer.ID, message.BitSwapMessage) {}
diff --git a/bitswap/server/forward.go b/bitswap/server/forward.go
new file mode 100644
index 0000000000..ee353da191
--- /dev/null
+++ b/bitswap/server/forward.go
@@ -0,0 +1,14 @@
+package server
+
+import (
+	"github.com/ipfs/boxo/bitswap/server/internal/decision"
+)
+
+type (
+	Receipt                = decision.Receipt
+	PeerBlockRequestFilter = decision.PeerBlockRequestFilter
+	TaskComparator         = decision.TaskComparator
+	TaskInfo               = decision.TaskInfo
+	ScoreLedger            = decision.ScoreLedger
+	ScorePeerFunc          = decision.ScorePeerFunc
+)
diff --git a/bitswap/server/internal/decision/blockstoremanager.go b/bitswap/server/internal/decision/blockstoremanager.go
new file mode 100644
index 0000000000..2605b06eef
--- /dev/null
+++ b/bitswap/server/internal/decision/blockstoremanager.go
@@ -0,0 +1,149 @@
+package decision
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	bstore "github.com/ipfs/boxo/blockstore"
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	"github.com/ipfs/go-metrics-interface"
+)
+
+// blockstoreManager maintains a pool of workers that make requests to the blockstore.
+type blockstoreManager struct {
+	bs           bstore.Blockstore
+	workerCount  int
+	jobs         chan func()
+	pendingGauge metrics.Gauge
+	activeGauge  metrics.Gauge
+
+	workerWG sync.WaitGroup
+	stopChan chan struct{}
+	stopOnce sync.Once
+}
+
+// newBlockstoreManager creates a new blockstoreManager with the given
+// blockstore, number of workers, and gauges
+func newBlockstoreManager(
+	bs bstore.Blockstore,
+	workerCount int,
+	pendingGauge metrics.Gauge,
+	activeGauge metrics.Gauge,
+) *blockstoreManager {
+	return &blockstoreManager{
+		bs:           bs,
+		workerCount:  workerCount,
+		jobs:         make(chan func()),
+		pendingGauge: pendingGauge,
+		activeGauge:  activeGauge,
+		stopChan:     make(chan struct{}),
+	}
+}
+
+func (bsm *blockstoreManager) start() {
+	bsm.workerWG.Add(bsm.workerCount)
+	for i := 0; i < bsm.workerCount; i++ {
+		go bsm.worker()
+	}
+}
+
+func (bsm *blockstoreManager) stop() {
+	bsm.stopOnce.Do(func() {
+		close(bsm.stopChan)
+	})
+	bsm.workerWG.Wait()
+}
+
+func (bsm *blockstoreManager) worker() {
+	defer bsm.workerWG.Done()
+	for {
+		select {
+		case <-bsm.stopChan:
+			return
+		case job := <-bsm.jobs:
+			bsm.pendingGauge.Dec()
+			bsm.activeGauge.Inc()
+			job()
+			bsm.activeGauge.Dec()
+		}
+	}
+}
+
+func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-bsm.stopChan:
+		return fmt.Errorf("shutting down")
+	case bsm.jobs <- job:
+		bsm.pendingGauge.Inc()
+		return nil
+	}
+}
+
+func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) (map[cid.Cid]int, error) {
+	res := make(map[cid.Cid]int)
+	if len(ks) == 0 {
+		return res, nil
+	}
+
+	var lk sync.Mutex
+	return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) {
+		size, err := bsm.bs.GetSize(ctx, c)
+		if err != nil {
+			if !ipld.IsNotFound(err) {
+				// Note: this isn't a fatal error. We shouldn't abort the request
+				log.Errorf("blockstore.GetSize(%s) error: %s", c, err)
+			}
+		} else {
+			lk.Lock()
+			res[c] = size
+			lk.Unlock()
+		}
+	})
+}
+
+func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) {
+	res := make(map[cid.Cid]blocks.Block, len(ks))
+	if len(ks) == 0 {
+		return res, nil
+	}
+
+	var lk sync.Mutex
+	return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) {
+		blk, err := bsm.bs.Get(ctx, c)
+		if err != nil {
+			if !ipld.IsNotFound(err) {
+				// Note: this isn't a fatal error.
We shouldn't abort the request + log.Errorf("blockstore.Get(%s) error: %s", c, err) + } + return + } + + lk.Lock() + res[c] = blk + lk.Unlock() + }) +} + +func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { + var err error + var wg sync.WaitGroup + for _, k := range ks { + c := k + wg.Add(1) + err = bsm.addJob(ctx, func() { + jobFn(c) + wg.Done() + }) + if err != nil { + wg.Done() + break + } + } + wg.Wait() + return err +} diff --git a/bitswap/server/internal/decision/blockstoremanager_test.go b/bitswap/server/internal/decision/blockstoremanager_test.go new file mode 100644 index 0000000000..20e27ef953 --- /dev/null +++ b/bitswap/server/internal/decision/blockstoremanager_test.go @@ -0,0 +1,278 @@ +package decision + +import ( + "context" + "crypto/rand" + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + blocks "github.com/ipfs/boxo/blocks" + blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/internal/test" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/delayed" + ds_sync "github.com/ipfs/go-datastore/sync" + delay "github.com/ipfs/go-ipfs-delay" + "github.com/ipfs/go-metrics-interface" +) + +func newBlockstoreManagerForTesting( + t *testing.T, + ctx context.Context, + bs blockstore.Blockstore, + workerCount int, +) *blockstoreManager { + testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + bsm := newBlockstoreManager(bs, workerCount, testPendingBlocksGauge, testActiveBlocksGauge) + bsm.start() + t.Cleanup(bsm.stop) + return bsm +} + +func TestBlockstoreManagerNotFoundKey(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) + + cids := testutil.GenerateCids(4) + sizes, err := bsm.getBlockSizes(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(sizes) != 0 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + if _, ok := sizes[c]; ok { + t.Fatal("Non-existent block should have no size") + } + } + + blks, err := bsm.getBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(blks) != 0 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + if _, ok := blks[c]; ok { + t.Fatal("Non-existent block should have no size") + } + } +} + +func TestBlockstoreManager(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) + + exp := make(map[cid.Cid]blocks.Block) + var blks []blocks.Block + for i := 0; i < 32; i++ { + buf := make([]byte, 1024*(i+1)) + _, _ = rand.Read(buf) + b := blocks.NewBlock(buf) + blks = append(blks, b) + exp[b.Cid()] = b + } + + // Put all blocks in the blockstore except the last one + if err := bstore.PutMany(ctx, blks[:len(blks)-1]); err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for _, b := range blks { + cids = append(cids, b.Cid()) + } + + sizes, err := 
bsm.getBlockSizes(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(sizes) != len(blks)-1 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + expSize := len(exp[c].RawData()) + size, ok := sizes[c] + + // Only the last key should be missing + if c.Equals(cids[len(cids)-1]) { + if ok { + t.Fatal("Non-existent block should not be in sizes map") + } + } else { + if !ok { + t.Fatal("Block should be in sizes map") + } + if size != expSize { + t.Fatal("Block has wrong size") + } + } + } + + fetched, err := bsm.getBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(fetched) != len(blks)-1 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + blk, ok := fetched[c] + + // Only the last key should be missing + if c.Equals(cids[len(cids)-1]) { + if ok { + t.Fatal("Non-existent block should not be in blocks map") + } + } else { + if !ok { + t.Fatal("Block should be in blocks map") + } + if !blk.Cid().Equals(c) { + t.Fatal("Block has wrong cid") + } + } + } +} + +func TestBlockstoreManagerConcurrency(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + workerCount := 5 + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, workerCount) + + blkSize := int64(8 * 1024) + blks := testutil.GenerateBlocksOfSize(32, blkSize) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(ctx, blks) + if err != nil { + t.Fatal(err) + } + + // Create more concurrent requests than the number of workers + wg := sync.WaitGroup{} + for i := 0; i < 16; i++ { + wg.Add(1) + + go func(t *testing.T) { + defer wg.Done() + + sizes, err := bsm.getBlockSizes(ctx, ks) + if err != nil { + t.Error(err) + } + if len(sizes) != len(blks) { + t.Error("Wrong response length") + } + }(t) + } + wg.Wait() +} + +func TestBlockstoreManagerClose(t *testing.T) { + test.Flaky(t) + + ctx := context.Background() + delayTime := 20 * time.Millisecond + bsdelay := delay.Fixed(delayTime) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) + + blks := testutil.GenerateBlocksOfSize(10, 1024) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(ctx, blks) + if err != nil { + t.Fatal(err) + } + + bsm.stop() + + time.Sleep(5 * time.Millisecond) + + before := time.Now() + _, err = bsm.getBlockSizes(ctx, ks) + if err == nil { + t.Error("expected an error") + } + // would expect to wait delayTime*10 if we didn't cancel. 
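+	// (stop() has already shut down the workers and closed stopChan, so the
+	// call above should fail fast inside addJob instead of queueing work.)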
+ if time.Since(before) > delayTime*2 { + t.Error("expected a fast timeout") + } +} + +func TestBlockstoreManagerCtxDone(t *testing.T) { + test.Flaky(t) + + delayTime := 20 * time.Millisecond + bsdelay := delay.Fixed(delayTime) + + underlyingDstore := ds_sync.MutexWrap(ds.NewMapDatastore()) + dstore := delayed.New(underlyingDstore, bsdelay) + underlyingBstore := blockstore.NewBlockstore(underlyingDstore) + bstore := blockstore.NewBlockstore(dstore) + + ctx := context.Background() + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) + + blks := testutil.GenerateBlocksOfSize(100, 128) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := underlyingBstore.PutMany(ctx, blks) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), delayTime/2) + defer cancel() + + before := time.Now() + _, err = bsm.getBlockSizes(ctx, ks) + if err == nil { + t.Error("expected an error") + } + + // would expect to wait delayTime*100/3 if we didn't cancel. + if time.Since(before) > delayTime*10 { + t.Error("expected a fast timeout") + } +} diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go new file mode 100644 index 0000000000..1d2f241e9f --- /dev/null +++ b/bitswap/server/internal/decision/engine.go @@ -0,0 +1,980 @@ +// Package decision implements the decision engine for the bitswap service. +package decision + +import ( + "context" + "fmt" + "math/bits" + "sync" + "time" + + "github.com/google/uuid" + + wl "github.com/ipfs/boxo/bitswap/client/wantlist" + "github.com/ipfs/boxo/bitswap/internal/defaults" + bsmsg "github.com/ipfs/boxo/bitswap/message" + pb "github.com/ipfs/boxo/bitswap/message/pb" + bmetrics "github.com/ipfs/boxo/bitswap/metrics" + blocks "github.com/ipfs/boxo/blocks" + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + "github.com/ipfs/go-metrics-interface" + "github.com/ipfs/go-peertaskqueue" + "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-peertaskqueue/peertracker" + process "github.com/jbenet/goprocess" + "github.com/libp2p/go-libp2p/core/peer" + mh "github.com/multiformats/go-multihash" +) + +// TODO consider taking responsibility for other types of requests. For +// example, there could be a |cancelQueue| for all of the cancellation +// messages that need to go out. There could also be a |wantlistQueue| for +// the local peer's wantlists. Alternatively, these could all be bundled +// into a single, intelligent global queue that efficiently +// batches/combines and takes all of these into consideration. +// +// Right now, messages go onto the network for four reasons: +// 1. an initial `sendwantlist` message to a provider of the first key in a +// request +// 2. a periodic full sweep of `sendwantlist` messages to all providers +// 3. upon receipt of blocks, a `cancel` message to all peers +// 4. draining the priority queue of `blockrequests` from peers +// +// Presently, only `blockrequests` are handled by the decision engine. +// However, there is an opportunity to give it more responsibility! If the +// decision engine is given responsibility for all of the others, it can +// intelligently decide how to combine requests efficiently. 
+// +// Some examples of what would be possible: +// +// * when sending out the wantlists, include `cancel` requests +// * when handling `blockrequests`, include `sendwantlist` and `cancel` as +// appropriate +// * when handling `cancel`, if we recently received a wanted block from a +// peer, include a partial wantlist that contains a few other high priority +// blocks +// +// In a sense, if we treat the decision engine as a black box, it could do +// whatever it sees fit to produce desired outcomes (get wanted keys +// quickly, maintain good relationships with peers, etc). + +var log = logging.Logger("engine") + +const ( + // outboxChanBuffer must be 0 to prevent stale messages from being sent + outboxChanBuffer = 0 + // targetMessageSize is the ideal size of the batched payload. We try to + // pop this much data off the request queue, but it may be a little more + // or less depending on what's in the queue. + defaultTargetMessageSize = 16 * 1024 + // tagFormat is the tag given to peers associated an engine + tagFormat = "bs-engine-%s-%s" + + // queuedTagWeight is the default weight for peers that have work queued + // on their behalf. + queuedTagWeight = 10 + + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock = 1024 +) + +// Envelope contains a message for a Peer. +type Envelope struct { + // Peer is the intended recipient. + Peer peer.ID + + // Message is the payload. + Message bsmsg.BitSwapMessage + + // A callback to notify the decision queue that the task is complete + Sent func() +} + +// PeerTagger covers the methods on the connection manager used by the decision +// engine to tag peers +type PeerTagger interface { + TagPeer(peer.ID, string, int) + UntagPeer(p peer.ID, tag string) +} + +// Assigns a specific score to a peer +type ScorePeerFunc func(peer.ID, int) + +// ScoreLedger is an external ledger dealing with peer scores. +type ScoreLedger interface { + // Returns aggregated data communication with a given peer. + GetReceipt(p peer.ID) *Receipt + // Increments the sent counter for the given peer. + AddToSentBytes(p peer.ID, n int) + // Increments the received counter for the given peer. + AddToReceivedBytes(p peer.ID, n int) + // PeerConnected should be called when a new peer connects, + // meaning the ledger should open accounting. + PeerConnected(p peer.ID) + // PeerDisconnected should be called when a peer disconnects to + // clean up the accounting. + PeerDisconnected(p peer.ID) + // Starts the ledger sampling process. + Start(scorePeer ScorePeerFunc) + // Stops the sampling process. + Stop() +} + +// Engine manages sending requested blocks to peers. +type Engine struct { + // peerRequestQueue is a priority queue of requests received from peers. + // Requests are popped from the queue, packaged up, and placed in the + // outbox. + peerRequestQueue *peertaskqueue.PeerTaskQueue + + // FIXME it's a bit odd for the client and the worker to both share memory + // (both modify the peerRequestQueue) and also to communicate over the + // workSignal channel. consider sending requests over the channel and + // allowing the worker to have exclusive access to the peerRequestQueue. In + // that case, no lock would be required. + workSignal chan struct{} + + // outbox contains outgoing messages to peers. 
This is owned by the
+	// taskWorker goroutine
+	outbox chan (<-chan *Envelope)
+
+	bsm *blockstoreManager
+
+	peerTagger PeerTagger
+
+	tagQueued, tagUseful string
+
+	lock sync.RWMutex // protects the fields immediately below
+
+	// peerLedger saves which peers are waiting for a Cid
+	peerLedger *peerLedger
+
+	// an external ledger dealing with peer scores
+	scoreLedger ScoreLedger
+
+	ticker *time.Ticker
+
+	taskWorkerLock  sync.Mutex
+	taskWorkerCount int
+
+	targetMessageSize int
+
+	// maxBlockSizeReplaceHasWithBlock is the maximum size of the block in
+	// bytes up to which we will replace a want-have with a want-block
+	maxBlockSizeReplaceHasWithBlock int
+
+	sendDontHaves bool
+
+	self peer.ID
+
+	// metrics gauge for total pending tasks across all workers
+	pendingGauge metrics.Gauge
+
+	// metrics gauge for total active tasks across all workers
+	activeGauge metrics.Gauge
+
+	// used to ensure metrics are reported each fixed number of operations
+	metricsLock         sync.Mutex
+	metricUpdateCounter int
+
+	taskComparator TaskComparator
+
+	peerBlockRequestFilter PeerBlockRequestFilter
+
+	bstoreWorkerCount          int
+	maxOutstandingBytesPerPeer int
+
+	maxQueuedWantlistEntriesPerPeer uint
+	maxCidSize                      uint
+}
+
+// TaskInfo represents the details of a request from a peer.
+type TaskInfo struct {
+	Peer peer.ID
+	// The CID of the block
+	Cid cid.Cid
+	// Tasks can be want-have or want-block
+	IsWantBlock bool
+	// Whether to immediately send a response if the block is not found
+	SendDontHave bool
+	// The size of the block corresponding to the task
+	BlockSize int
+	// Whether the block was found
+	HaveBlock bool
+}
+
+// TaskComparator is used for task prioritization.
+// It should return true if task 'ta' has higher priority than task 'tb'
+type TaskComparator func(ta, tb *TaskInfo) bool
+
+// PeerBlockRequestFilter is used to accept / deny requests for a CID coming from a PeerID
+// It should return true if the request should be fulfilled.
+type PeerBlockRequestFilter func(p peer.ID, c cid.Cid) bool
+
+type Option func(*Engine)
+
+func WithTaskComparator(comparator TaskComparator) Option {
+	return func(e *Engine) {
+		e.taskComparator = comparator
+	}
+}
+
+func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option {
+	return func(e *Engine) {
+		e.peerBlockRequestFilter = pbrf
+	}
+}
+
+func WithTargetMessageSize(size int) Option {
+	return func(e *Engine) {
+		e.targetMessageSize = size
+	}
+}
+
+func WithScoreLedger(scoreledger ScoreLedger) Option {
+	return func(e *Engine) {
+		e.scoreLedger = scoreledger
+	}
+}
+
+// WithBlockstoreWorkerCount sets the number of worker threads used for
+// blockstore operations in the decision engine
+func WithBlockstoreWorkerCount(count int) Option {
+	if count <= 0 {
+		panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count))
+	}
+	return func(e *Engine) {
+		e.bstoreWorkerCount = count
+	}
+}
+
+// WithTaskWorkerCount sets the number of worker threads used inside the engine
+func WithTaskWorkerCount(count int) Option {
+	if count <= 0 {
+		panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count))
+	}
+	return func(e *Engine) {
+		e.taskWorkerCount = count
+	}
+}
+
+// WithMaxOutstandingBytesPerPeer describes approximately how much work we are willing to have
+// outstanding to a peer at any given time. Setting it to 0 will disable any limiting.
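+// (Work is measured in bytes of requested block data; this value feeds the
+// peer task queue's MaxOutstandingWorkPerPeer option in newEngine below.)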
+func WithMaxOutstandingBytesPerPeer(count int) Option {
+	if count < 0 {
+		panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count))
+	}
+	return func(e *Engine) {
+		e.maxOutstandingBytesPerPeer = count
+	}
+}
+
+// WithMaxQueuedWantlistEntriesPerPeer limits how many individual entries each
+// peer is allowed to send. If a peer sends us more than this, we truncate the
+// newest entries.
+func WithMaxQueuedWantlistEntriesPerPeer(count uint) Option {
+	return func(e *Engine) {
+		e.maxQueuedWantlistEntriesPerPeer = count
+	}
+}
+
+// WithMaxCidSize limits the maximum size, in bytes, of CIDs we are willing to
+// accept from peers; wantlist entries with larger CIDs are ignored.
+func WithMaxCidSize(n uint) Option {
+	return func(e *Engine) {
+		e.maxCidSize = n
+	}
+}
+
+func WithSetSendDontHave(send bool) Option {
+	return func(e *Engine) {
+		e.sendDontHaves = send
+	}
+}
+
+// wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator
+func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator {
+	return func(a, b *peertask.QueueTask) bool {
+		taskDataA := a.Task.Data.(*taskData)
+		taskInfoA := &TaskInfo{
+			Peer:         a.Target,
+			Cid:          a.Task.Topic.(cid.Cid),
+			IsWantBlock:  taskDataA.IsWantBlock,
+			SendDontHave: taskDataA.SendDontHave,
+			BlockSize:    taskDataA.BlockSize,
+			HaveBlock:    taskDataA.HaveBlock,
+		}
+		taskDataB := b.Task.Data.(*taskData)
+		taskInfoB := &TaskInfo{
+			Peer:         b.Target,
+			Cid:          b.Task.Topic.(cid.Cid),
+			IsWantBlock:  taskDataB.IsWantBlock,
+			SendDontHave: taskDataB.SendDontHave,
+			BlockSize:    taskDataB.BlockSize,
+			HaveBlock:    taskDataB.HaveBlock,
+		}
+		return tc(taskInfoA, taskInfoB)
+	}
+}
+
+// NewEngine creates a new block sending engine for the given block store.
+// maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer
+// more tasks if it already has the maximum amount of work outstanding.
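+//
+// A minimal construction, as a sketch (bs, connmgr, and self are assumed to be
+// supplied by the caller; the option shown is illustrative, not required):
+//
+//	e := NewEngine(ctx, bs, connmgr, self,
+//		WithTargetMessageSize(32*1024),
+//	)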
+func NewEngine(
+	ctx context.Context,
+	bs bstore.Blockstore,
+	peerTagger PeerTagger,
+	self peer.ID,
+	opts ...Option,
+) *Engine {
+	return newEngine(
+		ctx,
+		bs,
+		peerTagger,
+		self,
+		maxBlockSizeReplaceHasWithBlock,
+		opts...,
+	)
+}
+
+func newEngine(
+	ctx context.Context,
+	bs bstore.Blockstore,
+	peerTagger PeerTagger,
+	self peer.ID,
+	maxReplaceSize int,
+	opts ...Option,
+) *Engine {
+	e := &Engine{
+		scoreLedger:                     NewDefaultScoreLedger(),
+		bstoreWorkerCount:               defaults.BitswapEngineBlockstoreWorkerCount,
+		maxOutstandingBytesPerPeer:      defaults.BitswapMaxOutstandingBytesPerPeer,
+		peerTagger:                      peerTagger,
+		outbox:                          make(chan (<-chan *Envelope), outboxChanBuffer),
+		workSignal:                      make(chan struct{}, 1),
+		ticker:                          time.NewTicker(time.Millisecond * 100),
+		maxBlockSizeReplaceHasWithBlock: maxReplaceSize,
+		taskWorkerCount:                 defaults.BitswapEngineTaskWorkerCount,
+		sendDontHaves:                   true,
+		self:                            self,
+		peerLedger:                      newPeerLedger(),
+		pendingGauge:                    bmetrics.PendingEngineGauge(ctx),
+		activeGauge:                     bmetrics.ActiveEngineGauge(ctx),
+		targetMessageSize:               defaultTargetMessageSize,
+		tagQueued:                       fmt.Sprintf(tagFormat, "queued", uuid.New().String()),
+		tagUseful:                       fmt.Sprintf(tagFormat, "useful", uuid.New().String()),
+		maxQueuedWantlistEntriesPerPeer: defaults.MaxQueuedWantlistEntiresPerPeer,
+		maxCidSize:                      defaults.MaximumAllowedCid,
+	}
+
+	for _, opt := range opts {
+		opt(e)
+	}
+
+	e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(ctx), bmetrics.ActiveBlocksGauge(ctx))
+
+	// default peer task queue options
+	peerTaskQueueOpts := []peertaskqueue.Option{
+		peertaskqueue.OnPeerAddedHook(e.onPeerAdded),
+		peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved),
+		peertaskqueue.TaskMerger(newTaskMerger()),
+		peertaskqueue.IgnoreFreezing(true),
+		peertaskqueue.MaxOutstandingWorkPerPeer(e.maxOutstandingBytesPerPeer),
+	}
+
+	if e.taskComparator != nil {
+		queueTaskComparator := wrapTaskComparator(e.taskComparator)
+		peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(queueTaskComparator)))
+		peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(queueTaskComparator))
+	}
+
+	e.peerRequestQueue = peertaskqueue.New(peerTaskQueueOpts...)
+
+	return e
+}
+
+func (e *Engine) updateMetrics() {
+	e.metricsLock.Lock()
+	c := e.metricUpdateCounter
+	e.metricUpdateCounter++
+	e.metricsLock.Unlock()
+
+	if c%100 == 0 {
+		stats := e.peerRequestQueue.Stats()
+		e.activeGauge.Set(float64(stats.NumActive))
+		e.pendingGauge.Set(float64(stats.NumPending))
+	}
+}
+
+// SetSendDontHaves indicates what to do when the engine receives a want-block
+// for a block that is not in the blockstore. Either
+// - Send a DONT_HAVE message
+// - Simply don't respond
+// Older versions of Bitswap did not respond, so this allows us to simulate
+// those older versions for testing.
+func (e *Engine) SetSendDontHaves(send bool) {
+	e.sendDontHaves = send
+}
+
+// startScoreLedger starts the score ledger. The scoreLedger defaults to the
+// standard implementation in newEngine; callers can override it with the
+// WithScoreLedger option before the workers are started.
+func (e *Engine) startScoreLedger(px process.Process) {
+	e.scoreLedger.Start(func(p peer.ID, score int) {
+		if score == 0 {
+			e.peerTagger.UntagPeer(p, e.tagUseful)
+		} else {
+			e.peerTagger.TagPeer(p, e.tagUseful, score)
+		}
+	})
+	px.Go(func(ppx process.Process) {
+		<-ppx.Closing()
+		e.scoreLedger.Stop()
+	})
+}
+
+func (e *Engine) startBlockstoreManager(px process.Process) {
+	e.bsm.start()
+	px.Go(func(ppx process.Process) {
+		<-ppx.Closing()
+		e.bsm.stop()
+	})
+}
+
+// StartWorkers starts workers to handle requests from other nodes for the
+// data on this node.
+func (e *Engine) StartWorkers(ctx context.Context, px process.Process) {
+	e.startBlockstoreManager(px)
+	e.startScoreLedger(px)
+
+	e.taskWorkerLock.Lock()
+	defer e.taskWorkerLock.Unlock()
+
+	for i := 0; i < e.taskWorkerCount; i++ {
+		px.Go(func(_ process.Process) {
+			e.taskWorker(ctx)
+		})
+	}
+}
+
+func (e *Engine) onPeerAdded(p peer.ID) {
+	e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight)
+}
+
+func (e *Engine) onPeerRemoved(p peer.ID) {
+	e.peerTagger.UntagPeer(p, e.tagQueued)
+}
+
+// WantlistForPeer returns the list of keys that the given peer has asked for
+func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry {
+	e.lock.RLock()
+	defer e.lock.RUnlock()
+
+	return e.peerLedger.WantlistForPeer(p)
+}
+
+// LedgerForPeer returns aggregated data about communication with a given peer.
+func (e *Engine) LedgerForPeer(p peer.ID) *Receipt {
+	return e.scoreLedger.GetReceipt(p)
+}
+
+// Each taskWorker pulls items off the request queue up to the maximum size
+// and adds them to an envelope that is passed off to the bitswap workers,
+// which send the message to the network.
+func (e *Engine) taskWorker(ctx context.Context) {
+	defer e.taskWorkerExit()
+	for {
+		oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking
+		select {
+		case <-ctx.Done():
+			return
+		case e.outbox <- oneTimeUse:
+		}
+		// Receiver is ready for an outgoing envelope. Let's prepare one.
+		// First, we must acquire a task from the PQ...
+		envelope, err := e.nextEnvelope(ctx)
+		if err != nil {
+			close(oneTimeUse)
+			return // ctx cancelled
+		}
+		oneTimeUse <- envelope // buffered. won't block
+		close(oneTimeUse)
+	}
+}
+
+// taskWorkerExit handles cleanup of task workers
+func (e *Engine) taskWorkerExit() {
+	e.taskWorkerLock.Lock()
+	defer e.taskWorkerLock.Unlock()
+
+	e.taskWorkerCount--
+	if e.taskWorkerCount == 0 {
+		close(e.outbox)
+	}
+}
+
+// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
+// context is cancelled before the next Envelope can be created.
+func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
+	for {
+		// Pop some tasks off the request queue
+		p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(e.targetMessageSize)
+		e.updateMetrics()
+		for len(nextTasks) == 0 {
+			select {
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			case <-e.workSignal:
+				p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize)
+				e.updateMetrics()
+			case <-e.ticker.C:
+				// When a task is cancelled, the queue may be "frozen" for a
+				// period of time. We periodically "thaw" the queue to make
+				// sure it doesn't get stuck in a frozen state.
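+				// (Thawing re-admits peers that were frozen after
+				// cancellations, so their remaining tasks become
+				// poppable again.)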
+ e.peerRequestQueue.ThawRound() + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) + e.updateMetrics() + } + } + + // Create a new message + msg := bsmsg.New(false) + + log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) + + // Amount of data in the request queue still waiting to be popped + msg.SetPendingBytes(int32(pendingBytes)) + + // Split out want-blocks, want-haves and DONT_HAVEs + blockCids := make([]cid.Cid, 0, len(nextTasks)) + blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) + for _, t := range nextTasks { + c := t.Topic.(cid.Cid) + td := t.Data.(*taskData) + if td.HaveBlock { + if td.IsWantBlock { + blockCids = append(blockCids, c) + blockTasks[c] = td + } else { + // Add HAVES to the message + msg.AddHave(c) + } + } else { + // Add DONT_HAVEs to the message + msg.AddDontHave(c) + } + } + + // Fetch blocks from datastore + blks, err := e.bsm.getBlocks(ctx, blockCids) + if err != nil { + // we're dropping the envelope but that's not an issue in practice. + return nil, err + } + + for c, t := range blockTasks { + blk := blks[c] + // If the block was not found (it has been removed) + if blk == nil { + // If the client requested DONT_HAVE, add DONT_HAVE to the message + if t.SendDontHave { + msg.AddDontHave(c) + } + } else { + // Add the block to the message + // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", e.self, p, c, len(blk.RawData())) + msg.AddBlock(blk) + } + } + + // If there's nothing in the message, bail out + if msg.Empty() { + e.peerRequestQueue.TasksDone(p, nextTasks...) + continue + } + + log.Debugw("Bitswap engine -> msg", "local", e.self, "to", p, "blockCount", len(msg.Blocks()), "presenceCount", len(msg.BlockPresences()), "size", msg.Size()) + return &Envelope{ + Peer: p, + Message: msg, + Sent: func() { + // Once the message has been sent, signal the request queue so + // it can be cleared from the queue + e.peerRequestQueue.TasksDone(p, nextTasks...) + + // Signal the worker to check for more work + e.signalNewWork() + }, + }, nil + } +} + +// Outbox returns a channel of one-time use Envelope channels. +func (e *Engine) Outbox() <-chan (<-chan *Envelope) { + return e.outbox +} + +// Peers returns a slice of Peers with whom the local node has active sessions. +func (e *Engine) Peers() []peer.ID { + e.lock.RLock() + defer e.lock.RUnlock() + + return e.peerLedger.CollectPeerIDs() +} + +// MessageReceived is called when a message is received from a remote peer. 
+// For each item in the wantlist, add a want-have or want-block entry to the
+// request queue (these are later popped off by the taskWorkers)
+func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) (mustKillConnection bool) {
+	entries := m.Wantlist()
+
+	if len(entries) > 0 {
+		log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries))
+		for _, et := range entries {
+			if !et.Cancel {
+				if et.WantType == pb.Message_Wantlist_Have {
+					log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid)
+				} else {
+					log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid)
+				}
+			}
+		}
+	}
+
+	if m.Empty() {
+		log.Infof("received empty message from %s", p)
+	}
+
+	newWorkExists := false
+	defer func() {
+		if newWorkExists {
+			e.signalNewWork()
+		}
+	}()
+
+	// Dispatch entries
+	wants, cancels := e.splitWantsCancels(entries)
+	wants, denials := e.splitWantsDenials(p, wants)
+
+	// Get block sizes
+	wantKs := cid.NewSet()
+	for _, entry := range wants {
+		wantKs.Add(entry.Cid)
+	}
+	blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys())
+	if err != nil {
+		log.Info("aborting message processing", err)
+		return
+	}
+
+	e.lock.Lock()
+
+	if m.Full() {
+		e.peerLedger.ClearPeerWantlist(p)
+	}
+
+	s := uint(e.peerLedger.WantlistSizeForPeer(p))
+	if wouldBe := s + uint(len(wants)); wouldBe > e.maxQueuedWantlistEntriesPerPeer {
+		log.Debugw("wantlist overflow", "local", e.self, "remote", p, "would be", wouldBe)
+		// truncate wantlist to avoid overflow
+		available, o := bits.Sub(e.maxQueuedWantlistEntriesPerPeer, s, 0)
+		if o != 0 {
+			available = 0
+		}
+		wants = wants[:available]
+	}
+
+	filteredWants := wants[:0] // shift in place
+
+	for _, entry := range wants {
+		if entry.Cid.Prefix().MhType == mh.IDENTITY {
+			// This is a truly broken client; kill the connection.
+			e.lock.Unlock()
+			log.Warnw("peer wants an identity CID", "local", e.self, "remote", p)
+			return true
+		}
+		if e.maxCidSize != 0 && uint(entry.Cid.ByteLen()) > e.maxCidSize {
+			// Ignore requests for CIDs that are too big.
+			continue
+		}
+
+		e.peerLedger.Wants(p, entry.Entry)
+		filteredWants = append(filteredWants, entry)
+	}
+	clear := wants[len(filteredWants):]
+	for i := range clear {
+		clear[i] = bsmsg.Entry{} // early GC
+	}
+	wants = filteredWants
+	for _, entry := range cancels {
+		if entry.Cid.Prefix().MhType == mh.IDENTITY {
+			// This is a truly broken client; kill the connection.
+			e.lock.Unlock()
+			log.Warnw("peer canceled an identity CID", "local", e.self, "remote", p)
+			return true
+		}
+		if e.maxCidSize != 0 && uint(entry.Cid.ByteLen()) > e.maxCidSize {
+			// Ignore cancels for CIDs that are too big.
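+			// (Oversized wants were never queued above, so there is
+			// nothing for the matching cancel to remove either.)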
+			continue
+		}
+
+		log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid)
+		if e.peerLedger.CancelWant(p, entry.Cid) {
+			e.peerRequestQueue.Remove(entry.Cid, p)
+		}
+	}
+	e.lock.Unlock()
+
+	var activeEntries []peertask.Task
+
+	// sendDontHave queues a DONT_HAVE response for the given entry, but only
+	// if the requester asked for one.
+	sendDontHave := func(entry bsmsg.Entry) {
+		// Only add the task to the queue if the requester wants a DONT_HAVE
+		if e.sendDontHaves && entry.SendDontHave {
+			c := entry.Cid
+
+			newWorkExists = true
+			isWantBlock := false
+			if entry.WantType == pb.Message_Wantlist_Block {
+				isWantBlock = true
+			}
+
+			activeEntries = append(activeEntries, peertask.Task{
+				Topic:    c,
+				Priority: int(entry.Priority),
+				Work:     bsmsg.BlockPresenceSize(c),
+				Data: &taskData{
+					BlockSize:    0,
+					HaveBlock:    false,
+					IsWantBlock:  isWantBlock,
+					SendDontHave: entry.SendDontHave,
+				},
+			})
+		}
+	}
+
+	// Deny access to blocks
+	for _, entry := range denials {
+		log.Debugw("Bitswap engine: block denied access", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave)
+		sendDontHave(entry)
+	}
+
+	// For each want-have / want-block
+	for _, entry := range wants {
+		c := entry.Cid
+		blockSize, found := blockSizes[entry.Cid]
+
+		// If the block was not found
+		if !found {
+			log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave)
+			sendDontHave(entry)
+		} else {
+			// The block was found, add it to the queue
+			newWorkExists = true
+
+			isWantBlock := e.sendAsBlock(entry.WantType, blockSize)
+
+			log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock)
+
+			// entrySize is the amount of space the entry takes up in the
+			// message we send to the recipient. If we're sending a block, the
+			// entrySize is the size of the block. Otherwise it's the size of
+			// a block presence entry.
+			entrySize := blockSize
+			if !isWantBlock {
+				entrySize = bsmsg.BlockPresenceSize(c)
+			}
+			activeEntries = append(activeEntries, peertask.Task{
+				Topic:    c,
+				Priority: int(entry.Priority),
+				Work:     entrySize,
+				Data: &taskData{
+					BlockSize:    blockSize,
+					HaveBlock:    true,
+					IsWantBlock:  isWantBlock,
+					SendDontHave: entry.SendDontHave,
+				},
+			})
+		}
+	}
+
+	// Push entries onto the request queue
+	if len(activeEntries) > 0 {
+		e.peerRequestQueue.PushTasksTruncated(e.maxQueuedWantlistEntriesPerPeer, p, activeEntries...)
+		e.updateMetrics()
+	}
+	return false
+}
+
+// splitWantsCancels splits the want-have / want-block entries from the cancel
+// entries.
+func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) {
+	wants := make([]bsmsg.Entry, 0, len(es))
+	cancels := make([]bsmsg.Entry, 0, len(es))
+	for _, et := range es {
+		if et.Cancel {
+			cancels = append(cancels, et)
+		} else {
+			wants = append(wants, et)
+		}
+	}
+	return wants, cancels
+}
+
+// splitWantsDenials splits the want-have / want-block entries from the entries
+// that will be denied access.
+func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) {
+	if e.peerBlockRequestFilter == nil {
+		return allWants, nil
+	}
+
+	wants := make([]bsmsg.Entry, 0, len(allWants))
+	denied := make([]bsmsg.Entry, 0, len(allWants))
+
+	for _, et := range allWants {
+		if e.peerBlockRequestFilter(p, et.Cid) {
+			wants = append(wants, et)
+		} else {
+			denied = append(denied, et)
+		}
+	}
+
+	return wants, denied
+}
+
+// ReceivedBlocks is called when new blocks are received from the network.
+// This function also updates the receive side of the ledger.
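+// Note that this only updates accounting: callers that also want the engine to
+// serve the blocks to interested peers must store them and then call
+// NotifyNewBlocks, as the tests in this change do.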
+func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) {
+	if len(blks) == 0 {
+		return
+	}
+
+	// Record how many bytes were received in the ledger
+	for _, blk := range blks {
+		log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData()))
+		e.scoreLedger.AddToReceivedBytes(from, len(blk.RawData()))
+	}
+}
+
+// NotifyNewBlocks is called when new blocks become available locally, and in
+// particular when the caller of bitswap decides to store those blocks and
+// make them available on the network.
+func (e *Engine) NotifyNewBlocks(blks []blocks.Block) {
+	if len(blks) == 0 {
+		return
+	}
+
+	// Get the size of each block
+	blockSizes := make(map[cid.Cid]int, len(blks))
+	for _, blk := range blks {
+		blockSizes[blk.Cid()] = len(blk.RawData())
+	}
+
+	// Check each peer to see if it wants one of the blocks we received
+	var work bool
+	for _, b := range blks {
+		k := b.Cid()
+
+		e.lock.RLock()
+		peers := e.peerLedger.Peers(k)
+		e.lock.RUnlock()
+
+		for _, entry := range peers {
+			work = true
+
+			blockSize := blockSizes[k]
+			isWantBlock := e.sendAsBlock(entry.WantType, blockSize)
+
+			entrySize := blockSize
+			if !isWantBlock {
+				entrySize = bsmsg.BlockPresenceSize(k)
+			}
+
+			e.peerRequestQueue.PushTasksTruncated(e.maxQueuedWantlistEntriesPerPeer, entry.Peer, peertask.Task{
+				Topic:    k,
+				Priority: int(entry.Priority),
+				Work:     entrySize,
+				Data: &taskData{
+					BlockSize:    blockSize,
+					HaveBlock:    true,
+					IsWantBlock:  isWantBlock,
+					SendDontHave: false,
+				},
+			})
+			e.updateMetrics()
+		}
+	}
+
+	if work {
+		e.signalNewWork()
+	}
+}
+
+// TODO add contents of m.WantList() to my local wantlist? NB: could introduce
+// race conditions where I send a message, but MessageSent gets handled after
+// MessageReceived. The information in the local wantlist could become
+// inconsistent. Would need to ensure that Sends and acknowledgement of the
+// send happen atomically
+
+// MessageSent is called when a message has successfully been sent out, to record
+// changes.
+func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+
+	// Remove sent blocks from the want list for the peer
+	for _, block := range m.Blocks() {
+		e.scoreLedger.AddToSentBytes(p, len(block.RawData()))
+		e.peerLedger.CancelWantWithType(p, block.Cid(), pb.Message_Wantlist_Block)
+	}
+
+	// Remove sent block presences from the want list for the peer
+	for _, bp := range m.BlockPresences() {
+		// Don't record sent data. We reserve that for data blocks.
+		if bp.Type == pb.Message_Have {
+			e.peerLedger.CancelWantWithType(p, bp.Cid, pb.Message_Wantlist_Have)
+		}
+	}
+}
+
+// PeerConnected is called when a new peer connects, meaning we should start
+// sending blocks.
+func (e *Engine) PeerConnected(p peer.ID) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+
+	e.scoreLedger.PeerConnected(p)
+}
+
+// PeerDisconnected is called when a peer disconnects.
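+// It clears the peer's outstanding tasks and releases the accounting held for
+// it by both the peer ledger and the score ledger.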
+func (e *Engine) PeerDisconnected(p peer.ID) { + e.peerRequestQueue.Clear(p) + + e.lock.Lock() + defer e.lock.Unlock() + + e.peerLedger.PeerDisconnected(p) + e.scoreLedger.PeerDisconnected(p) +} + +// If the want is a want-have, and it's below a certain size, send the full +// block (instead of sending a HAVE) +func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { + isWantBlock := wantType == pb.Message_Wantlist_Block + return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock +} + +func (e *Engine) numBytesSentTo(p peer.ID) uint64 { + return e.LedgerForPeer(p).Sent +} + +func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { + return e.LedgerForPeer(p).Recv +} + +func (e *Engine) signalNewWork() { + // Signal task generation to restart (if stopped!) + select { + case e.workSignal <- struct{}{}: + default: + } +} diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go new file mode 100644 index 0000000000..ce7c800d68 --- /dev/null +++ b/bitswap/server/internal/decision/engine_test.go @@ -0,0 +1,1764 @@ +package decision + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/ipfs/boxo/bitswap/internal/testutil" + message "github.com/ipfs/boxo/bitswap/message" + pb "github.com/ipfs/boxo/bitswap/message/pb" + blocks "github.com/ipfs/boxo/blocks" + blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/internal/test" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + process "github.com/jbenet/goprocess" + peer "github.com/libp2p/go-libp2p/core/peer" + libp2ptest "github.com/libp2p/go-libp2p/core/test" + mh "github.com/multiformats/go-multihash" +) + +type peerTag struct { + done chan struct{} + peers map[peer.ID]int +} + +type fakePeerTagger struct { + lk sync.Mutex + tags map[string]*peerTag +} + +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + if fpt.tags == nil { + fpt.tags = make(map[string]*peerTag, 1) + } + pt, ok := fpt.tags[tag] + if !ok { + pt = &peerTag{peers: make(map[peer.ID]int, 1), done: make(chan struct{})} + fpt.tags[tag] = pt + } + pt.peers[p] = n +} + +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + pt := fpt.tags[tag] + if pt == nil { + return + } + delete(pt.peers, p) + if len(pt.peers) == 0 { + close(pt.done) + delete(fpt.tags, tag) + } +} + +func (fpt *fakePeerTagger) count(tag string) int { + fpt.lk.Lock() + defer fpt.lk.Unlock() + if pt, ok := fpt.tags[tag]; ok { + return len(pt.peers) + } + return 0 +} + +func (fpt *fakePeerTagger) wait(tag string) { + fpt.lk.Lock() + pt := fpt.tags[tag] + if pt == nil { + fpt.lk.Unlock() + return + } + doneCh := pt.done + fpt.lk.Unlock() + <-doneCh +} + +type engineSet struct { + PeerTagger *fakePeerTagger + Peer peer.ID + Engine *Engine + Blockstore blockstore.Blockstore +} + +func newTestEngine(ctx context.Context, idStr string, opts ...Option) engineSet { + return newTestEngineWithSampling(ctx, idStr, shortTerm, nil, clock.New(), opts...) 
+}
+
+func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock, opts ...Option) engineSet {
+	fpt := &fakePeerTagger{}
+	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, append(opts[:len(opts):len(opts)], WithScoreLedger(NewTestScoreLedger(peerSampleInterval, sampleCh, clock)), WithBlockstoreWorkerCount(4))...)
+	e.StartWorkers(ctx, process.WithTeardown(func() error { return nil }))
+	return engineSet{
+		Peer: peer.ID(idStr),
+		//Strategy: New(true),
+		PeerTagger: fpt,
+		Blockstore: bs,
+		Engine:     e,
+	}
+}
+
+func TestConsistentAccounting(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	sender := newTestEngine(ctx, "Ernie")
+	receiver := newTestEngine(ctx, "Bert")
+
+	// Send messages from Ernie to Bert
+	for i := 0; i < 1000; i++ {
+		m := message.New(false)
+		content := []string{"this", "is", "message", "i"}
+		m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))
+
+		sender.Engine.MessageSent(receiver.Peer, m)
+		receiver.Engine.MessageReceived(ctx, sender.Peer, m)
+		receiver.Engine.ReceivedBlocks(sender.Peer, m.Blocks())
+	}
+
+	// Ensure sender records the change
+	if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
+		t.Fatal("Sent bytes were not recorded")
+	}
+
+	// Ensure sender and receiver have the same values
+	if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
+		t.Fatal("Inconsistent book-keeping. Strategies don't agree")
+	}
+
+	// Ensure the sender didn't record receiving anything, and that the
+	// receiver didn't record sending anything
+	if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
+		t.Fatal("Bert didn't send bytes to Ernie")
+	}
+}
+
+func TestPeerIsAddedToPeersWhenMessageSent(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	sanfrancisco := newTestEngine(ctx, "sf")
+	seattle := newTestEngine(ctx, "sea")
+
+	m := message.New(true)
+
+	// We need to request something for it to add us as partner.
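+	// AddEntry(cid, priority, wantType, sendDontHave): a single want-block at
+	// priority 0 is enough for the receiving engine to start tracking the sender.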
+ m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true) + + seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m) + + if seattle.Peer == sanfrancisco.Peer { + t.Fatal("Sanity Check: Peers have same Key!") + } + + if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) { + t.Fatal("Peer wasn't added as a Partner") + } + + seattle.Engine.PeerDisconnected(sanfrancisco.Peer) + if peerIsPartner(sanfrancisco.Peer, seattle.Engine) { + t.Fatal("expected peer to be removed") + } +} + +func peerIsPartner(p peer.ID, e *Engine) bool { + for _, partner := range e.Peers() { + if partner == p { + return true + } + } + return false +} + +func newEngineForTesting( + ctx context.Context, + bs blockstore.Blockstore, + peerTagger PeerTagger, + self peer.ID, + maxReplaceSize int, + opts ...Option, +) *Engine { + return newEngine( + ctx, + bs, + peerTagger, + self, + maxReplaceSize, + opts..., + ) +} + +func TestOutboxClosedWhenEngineClosed(t *testing.T) { + test.Flaky(t) + + t.SkipNow() // TODO implement *Engine.Close + ctx := context.Background() + e := newEngineForTesting(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + var wg sync.WaitGroup + wg.Add(1) + go func() { + for nextEnvelope := range e.Outbox() { + <-nextEnvelope + } + wg.Done() + }() + // e.Close() + wg.Wait() + if _, ok := <-e.Outbox(); ok { + t.Fatal("channel should be closed") + } +} + +func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { + test.Flaky(t) + + alphabet := "abcdefghijklmnopqrstuvwxyz" + vowels := "aeiou" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(context.Background(), block); err != nil { + t.Fatal(err) + } + } + + partner := libp2ptest.RandPeerIDFatal(t) + // partnerWantBlocks(e, vowels, partner) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Just send want-blocks + { + wls: []testCaseEntry{ + { + wantBlks: vowels, + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + { + blks: vowels, + }, + }, + }, + + // Send want-blocks and want-haves + { + wls: []testCaseEntry{ + { + wantBlks: vowels, + wantHaves: "fgh", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + { + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, but without requesting DONT_HAVES + { + wls: []testCaseEntry{ + { + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + { + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, and request DONT_HAVES + { + wls: []testCaseEntry{ + { + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + { + blks: vowels, + haves: "fgh", + dontHaves: "123", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, but without requesting DONT_HAVES + { + wls: []testCaseEntry{ 
+ { + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + { + blks: "aeiou", + haves: "fgh", + dontHaves: "", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + { + wls: []testCaseEntry{ + { + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + { + blks: "aeiou", + haves: "fgh", + dontHaves: "123456", + }, + }, + }, + + // Send repeated want-blocks + { + wls: []testCaseEntry{ + { + wantBlks: "ae", + sendDontHave: false, + }, + { + wantBlks: "io", + sendDontHave: false, + }, + { + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + { + blks: "aeiou", + }, + }, + }, + + // Send repeated want-blocks and want-haves + { + wls: []testCaseEntry{ + { + wantBlks: "ae", + wantHaves: "jk", + sendDontHave: false, + }, + { + wantBlks: "io", + wantHaves: "lm", + sendDontHave: false, + }, + { + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + { + blks: "aeiou", + haves: "jklm", + }, + }, + }, + + // Send repeated want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + { + wls: []testCaseEntry{ + { + wantBlks: "ae12", + wantHaves: "jk5", + sendDontHave: true, + }, + { + wantBlks: "io34", + wantHaves: "lm", + sendDontHave: true, + }, + { + wantBlks: "u", + wantHaves: "6", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + { + blks: "aeiou", + haves: "jklm", + dontHaves: "123456", + }, + }, + }, + + // Send want-block then want-have for same CID + { + wls: []testCaseEntry{ + { + wantBlks: "a", + sendDontHave: true, + }, + { + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + { + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + { + wls: []testCaseEntry{ + { + wantHaves: "b", + sendDontHave: true, + }, + { + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-block should overwrite existing want-have + exp: []testCaseExp{ + { + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + { + wls: []testCaseEntry{ + { + wantBlks: "a", + sendDontHave: true, + }, + { + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + { + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + { + wls: []testCaseEntry{ + { + wantHaves: "a", + sendDontHave: true, + }, + { + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + { + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + ctx := context.Background() + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + for i, testCase := range testCases { + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := 
strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + } + + for _, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + next := <-e.Outbox() + env := <-next + err := checkOutput(t, e, env, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + env.Sent() + } + } +} + +func TestPartnerWantHaveWantBlockActive(t *testing.T) { + test.Flaky(t) + + alphabet := "abcdefghijklmnopqrstuvwxyz" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(context.Background(), block); err != nil { + t.Fatal(err) + } + } + + partner := libp2ptest.RandPeerIDFatal(t) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Send want-block then want-have for same CID + { + wls: []testCaseEntry{ + { + wantBlks: "a", + sendDontHave: true, + }, + { + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + { + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + { + wls: []testCaseEntry{ + { + wantHaves: "b", + sendDontHave: true, + }, + { + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-have is active when want-block is added, so want-have + // should get sent, then want-block + exp: []testCaseExp{ + { + haves: "b", + }, + { + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + { + wls: []testCaseEntry{ + { + wantBlks: "a", + sendDontHave: true, + }, + { + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + { + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + { + wls: []testCaseEntry{ + { + wantHaves: "a", + sendDontHave: true, + }, + { + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + { + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + ctx := context.Background() + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + + var next envChan + for i, testCase := range testCases { + envs := make([]*Envelope, 0) + + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + + var env *Envelope + next, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + envs = append(envs, env) + } + } + + if len(envs) != len(testCase.exp) { + t.Fatalf("Expected %d envelopes but 
received %d", len(testCase.exp), len(envs)) + } + + for i, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envs[i], expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + envs[i].Sent() + } + } +} + +func checkOutput(t *testing.T, e *Engine, envelope *Envelope, expBlks []string, expHaves []string, expDontHaves []string) error { + blks := envelope.Message.Blocks() + presences := envelope.Message.BlockPresences() + + // Verify payload message length + if len(blks) != len(expBlks) { + blkDiff := formatBlocksDiff(blks, expBlks) + msg := fmt.Sprintf("Received %d blocks. Expected %d blocks:\n%s", len(blks), len(expBlks), blkDiff) + return errors.New(msg) + } + + // Verify block presences message length + expPresencesCount := len(expHaves) + len(expDontHaves) + if len(presences) != expPresencesCount { + presenceDiff := formatPresencesDiff(presences, expHaves, expDontHaves) + return fmt.Errorf("Received %d BlockPresences. Expected %d BlockPresences:\n%s", + len(presences), expPresencesCount, presenceDiff) + } + + // Verify payload message contents + for _, k := range expBlks { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range blks { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(formatBlocksDiff(blks, expBlks)) + } + } + + // Verify HAVEs + if err := checkPresence(presences, expHaves, pb.Message_Have); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + // Verify DONT_HAVEs + if err := checkPresence(presences, expDontHaves, pb.Message_DontHave); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + return nil +} + +func checkPresence(presences []message.BlockPresence, expPresence []string, presenceType pb.Message_BlockPresenceType) error { + for _, k := range expPresence { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, p := range presences { + if p.Cid.Equals(expected.Cid()) { + found = true + if p.Type != presenceType { + return errors.New("type mismatch") + } + break + } + } + if !found { + return errors.New("not found") + } + } + return nil +} + +func formatBlocksDiff(blks []blocks.Block, expBlks []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("Blocks (%d):\n", len(blks))) + for _, b := range blks { + out.WriteString(fmt.Sprintf(" %s: %s\n", b.Cid(), b.RawData())) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expBlks))) + for _, k := range expBlks { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s\n", expected.Cid(), k)) + } + return out.String() +} + +func formatPresencesDiff(presences []message.BlockPresence, expHaves []string, expDontHaves []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("BlockPresences (%d):\n", len(presences))) + for _, p := range presences { + t := "HAVE" + if p.Type == pb.Message_DontHave { + t = "DONT_HAVE" + } + out.WriteString(fmt.Sprintf(" %s - %s\n", p.Cid, t)) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expHaves)+len(expDontHaves))) + for _, k := range expHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", expected.Cid(), k)) + } + for _, k := range expDontHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", 
expected.Cid(), k)) + } + return out.String() +} + +func TestPartnerWantsThenCancels(t *testing.T) { + test.Flaky(t) + + numRounds := 10 + if testing.Short() { + numRounds = 1 + } + alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") + vowels := strings.Split("aeiou", "") + + type testCase [][]string + testcases := []testCase{ + { + alphabet, vowels, + }, + { + alphabet, stringsComplement(alphabet, vowels), + alphabet[1:25], stringsComplement(alphabet[1:25], vowels), alphabet[2:25], stringsComplement(alphabet[2:25], vowels), + alphabet[3:25], stringsComplement(alphabet[3:25], vowels), alphabet[4:25], stringsComplement(alphabet[4:25], vowels), + alphabet[5:25], stringsComplement(alphabet[5:25], vowels), alphabet[6:25], stringsComplement(alphabet[6:25], vowels), + alphabet[7:25], stringsComplement(alphabet[7:25], vowels), alphabet[8:25], stringsComplement(alphabet[8:25], vowels), + alphabet[9:25], stringsComplement(alphabet[9:25], vowels), alphabet[10:25], stringsComplement(alphabet[10:25], vowels), + alphabet[11:25], stringsComplement(alphabet[11:25], vowels), alphabet[12:25], stringsComplement(alphabet[12:25], vowels), + alphabet[13:25], stringsComplement(alphabet[13:25], vowels), alphabet[14:25], stringsComplement(alphabet[14:25], vowels), + alphabet[15:25], stringsComplement(alphabet[15:25], vowels), alphabet[16:25], stringsComplement(alphabet[16:25], vowels), + alphabet[17:25], stringsComplement(alphabet[17:25], vowels), alphabet[18:25], stringsComplement(alphabet[18:25], vowels), + alphabet[19:25], stringsComplement(alphabet[19:25], vowels), alphabet[20:25], stringsComplement(alphabet[20:25], vowels), + alphabet[21:25], stringsComplement(alphabet[21:25], vowels), alphabet[22:25], stringsComplement(alphabet[22:25], vowels), + alphabet[23:25], stringsComplement(alphabet[23:25], vowels), alphabet[24:25], stringsComplement(alphabet[24:25], vowels), + alphabet[25:25], stringsComplement(alphabet[25:25], vowels), + }, + } + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range alphabet { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(context.Background(), block); err != nil { + t.Fatal(err) + } + } + + ctx := context.Background() + for i := 0; i < numRounds; i++ { + expected := make([][]string, 0, len(testcases)) + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + for _, testcase := range testcases { + set := testcase[0] + cancels := testcase[1] + keeps := stringsComplement(set, cancels) + expected = append(expected, keeps) + + partner := libp2ptest.RandPeerIDFatal(t) + + partnerWantBlocks(e, set, partner) + partnerCancels(e, cancels, partner) + } + if err := checkHandledInOrder(t, e, expected); err != nil { + t.Logf("run #%d of %d", i, numRounds) + t.Fatal(err) + } + } +} + +func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { + test.Flaky(t) + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + ctx := context.Background() + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := 
message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, false) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, so shouldn't get any envelope + var next envChan + next, env := getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + t.Fatal("expected no envelope yet") + } + + e.ReceivedBlocks(otherPeer, []blocks.Block{blks[0], blks[2]}) + if err := bs.PutMany(context.Background(), []blocks.Block{blks[0], blks[2]}); err != nil { + t.Fatal(err) + } + e.NotifyNewBlocks([]blocks.Block{blks[0], blks[2]}) + _, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + sentBlk := env.Message.Blocks() + if len(sentBlk) != 1 || !sentBlk[0].Cid().Equals(blks[2].Cid()) { + t.Fatal("expected 1 block") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 1 || !sentHave[0].Cid.Equals(blks[0].Cid()) || sentHave[0].Type != pb.Message_Have { + t.Fatal("expected 1 HAVE") + } +} + +func TestSendDontHave(t *testing.T) { + test.Flaky(t) + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + ctx := context.Background() + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, true) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, should get DONT_HAVE for entries that wanted it + var next envChan + next, env := getNextEnvelope(e, next, 10*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) > 0 { + t.Fatal("expected no blocks") + } + sentDontHaves := env.Message.BlockPresences() + if len(sentDontHaves) != 2 { + t.Fatal("expected 2 DONT_HAVEs") + } + if !sentDontHaves[0].Cid.Equals(blks[1].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[1].Cid()) { + t.Fatal("expected DONT_HAVE for want-have") + } + if !sentDontHaves[0].Cid.Equals(blks[3].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[3].Cid()) { + t.Fatal("expected DONT_HAVE for want-block") + } + + // Receive all the blocks + e.ReceivedBlocks(otherPeer, []blocks.Block{blks[0], blks[2]}) + if err := bs.PutMany(context.Background(), blks); err != nil { + t.Fatal(err) + } + e.NotifyNewBlocks(blks) + + // Envelope should contain 2 HAVEs / 2 blocks + _, env = getNextEnvelope(e, next, 10*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) != 2 { + t.Fatal("expected 2 blocks") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 2 || sentHave[0].Type != pb.Message_Have || 
sentHave[1].Type != pb.Message_Have {
+		t.Fatal("expected 2 HAVEs")
+	}
+}
+
+func TestWantlistForPeer(t *testing.T) {
+	test.Flaky(t)
+
+	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	partner := libp2ptest.RandPeerIDFatal(t)
+	otherPeer := libp2ptest.RandPeerIDFatal(t)
+
+	ctx := context.Background()
+	e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4))
+	e.StartWorkers(ctx, process.WithTeardown(func() error { return nil }))
+
+	blks := testutil.GenerateBlocksOfSize(4, 8*1024)
+	msg := message.New(false)
+	msg.AddEntry(blks[0].Cid(), 2, pb.Message_Wantlist_Have, false)
+	msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false)
+	e.MessageReceived(context.Background(), partner, msg)
+
+	msg2 := message.New(false)
+	msg2.AddEntry(blks[2].Cid(), 1, pb.Message_Wantlist_Block, false)
+	msg2.AddEntry(blks[3].Cid(), 4, pb.Message_Wantlist_Block, false)
+	e.MessageReceived(context.Background(), partner, msg2)
+
+	entries := e.WantlistForPeer(otherPeer)
+	if len(entries) != 0 {
+		t.Fatal("expected wantlist to contain no wants for other peer")
+	}
+
+	entries = e.WantlistForPeer(partner)
+	if len(entries) != 4 {
+		t.Fatal("expected wantlist to contain all wants from partner")
+	}
+
+	e.PeerDisconnected(partner)
+	entries = e.WantlistForPeer(partner)
+	if len(entries) != 0 {
+		t.Fatal("expected wantlist to be empty after disconnect")
+	}
+}
+
+func TestTaskComparator(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	keys := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
+	cids := make(map[cid.Cid]int)
+	blks := make([]blocks.Block, 0, len(keys))
+	for i, letter := range keys {
+		block := blocks.NewBlock([]byte(letter))
+		blks = append(blks, block)
+		cids[block.Cid()] = i
+	}
+
+	fpt := &fakePeerTagger{}
+	sl := NewTestScoreLedger(shortTerm, nil, clock.New())
+	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	if err := bs.PutMany(ctx, blks); err != nil {
+		t.Fatal(err)
+	}
+
+	// use a single task worker so that the order of outgoing messages is deterministic
+	e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithTaskWorkerCount(1),
+		// if this Option is omitted, the test fails
+		WithTaskComparator(func(ta, tb *TaskInfo) bool {
+			// prioritize based on lexicographic ordering of block content
+			return cids[ta.Cid] < cids[tb.Cid]
+		}),
+	)
+	e.StartWorkers(ctx, process.WithTeardown(func() error { return nil }))
+
+	// rely on randomness of Go map's iteration order to add Want entries in random order
+	peerIDs := make([]peer.ID, len(keys))
+	for _, i := range cids {
+		peerID := libp2ptest.RandPeerIDFatal(t)
+		peerIDs[i] = peerID
+		partnerWantBlocks(e, keys[i:i+1], peerID)
+	}
+
+	// check that outgoing messages are sent in the correct order
+	for i, peerID := range peerIDs {
+		next := <-e.Outbox()
+		envelope := <-next
+		if peerID != envelope.Peer {
+			t.Errorf("expected message for peer ID %#v but instead got message for peer ID %#v", peerID, envelope.Peer)
+		}
+		responseBlocks := envelope.Message.Blocks()
+		if len(responseBlocks) != 1 {
+			t.Errorf("expected 1 block in response but instead got %v", len(responseBlocks))
+		} else if responseBlocks[0].Cid() != blks[i].Cid() {
+			t.Errorf("expected block with CID %#v but instead got block with CID %#v", blks[i].Cid(), responseBlocks[0].Cid())
+		}
+	}
+}
+
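+// The comparator above is one instance of a general pattern: any total order
+// over *TaskInfo can be plugged in via WithTaskComparator. As a hypothetical
+// sketch (not part of this change), a policy that serves want-blocks before
+// want-haves, and smaller blocks first, could look like:
+//
+//	WithTaskComparator(func(ta, tb *TaskInfo) bool {
+//		if ta.IsWantBlock != tb.IsWantBlock {
+//			return ta.IsWantBlock
+//		}
+//		return ta.BlockSize < tb.BlockSize
+//	})
+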
+func TestPeerBlockFilter(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	// Generate a few keys
+	keys := []string{"a", "b", "c", "d"}
+	blks := make([]blocks.Block, 0, len(keys))
+	for _, letter := range keys {
+		block := blocks.NewBlock([]byte(letter))
+		blks = append(blks, block)
+	}
+
+	// Generate a few partner peers
+	peerIDs := make([]peer.ID, 3)
+	peerIDs[0] = libp2ptest.RandPeerIDFatal(t)
+	peerIDs[1] = libp2ptest.RandPeerIDFatal(t)
+	peerIDs[2] = libp2ptest.RandPeerIDFatal(t)
+
+	// Setup the main peer
+	fpt := &fakePeerTagger{}
+	sl := NewTestScoreLedger(shortTerm, nil, clock.New())
+	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	if err := bs.PutMany(ctx, blks); err != nil {
+		t.Fatal(err)
+	}
+
+	e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4),
+		WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool {
+			// peer 0 has access to everything
+			if p == peerIDs[0] {
+				return true
+			}
+			// peer 1 can only access keys c and d
+			if p == peerIDs[1] {
+				return blks[2].Cid().Equals(c) || blks[3].Cid().Equals(c)
+			}
+			// peer 2 and others can only access key d
+			return blks[3].Cid().Equals(c)
+		}),
+	)
+	e.StartWorkers(ctx, process.WithTeardown(func() error { return nil }))
+
+	// Setup the test
+	type testCaseEntry struct {
+		peerIndex int
+		wantBlks  string
+		wantHaves string
+	}
+
+	type testCaseExp struct {
+		blks      string
+		haves     string
+		dontHaves string
+	}
+
+	type testCase struct {
+		only bool
+		wl   testCaseEntry
+		exp  testCaseExp
+	}
+
+	testCases := []testCase{
+		// Peer 0 has access to everything: want-block `a` succeeds.
+		{
+			wl: testCaseEntry{
+				peerIndex: 0,
+				wantBlks:  "a",
+			},
+			exp: testCaseExp{
+				blks: "a",
+			},
+		},
+		// Peer 0 has access to everything: want-have `b` succeeds
+		// (`1` is not in the blockstore).
+		{
+			wl: testCaseEntry{
+				peerIndex: 0,
+				wantHaves: "b1",
+			},
+			exp: testCaseExp{
+				haves:     "b",
+				dontHaves: "1",
+			},
+		},
+		// Peer 1 has access to [c, d]: want-have `a` results in dont-have.
+		{
+			wl: testCaseEntry{
+				peerIndex: 1,
+				wantHaves: "ac",
+			},
+			exp: testCaseExp{
+				haves:     "c",
+				dontHaves: "a",
+			},
+		},
+		// Peer 1 has access to [c, d]: want-block `b` results in dont-have.
+		{
+			wl: testCaseEntry{
+				peerIndex: 1,
+				wantBlks:  "bd",
+			},
+			exp: testCaseExp{
+				blks:      "d",
+				dontHaves: "b",
+			},
+		},
+		// Peer 2 has access to [d]: want-have `a` and want-block `b` result in dont-have.
+		{
+			wl: testCaseEntry{
+				peerIndex: 2,
+				wantHaves: "a",
+				wantBlks:  "bcd1",
+			},
+			exp: testCaseExp{
+				haves:     "",
+				blks:      "d",
+				dontHaves: "abc1",
+			},
+		},
+	}
+
+	var onlyTestCases []testCase
+	for _, testCase := range testCases {
+		if testCase.only {
+			onlyTestCases = append(onlyTestCases, testCase)
+		}
+	}
+	if len(onlyTestCases) > 0 {
+		testCases = onlyTestCases
+	}
+
+	for i, testCase := range testCases {
+		// Create want requests
+		wl := testCase.wl
+
+		t.Logf("test case %v: Peer%v / want-blocks '%s' / want-haves '%s'",
+			i, wl.peerIndex, wl.wantBlks, wl.wantHaves)
+
+		wantBlks := strings.Split(wl.wantBlks, "")
+		wantHaves := strings.Split(wl.wantHaves, "")
+
+		partnerWantBlocksHaves(e, wantBlks, wantHaves, true, peerIDs[wl.peerIndex])
+
+		// Check result
+		exp := testCase.exp
+
+		next := <-e.Outbox()
+		envelope := <-next
+
+		expBlks := strings.Split(exp.blks, "")
+		expHaves := strings.Split(exp.haves, "")
+		expDontHaves := strings.Split(exp.dontHaves, "")
+
+		err := checkOutput(t, e, envelope, expBlks, expHaves, expDontHaves)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestPeerBlockFilterMutability(t *testing.T) {
+	test.Flaky(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	// Generate a few keys
+	keys := []string{"a", "b", "c", "d"}
+	blks := make([]blocks.Block, 0, len(keys))
+	for _, letter := range keys {
+		block := blocks.NewBlock([]byte(letter))
+		blks = append(blks, block)
+	}
+
+	partnerID := libp2ptest.RandPeerIDFatal(t)
+
+	// Setup the main peer
+	fpt := &fakePeerTagger{}
+	sl := NewTestScoreLedger(shortTerm, nil, clock.New())
+	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	if err := bs.PutMany(ctx, blks); err != nil {
+		t.Fatal(err)
+	}
+
+	filterAllowList := make(map[cid.Cid]bool)
+
+	e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4),
+		WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool {
+			return filterAllowList[c]
+		}),
+	)
+	e.StartWorkers(ctx, process.WithTeardown(func() error { return nil }))
+
+	// Setup the test
+	type testCaseEntry struct {
+		allowList string
+		wantBlks  string
+		wantHaves string
+	}
+
+	type testCaseExp struct {
+		blks      string
+		haves     string
+		dontHaves string
+	}
+
+	type testCase struct {
+		only bool
+		wls  []testCaseEntry
+		exps []testCaseExp
+	}
+
+	testCases := []testCase{
+		{
+			wls: []testCaseEntry{
+				{
+					// Peer has no access & requests a want-block
+					allowList: "",
+					wantBlks:  "a",
+				},
+				{
+					// Then Peer is allowed access to a
+					allowList: "a",
+					wantBlks:  "a",
+				},
+			},
+			exps: []testCaseExp{
+				{
+					dontHaves: "a",
+				},
+				{
+					blks: "a",
+				},
+			},
+		},
+		{
+			wls: []testCaseEntry{
+				{
+					// Peer has access to bc
+					allowList: "bc",
+					wantHaves: "bc",
+				},
+				{
+					// Then Peer loses access to b
+					allowList: "c",
+					wantBlks:  "bc", // Note: We request a block here to force a response from the node
+				},
+			},
+			exps: []testCaseExp{
+				{
+					haves: "bc",
+				},
+				{
+					blks:      "c",
+					dontHaves: "b",
+				},
+			},
+		},
+		{
+			wls: []testCaseEntry{
+				{
+					// Peer has no access & requests a want-have
+					allowList: "",
+					wantHaves: "d",
+				},
+				{
+					// Then Peer gains access to d
+					allowList: "d",
+					wantHaves: "d",
+				},
+			},
+			exps: []testCaseExp{
+				{
+					dontHaves: "d",
+				},
+				{
+					haves: "d",
+				},
+			},
+		},
+	}
+
+	var onlyTestCases []testCase
+	for _, testCase := range testCases {
+		if testCase.only {
+			onlyTestCases = append(onlyTestCases, testCase)
+		}
+	}
+	if len(onlyTestCases) > 0 {
+		testCases = onlyTestCases
+ } + + for i, testCase := range testCases { + for j := range testCase.wls { + wl := testCase.wls[j] + exp := testCase.exps[j] + + // Create wants requests + t.Logf("test case %v, %v: allow-list '%s' / want-blocks '%s' / want-haves '%s'", + i, j, wl.allowList, wl.wantBlks, wl.wantHaves) + + allowList := strings.Split(wl.allowList, "") + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + + // Update the allow list + filterAllowList = make(map[cid.Cid]bool) + for _, letter := range allowList { + block := blocks.NewBlock([]byte(letter)) + filterAllowList[block.Cid()] = true + } + + // Send the request + partnerWantBlocksHaves(e, wantBlks, wantHaves, true, partnerID) + + // Check result + next := <-e.Outbox() + envelope := <-next + + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envelope, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestTaggingPeers(t *testing.T) { + test.Flaky(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") + + keys := []string{"a", "b", "c", "d", "e"} + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + if err := sanfrancisco.Blockstore.Put(context.Background(), block); err != nil { + t.Fatal(err) + } + } + partnerWantBlocks(sanfrancisco.Engine, keys, seattle.Peer) + next := <-sanfrancisco.Engine.Outbox() + envelope := <-next + + if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 1 { + t.Fatal("Incorrect number of peers tagged") + } + envelope.Sent() + <-sanfrancisco.Engine.Outbox() + sanfrancisco.PeerTagger.wait(sanfrancisco.Engine.tagQueued) + if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 0 { + t.Fatal("Peers should be untagged but weren't") + } +} + +func TestTaggingUseful(t *testing.T) { + test.Flaky(t) + + peerSampleIntervalHalf := 10 * time.Millisecond + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + sampleCh := make(chan struct{}) + mockClock := clock.NewMock() + me := newTestEngineWithSampling(ctx, "engine", peerSampleIntervalHalf*2, sampleCh, mockClock) + mockClock.Add(1 * time.Millisecond) + friend := peer.ID("friend") + + block := blocks.NewBlock([]byte("foobar")) + msg := message.New(false) + msg.AddBlock(block) + + for i := 0; i < 3; i++ { + if untagged := me.PeerTagger.count(me.Engine.tagUseful); untagged != 0 { + t.Fatalf("%d peers should be untagged but weren't", untagged) + } + mockClock.Add(peerSampleIntervalHalf) + me.Engine.MessageSent(friend, msg) + + mockClock.Add(peerSampleIntervalHalf) + <-sampleCh + + if tagged := me.PeerTagger.count(me.Engine.tagUseful); tagged != 1 { + t.Fatalf("1 peer should be tagged, but %d were", tagged) + } + + for j := 0; j < longTermRatio; j++ { + mockClock.Add(peerSampleIntervalHalf * 2) + <-sampleCh + } + } + + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { + t.Fatal("peers should still be tagged due to long-term usefulness") + } + + for j := 0; j < longTermRatio; j++ { + mockClock.Add(peerSampleIntervalHalf * 2) + <-sampleCh + } + + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { + t.Fatal("peers should still be tagged due to long-term usefulness") + } + + for j := 0; j < longTermRatio; j++ { + mockClock.Add(peerSampleIntervalHalf * 2) + <-sampleCh + } + + if 
me.PeerTagger.count(me.Engine.tagUseful) != 0 { + t.Fatal("peers should finally be untagged") + } +} + +func partnerWantBlocks(e *Engine, wantBlocks []string, partner peer.ID) { + add := message.New(false) + for i, letter := range wantBlocks { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), int32(len(wantBlocks)-i), pb.Message_Wantlist_Block, true) + } + e.MessageReceived(context.Background(), partner, add) +} + +func partnerWantBlocksHaves(e *Engine, wantBlocks []string, wantHaves []string, sendDontHave bool, partner peer.ID) { + add := message.New(false) + priority := int32(len(wantHaves) + len(wantBlocks)) + for _, letter := range wantHaves { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) + priority-- + } + for _, letter := range wantBlocks { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Block, sendDontHave) + priority-- + } + e.MessageReceived(context.Background(), partner, add) +} + +func partnerCancels(e *Engine, keys []string, partner peer.ID) { + cancels := message.New(false) + for _, k := range keys { + block := blocks.NewBlock([]byte(k)) + cancels.Cancel(block.Cid()) + } + e.MessageReceived(context.Background(), partner, cancels) +} + +type envChan <-chan *Envelope + +func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelope) { + ctx, cancel := context.WithTimeout(context.Background(), t) + defer cancel() + + if next == nil { + next = <-e.Outbox() // returns immediately + } + + select { + case env, ok := <-next: // blocks till next envelope ready + if !ok { + log.Warnf("got closed channel") + return nil, nil + } + return nil, env + case <-ctx.Done(): + // log.Warnf("got timeout") + } + return next, nil +} + +func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { + for _, keys := range expected { + next := <-e.Outbox() + envelope := <-next + received := envelope.Message.Blocks() + // Verify payload message length + if len(received) != len(keys) { + return errors.New(fmt.Sprintln("# blocks received", len(received), "# blocks expected", len(keys))) + } + // Verify payload message contents + for _, k := range keys { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range received { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(fmt.Sprintln("received", received, "expected", string(expected.RawData()))) + } + } + } + return nil +} + +func stringsComplement(set, subset []string) []string { + m := make(map[string]struct{}) + for _, letter := range subset { + m[letter] = struct{}{} + } + var complement []string + for _, letter := range set { + if _, exists := m[letter]; !exists { + complement = append(complement, letter) + } + } + return complement +} + +func TestWantlistDoesNotGrowPastLimit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const limit = 32 + warsaw := newTestEngine(ctx, "warsaw", WithMaxQueuedWantlistEntriesPerPeer(limit)) + riga := newTestEngine(ctx, "riga") + + // Send in two messages to test reslicing. 
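+ // Each message below carries limit*3/4 entries, so the combined total of
+ // limit*3/2 entries overflows the per-peer limit and must be truncated.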
+ for i := 2; i != 0; i-- {
+ m := message.New(false)
+ for j := limit * 3 / 4; j != 0; j-- {
+ m.AddEntry(blocks.NewBlock([]byte(fmt.Sprint(i, j))).Cid(), 0, pb.Message_Wantlist_Block, true)
+ }
+ warsaw.Engine.MessageReceived(ctx, riga.Peer, m)
+ }
+
+ if warsaw.Peer == riga.Peer {
+ t.Fatal("Sanity Check: Peers have same Key!")
+ }
+
+ wl := warsaw.Engine.WantlistForPeer(riga.Peer)
+ if len(wl) != limit {
+ t.Fatal("wantlist does not match limit", len(wl))
+ }
+}
+
+func TestWantlistGrowsToLimit(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ const limit = 32
+ warsaw := newTestEngine(ctx, "warsaw", WithMaxQueuedWantlistEntriesPerPeer(limit))
+ riga := newTestEngine(ctx, "riga")
+
+ // Send one message with exactly `limit` entries.
+ m := message.New(false)
+ for j := limit; j != 0; j-- {
+ m.AddEntry(blocks.NewBlock([]byte(fmt.Sprint(j))).Cid(), 0, pb.Message_Wantlist_Block, true)
+ }
+ warsaw.Engine.MessageReceived(ctx, riga.Peer, m)
+
+ if warsaw.Peer == riga.Peer {
+ t.Fatal("Sanity Check: Peers have same Key!")
+ }
+
+ wl := warsaw.Engine.WantlistForPeer(riga.Peer)
+ if len(wl) != limit {
+ t.Fatal("wantlist does not match limit", len(wl))
+ }
+}
+
+func TestIgnoresCidsAboveLimit(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ const cidLimit = 64
+ warsaw := newTestEngine(ctx, "warsaw", WithMaxCidSize(cidLimit))
+ riga := newTestEngine(ctx, "riga")
+
+ // Send one message with a regular CID and one CID above the size limit.
+ m := message.New(true)
+
+ m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true)
+
+ var hash mh.Multihash
+ hash = binary.AppendUvarint(hash, mh.BLAKE3)
+ hash = binary.AppendUvarint(hash, cidLimit)
+ startOfDigest := len(hash)
+ hash = append(hash, make(mh.Multihash, cidLimit)...)
+ rand.Read(hash[startOfDigest:])
+ m.AddEntry(cid.NewCidV1(cid.Raw, hash), 0, pb.Message_Wantlist_Block, true)
+
+ warsaw.Engine.MessageReceived(ctx, riga.Peer, m)
+
+ if warsaw.Peer == riga.Peer {
+ t.Fatal("Sanity Check: Peers have same Key!")
+ }
+
+ wl := warsaw.Engine.WantlistForPeer(riga.Peer)
+ if len(wl) != 1 {
+ t.Fatal("wantlist added a CID above the size limit")
+ }
+}
+
+func TestKillConnectionForInlineCid(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ warsaw := newTestEngine(ctx, "warsaw")
+ riga := newTestEngine(ctx, "riga")
+
+ if warsaw.Peer == riga.Peer {
+ t.Fatal("Sanity Check: Peers have same Key!")
+ }
+
+ // Send a message with a regular CID and an inline (identity-hashed) CID.
+ m := message.New(true)
+
+ m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true)
+
+ var hash mh.Multihash
+ hash = binary.AppendUvarint(hash, mh.IDENTITY)
+ const digestSize = 32
+ hash = binary.AppendUvarint(hash, digestSize)
+ startOfDigest := len(hash)
+ hash = append(hash, make(mh.Multihash, digestSize)...)
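+ // At this point hash is uvarint(IDENTITY) ++ uvarint(32) ++ 32 zero bytes;
+ // randomizing the digest below yields a well-formed inline CID.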
+ rand.Read(hash[startOfDigest:])
+ m.AddEntry(cid.NewCidV1(cid.Raw, hash), 0, pb.Message_Wantlist_Block, true)
+
+ if !warsaw.Engine.MessageReceived(ctx, riga.Peer, m) {
+ t.Fatal("connection was not killed when receiving an inline CID in a want")
+ }
+
+ m.Reset(true)
+
+ m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true)
+ m.Cancel(cid.NewCidV1(cid.Raw, hash))
+
+ if !warsaw.Engine.MessageReceived(ctx, riga.Peer, m) {
+ t.Fatal("connection was not killed when receiving an inline CID in a cancel")
+ }
+}
diff --git a/bitswap/server/internal/decision/ewma.go b/bitswap/server/internal/decision/ewma.go
new file mode 100644
index 0000000000..80d7d86b6d
--- /dev/null
+++ b/bitswap/server/internal/decision/ewma.go
@@ -0,0 +1,5 @@
+package decision
+
+func ewma(old, new, alpha float64) float64 {
+ return new*alpha + (1-alpha)*old
+}
diff --git a/bitswap/server/internal/decision/peer_ledger.go b/bitswap/server/internal/decision/peer_ledger.go
new file mode 100644
index 0000000000..cc7a5e1ac2
--- /dev/null
+++ b/bitswap/server/internal/decision/peer_ledger.go
@@ -0,0 +1,156 @@
+package decision
+
+import (
+ wl "github.com/ipfs/boxo/bitswap/client/wantlist"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+type peerLedger struct {
+ // these two maps are inversions of each other
+ peers map[peer.ID]map[cid.Cid]entry
+ cids map[cid.Cid]map[peer.ID]entry
+}
+
+func newPeerLedger() *peerLedger {
+ return &peerLedger{
+ peers: make(map[peer.ID]map[cid.Cid]entry),
+ cids: make(map[cid.Cid]map[peer.ID]entry),
+ }
+}
+
+func (l *peerLedger) Wants(p peer.ID, e wl.Entry) {
+ cids, ok := l.peers[p]
+ if !ok {
+ cids = make(map[cid.Cid]entry)
+ l.peers[p] = cids
+ }
+ cids[e.Cid] = entry{e.Priority, e.WantType}
+
+ m, ok := l.cids[e.Cid]
+ if !ok {
+ m = make(map[peer.ID]entry)
+ l.cids[e.Cid] = m
+ }
+ m[p] = entry{e.Priority, e.WantType}
+}
+
+// CancelWant removes the want and returns false if no wantlist was being
+// tracked for the peer.
+func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) bool {
+ wants, ok := l.peers[p]
+ if !ok {
+ return false
+ }
+ delete(wants, k)
+ if len(wants) == 0 {
+ delete(l.peers, p)
+ }
+
+ l.removePeerFromCid(p, k)
+ return true
+}
+
+// CancelWantWithType will not cancel a pending want-block if the cancel is
+// only for a want-have.
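+//
+// A sketch of the intended behaviour, assuming the want was registered via
+// Wants with want-type Block:
+//
+//	l.CancelWantWithType(p, k, pb.Message_Wantlist_Have)  // no-op: the want-block survives
+//	l.CancelWantWithType(p, k, pb.Message_Wantlist_Block) // removes the entry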
+func (l *peerLedger) CancelWantWithType(p peer.ID, k cid.Cid, typ pb.Message_Wantlist_WantType) {
+ wants, ok := l.peers[p]
+ if !ok {
+ return
+ }
+ e, ok := wants[k]
+ if !ok {
+ return
+ }
+ if typ == pb.Message_Wantlist_Have && e.WantType == pb.Message_Wantlist_Block {
+ return
+ }
+
+ delete(wants, k)
+ if len(wants) == 0 {
+ delete(l.peers, p)
+ }
+
+ l.removePeerFromCid(p, k)
+}
+
+func (l *peerLedger) removePeerFromCid(p peer.ID, k cid.Cid) {
+ m, ok := l.cids[k]
+ if !ok {
+ return
+ }
+ delete(m, p)
+ if len(m) == 0 {
+ delete(l.cids, k)
+ }
+}
+
+type entryForPeer struct {
+ Peer peer.ID
+ entry
+}
+
+type entry struct {
+ Priority int32
+ WantType pb.Message_Wantlist_WantType
+}
+
+func (l *peerLedger) Peers(k cid.Cid) []entryForPeer {
+ m, ok := l.cids[k]
+ if !ok {
+ return nil
+ }
+ peers := make([]entryForPeer, 0, len(m))
+ for p, e := range m {
+ peers = append(peers, entryForPeer{p, e})
+ }
+ return peers
+}
+
+func (l *peerLedger) CollectPeerIDs() []peer.ID {
+ peers := make([]peer.ID, 0, len(l.peers))
+ for p := range l.peers {
+ peers = append(peers, p)
+ }
+ return peers
+}
+
+func (l *peerLedger) WantlistSizeForPeer(p peer.ID) int {
+ return len(l.peers[p])
+}
+
+func (l *peerLedger) WantlistForPeer(p peer.ID) []wl.Entry {
+ cids, ok := l.peers[p]
+ if !ok {
+ return nil
+ }
+
+ entries := make([]wl.Entry, 0, len(cids))
+ for c, e := range cids {
+ entries = append(entries, wl.Entry{
+ Cid: c,
+ Priority: e.Priority,
+ WantType: e.WantType,
+ })
+ }
+ return entries
+}
+
+// ClearPeerWantlist does not make an effort to fully erase the wantlist from
+// memory. This is intended for when the peer is still connected and the map
+// capacity could be reused. If the memory should be freed, use
+// PeerDisconnected instead.
+func (l *peerLedger) ClearPeerWantlist(p peer.ID) {
+ cids, ok := l.peers[p]
+ if !ok {
+ return
+ }
+
+ for c := range cids {
+ l.removePeerFromCid(p, c)
+ }
+}
+
+func (l *peerLedger) PeerDisconnected(p peer.ID) {
+ l.ClearPeerWantlist(p)
+ delete(l.peers, p)
+}
diff --git a/bitswap/server/internal/decision/scoreledger.go b/bitswap/server/internal/decision/scoreledger.go
new file mode 100644
index 0000000000..dbcf69d855
--- /dev/null
+++ b/bitswap/server/internal/decision/scoreledger.go
@@ -0,0 +1,353 @@
+package decision
+
+import (
+ "sync"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+const (
+ // the alpha for the EWMA used to track short term usefulness
+ shortTermAlpha = 0.5
+
+ // the alpha for the EWMA used to track long term usefulness
+ longTermAlpha = 0.05
+
+ // how frequently the engine should sample usefulness. Peers that
+ // interact every shortTerm time period are considered "active".
+ shortTerm = 10 * time.Second
+
+ // long term ratio defines what "long term" means in terms of the
+ // shortTerm duration. Peers that interact once every longTermRatio
+ // short-term periods are considered useful over the long term.
+ longTermRatio = 10
+
+ // long/short term scores for tagging peers
+ longTermScore = 10 // this is a high tag but it grows _very_ slowly.
+ shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer.
+)
+
+// Stores the data exchange relationship between two peers.
+type scoreledger struct {
+ // Partner is the remote Peer.
+ partner peer.ID
+
+ // tracks bytes sent...
+ bytesSent uint64
+
+ // ...and received.
+ bytesRecv uint64
+
+ // lastExchange is the time of the last data exchange.
+ lastExchange time.Time
+
+ // These scores keep track of how useful we think this peer is. Short
+ // tracks short-term usefulness and long tracks long-term usefulness.
+ shortScore, longScore float64
+
+ // Score keeps track of the score used in the peer tagger. We track it
+ // here to avoid unnecessarily updating the tags in the connection manager.
+ score int
+
+ // exchangeCount is the number of exchanges with this peer
+ exchangeCount uint64
+
+ // the record lock
+ lock sync.RWMutex
+
+ clock clock.Clock
+}
+
+// Receipt is a summary of the ledger for a given peer
+// collecting various pieces of aggregated data for external
+// reporting purposes.
+type Receipt struct {
+ Peer string
+ Value float64
+ Sent uint64
+ Recv uint64
+ Exchanged uint64
+}
+
+// Increments the sent counter.
+func (l *scoreledger) AddToSentBytes(n int) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ l.exchangeCount++
+ l.lastExchange = l.clock.Now()
+ l.bytesSent += uint64(n)
+}
+
+// Increments the received counter.
+func (l *scoreledger) AddToReceivedBytes(n int) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ l.exchangeCount++
+ l.lastExchange = l.clock.Now()
+ l.bytesRecv += uint64(n)
+}
+
+// Returns the Receipt for this ledger record.
+func (l *scoreledger) Receipt() *Receipt {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+
+ return &Receipt{
+ Peer: l.partner.String(),
+ Value: float64(l.bytesSent) / float64(l.bytesRecv+1),
+ Sent: l.bytesSent,
+ Recv: l.bytesRecv,
+ Exchanged: l.exchangeCount,
+ }
+}
+
+// DefaultScoreLedger is used by Engine as the default ScoreLedger.
+type DefaultScoreLedger struct {
+ // the score func
+ scorePeer ScorePeerFunc
+ // is closed on Close
+ closing chan struct{}
+ // protects the fields immediately below
+ lock sync.RWMutex
+ // ledgerMap lists score ledgers by their partner key.
+ ledgerMap map[peer.ID]*scoreledger
+ // how frequently the engine should sample peer usefulness
+ peerSampleInterval time.Duration
+ // used by the tests to detect when a sample is taken
+ sampleCh chan struct{}
+ clock clock.Clock
+}
+
+// scoreWorker keeps track of how "useful" our peers are, updating scores in the
+// connection manager.
+//
+// It does this by tracking two scores: short-term usefulness and long-term
+// usefulness. Short-term usefulness is sampled frequently and highly weights
+// new observations. Long-term usefulness is sampled less frequently and
+// weights long-term trends more heavily.
+//
+// In practice, we do this by keeping two EWMAs. If we see an interaction
+// within the sampling period, we record the score, otherwise, we record a 0.
+// The short-term one has a high alpha and is sampled every shortTerm period.
+// The long-term one has a low alpha and is sampled every
+// longTermRatio*shortTerm period.
+//
+// To calculate the final score, we sum the short-term and long-term scores,
+// then adjust the result by ±25% based on our debt ratio. Peers that have
+// historically been more useful to us than we are to them get the highest
+// score.
+func (dsl *DefaultScoreLedger) scoreWorker() {
+ ticker := dsl.clock.Ticker(dsl.peerSampleInterval)
+ defer ticker.Stop()
+
+ type update struct {
+ peer peer.ID
+ score int
+ }
+ var (
+ lastShortUpdate, lastLongUpdate time.Time
+ updates []update
+ )
+
+ for i := 0; ; i = (i + 1) % longTermRatio {
+ var now time.Time
+ select {
+ case now = <-ticker.C:
+ case <-dsl.closing:
+ return
+ }
+
+ // The long term update ticks every `longTermRatio` short
+ // intervals.
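+ // With the defaults above (shortTerm = 10s, longTermRatio = 10),
+ // that is roughly every 100 seconds.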
+ updateLong := i == 0
+
+ dsl.lock.Lock()
+ for _, l := range dsl.ledgerMap {
+ l.lock.Lock()
+
+ // Update the short-term score.
+ if l.lastExchange.After(lastShortUpdate) {
+ l.shortScore = ewma(l.shortScore, shortTermScore, shortTermAlpha)
+ } else {
+ l.shortScore = ewma(l.shortScore, 0, shortTermAlpha)
+ }
+
+ // Update the long-term score.
+ if updateLong {
+ if l.lastExchange.After(lastLongUpdate) {
+ l.longScore = ewma(l.longScore, longTermScore, longTermAlpha)
+ } else {
+ l.longScore = ewma(l.longScore, 0, longTermAlpha)
+ }
+ }
+
+ // Calculate the new score.
+ //
+ // The accounting score adjustment prefers peers _we_
+ // need over peers that need us. This doesn't help with
+ // leeching.
+ var lscore float64
+ if l.bytesRecv == 0 {
+ lscore = 0
+ } else {
+ lscore = float64(l.bytesRecv) / float64(l.bytesRecv+l.bytesSent)
+ }
+ score := int((l.shortScore + l.longScore) * (lscore*.5 + .75))
+
+ // Avoid updating the connection manager unless there's a change. This can be expensive.
+ if l.score != score {
+ // put these in a list so we can perform the updates outside the _global_ lock.
+ updates = append(updates, update{l.partner, score})
+ l.score = score
+ }
+ l.lock.Unlock()
+ }
+ dsl.lock.Unlock()
+
+ // record the times.
+ lastShortUpdate = now
+ if updateLong {
+ lastLongUpdate = now
+ }
+
+ // apply the updates
+ for _, update := range updates {
+ dsl.scorePeer(update.peer, update.score)
+ }
+ // Keep the memory. It's not much and it saves us from having to allocate.
+ updates = updates[:0]
+
+ // Used by the tests
+ if dsl.sampleCh != nil {
+ dsl.sampleCh <- struct{}{}
+ }
+ }
+}
+
+// Returns the score ledger for the given peer or nil if that peer
+// is not on the ledger.
+func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger {
+ // Take a read lock (as it's less expensive) to check if we have
+ // a ledger for the peer.
+ dsl.lock.RLock()
+ l, ok := dsl.ledgerMap[p]
+ dsl.lock.RUnlock()
+ if ok {
+ return l
+ }
+ return nil
+}
+
+// Returns a new scoreledger.
+func newScoreLedger(p peer.ID, clock clock.Clock) *scoreledger {
+ return &scoreledger{
+ partner: p,
+ clock: clock,
+ }
+}
+
+// Lazily instantiates a ledger.
+func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger {
+ l := dsl.find(p)
+ if l != nil {
+ return l
+ }
+
+ // There's no ledger, so take a write lock, then check again and
+ // create the ledger if necessary.
+ dsl.lock.Lock()
+ defer dsl.lock.Unlock()
+ l, ok := dsl.ledgerMap[p]
+ if !ok {
+ l = newScoreLedger(p, dsl.clock)
+ dsl.ledgerMap[p] = l
+ }
+ return l
+}
+
+// GetReceipt returns aggregated data about communication with a given peer.
+func (dsl *DefaultScoreLedger) GetReceipt(p peer.ID) *Receipt {
+ l := dsl.find(p)
+ if l != nil {
+ return l.Receipt()
+ }
+
+ // Return a blank receipt otherwise.
+ return &Receipt{
+ Peer: p.String(),
+ Value: 0,
+ Sent: 0,
+ Recv: 0,
+ Exchanged: 0,
+ }
+}
+
+// Starts the default ledger sampling process.
+func (dsl *DefaultScoreLedger) Start(scorePeer ScorePeerFunc) {
+ dsl.init(scorePeer)
+ go dsl.scoreWorker()
+}
+
+// Stops the sampling process.
+func (dsl *DefaultScoreLedger) Stop() {
+ close(dsl.closing)
+}
+
+// Initializes the score ledger.
+func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) {
+ dsl.lock.Lock()
+ defer dsl.lock.Unlock()
+ dsl.scorePeer = scorePeer
+}
+
+// Increments the sent counter for the given peer.
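+// The ledger for the peer is created lazily on first use (see findOrCreate).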
+func (dsl *DefaultScoreLedger) AddToSentBytes(p peer.ID, n int) {
+ l := dsl.findOrCreate(p)
+ l.AddToSentBytes(n)
+}
+
+// Increments the received counter for the given peer.
+func (dsl *DefaultScoreLedger) AddToReceivedBytes(p peer.ID, n int) {
+ l := dsl.findOrCreate(p)
+ l.AddToReceivedBytes(n)
+}
+
+// PeerConnected should be called when a new peer connects, meaning
+// we should open accounting.
+func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) {
+ dsl.lock.Lock()
+ defer dsl.lock.Unlock()
+ _, ok := dsl.ledgerMap[p]
+ if !ok {
+ dsl.ledgerMap[p] = newScoreLedger(p, dsl.clock)
+ }
+}
+
+// PeerDisconnected should be called when a peer disconnects to
+// clean up the accounting.
+func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) {
+ dsl.lock.Lock()
+ defer dsl.lock.Unlock()
+ delete(dsl.ledgerMap, p)
+}
+
+// Creates a new instance of the default score ledger.
+func NewDefaultScoreLedger() *DefaultScoreLedger {
+ return &DefaultScoreLedger{
+ ledgerMap: make(map[peer.ID]*scoreledger),
+ closing: make(chan struct{}),
+ peerSampleInterval: shortTerm,
+ clock: clock.New(),
+ }
+}
+
+// Creates a new instance of the default score ledger with testing
+// parameters.
+func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) *DefaultScoreLedger {
+ dsl := NewDefaultScoreLedger()
+ dsl.peerSampleInterval = peerSampleInterval
+ dsl.sampleCh = sampleCh
+ dsl.clock = clock
+ return dsl
+}
diff --git a/bitswap/server/internal/decision/taskmerger.go b/bitswap/server/internal/decision/taskmerger.go
new file mode 100644
index 0000000000..191200e584
--- /dev/null
+++ b/bitswap/server/internal/decision/taskmerger.go
@@ -0,0 +1,87 @@
+package decision
+
+import (
+ "github.com/ipfs/go-peertaskqueue/peertask"
+)
+
+// taskData is extra data associated with each task in the request queue
+type taskData struct {
+ // Tasks can be want-have or want-block
+ IsWantBlock bool
+ // Whether to immediately send a response if the block is not found
+ SendDontHave bool
+ // The size of the block corresponding to the task
+ BlockSize int
+ // Whether the block was found
+ HaveBlock bool
+}
+
+type taskMerger struct{}
+
+func newTaskMerger() *taskMerger {
+ return &taskMerger{}
+}
+
+// The request queue uses this method to decide if a newly pushed task has any
+// new information beyond the tasks with the same Topic (CID) in the queue.
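+//
+// Roughly: a new task is informative if it upgrades a queued want-have to a
+// want-block, or if it is the first task for this CID that carries a block
+// size (HaveBlock).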
+func (*taskMerger) HasNewInfo(task peertask.Task, existing []*peertask.Task) bool { + haveSize := false + isWantBlock := false + for _, et := range existing { + etd := et.Data.(*taskData) + if etd.HaveBlock { + haveSize = true + } + + if etd.IsWantBlock { + isWantBlock = true + } + } + + // If there is no active want-block and the new task is a want-block, + // the new task is better + newTaskData := task.Data.(*taskData) + if !isWantBlock && newTaskData.IsWantBlock { + return true + } + + // If there is no size information for the CID and the new task has + // size information, the new task is better + if !haveSize && newTaskData.HaveBlock { + return true + } + + return false +} + +// The request queue uses Merge to merge a newly pushed task with an existing +// task with the same Topic (CID) +func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { + newTask := task.Data.(*taskData) + existingTask := existing.Data.(*taskData) + + // If we now have block size information, update the task with + // the new block size + if !existingTask.HaveBlock && newTask.HaveBlock { + existingTask.HaveBlock = newTask.HaveBlock + existingTask.BlockSize = newTask.BlockSize + } + + // If replacing a want-have with a want-block + if !existingTask.IsWantBlock && newTask.IsWantBlock { + // Change the type from want-have to want-block + existingTask.IsWantBlock = true + // If the want-have was a DONT_HAVE, or the want-block has a size + if !existingTask.HaveBlock || newTask.HaveBlock { + // Update the entry size + existingTask.HaveBlock = newTask.HaveBlock + existing.Work = task.Work + } + } + + // If the task is a want-block, make sure the entry size is equal + // to the block size (because we will send the whole block) + if existingTask.IsWantBlock && existingTask.HaveBlock { + existing.Work = existingTask.BlockSize + } +} diff --git a/bitswap/server/internal/decision/taskmerger_test.go b/bitswap/server/internal/decision/taskmerger_test.go new file mode 100644 index 0000000000..e0ce46ed6f --- /dev/null +++ b/bitswap/server/internal/decision/taskmerger_test.go @@ -0,0 +1,366 @@ +package decision + +import ( + "testing" + + "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/boxo/internal/test" + "github.com/ipfs/go-peertaskqueue" + "github.com/ipfs/go-peertaskqueue/peertask" +) + +func TestPushHaveVsBlock(t *testing.T) { + test.Flaky(t) + + partner := testutil.GeneratePeers(1)[0] + + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
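+ // Pop with a work target large enough that all merged tasks for the
+ // partner come back in a single batch.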
+ _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + isWantBlock := popped[0].Data.(*taskData).IsWantBlock + if isWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, isWantBlock) + } + } + const wantBlockType = true + const wantHaveType = false + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHaveType) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlockType) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlockType) + // want-block overwrites want-have + runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlockType) +} + +func TestPushSizeInfo(t *testing.T) { + test.Flaky(t) + + partner := testutil.GeneratePeers(1)[0] + + wantBlockBlockSize := 10 + wantBlockDontHaveBlockSize := 0 + wantHaveBlockSize := 10 + wantHaveDontHaveBlockSize := 0 + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expSize int, expBlockSize int, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
+ _, popped, _ := ptq.PopTasks(100)
+ if len(popped) != 1 {
+ t.Fatalf("Expected 1 task, received %d tasks", len(popped))
+ }
+ if popped[0].Work != expSize {
+ t.Fatalf("Expected task.Work to be %d, received %d", expSize, popped[0].Work)
+ }
+ td := popped[0].Data.(*taskData)
+ if td.BlockSize != expBlockSize {
+ t.Fatalf("Expected task.BlockSize to be %d, received %d", expBlockSize, td.BlockSize)
+ }
+ if td.IsWantBlock != expIsWantBlock {
+ t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, td.IsWantBlock)
+ }
+ }
+
+ isWantBlock := true
+ isWantHave := false
+
+ // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE)
+ runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock)
+ // want-have (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE)
+ runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock)
+ // want-block with size should update existing want-block (DONT_HAVE)
+ runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+ // want-have with size should update existing want-block (DONT_HAVE) size,
+ // but leave it as a want-block (ie should not change it to want-have)
+ runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock)
+
+ // want-block (DONT_HAVE) size should not update existing want-block with size
+ runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+ // want-have (DONT_HAVE) should have no effect on existing want-block with size
+ runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+ // want-block with size should have no effect on existing want-block with size
+ runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+ // want-have with size should have no effect on existing want-block with size
+ runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+
+ // want-block (DONT_HAVE) should update type and entry size of existing want-have (DONT_HAVE)
+ runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock)
+ // want-have (DONT_HAVE) should have no effect on existing want-have (DONT_HAVE)
+ runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, wantHaveDontHave.Work, wantHaveDontHaveBlockSize, isWantHave)
+ // want-block with size should update existing want-have (DONT_HAVE)
+ runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+ // want-have with size should update existing want-have (DONT_HAVE)
+ runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave)
+
+ // want-block (DONT_HAVE) should update type and entry size of existing want-have with size
+ runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock)
+ // want-have (DONT_HAVE) should not update existing want-have with size
+ runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, wantHave.Work, wantHaveBlockSize, isWantHave)
+ // want-block with size should update type and entry size of existing want-have with size
+ runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock)
+ // want-have should have no effect on existing want-have
+ runTestCase([]peertask.Task{wantHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave)
+}
+
+func TestPushHaveVsBlockActive(t *testing.T) {
+ test.Flaky(t)
+
+ partner := testutil.GeneratePeers(1)[0]
+
+ wantBlock := peertask.Task{
+ Topic: "1",
+ Priority: 10,
+ Work: 10,
+ Data: &taskData{
+ IsWantBlock: true,
+ BlockSize: 10,
+ HaveBlock: true,
+ SendDontHave: false,
+ },
+ }
+ wantHave := peertask.Task{
+ Topic: "1",
+ Priority: 10,
+ Work: 1,
+ Data: &taskData{
+ IsWantBlock: false,
+ BlockSize: 10,
+ HaveBlock: true,
+ SendDontHave: false,
+ },
+ }
+
+ runTestCase := func(tasks []peertask.Task, expCount int) {
+ tasks = cloneTasks(tasks)
+ ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger()))
+ var popped []*peertask.Task
+ for _, task := range tasks {
+ // Push the task
+ ptq.PushTasks(partner, task)
+ // Pop the task (which makes it active)
+ _, poppedTasks, _ := ptq.PopTasks(10)
+ popped = append(popped, poppedTasks...)
+ }
+ if len(popped) != expCount {
+ t.Fatalf("Expected %d tasks, received %d tasks", expCount, len(popped))
+ }
+ }
+
+ // should ignore second want-have
+ runTestCase([]peertask.Task{wantHave, wantHave}, 1)
+ // should ignore second want-block
+ runTestCase([]peertask.Task{wantBlock, wantBlock}, 1)
+ // want-have does not overwrite want-block
+ runTestCase([]peertask.Task{wantBlock, wantHave}, 1)
+ // can't replace want-have with want-block because want-have is active
+ runTestCase([]peertask.Task{wantHave, wantBlock}, 2)
+}
+
+func TestPushSizeInfoActive(t *testing.T) {
+ test.Flaky(t)
+
+ partner := testutil.GeneratePeers(1)[0]
+
+ wantBlock := peertask.Task{
+ Topic: "1",
+ Priority: 10,
+ Work: 10,
+ Data: &taskData{
+ IsWantBlock: true,
+ BlockSize: 10,
+ HaveBlock: true,
+ SendDontHave: false,
+ },
+ }
+ wantBlockDontHave := peertask.Task{
+ Topic: "1",
+ Priority: 10,
+ Work: 2,
+ Data: &taskData{
+ IsWantBlock: true,
+ BlockSize: 0,
+ HaveBlock: false,
+ SendDontHave: false,
+ },
+ }
+ wantHave := peertask.Task{
+ Topic: "1",
+ Priority: 10,
+ Work: 1,
+ Data: &taskData{
+ IsWantBlock: false,
+ BlockSize: 10,
+ HaveBlock: true,
+ SendDontHave: false,
+ },
+ }
+ wantHaveDontHave := peertask.Task{
+ Topic: "1",
+ Priority: 10,
+ Work: 1,
+ Data: &taskData{
+ IsWantBlock: false,
+ BlockSize: 0,
+ HaveBlock: false,
+ SendDontHave: false,
+ },
+ }
+
+ runTestCase := func(tasks []peertask.Task, expTasks []peertask.Task) {
+ tasks = cloneTasks(tasks)
+ ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger()))
+ var popped []*peertask.Task
+ for _, task := range tasks {
+ // Push the task
+ ptq.PushTasks(partner, task)
+ // Pop the task (which makes it active)
+ _, poppedTasks, _ := ptq.PopTasks(10)
+ popped = append(popped, poppedTasks...)
+ } + if len(popped) != len(expTasks) { + t.Fatalf("Expected %d tasks, received %d tasks", len(expTasks), len(popped)) + } + for i, task := range popped { + td := task.Data.(*taskData) + expTd := expTasks[i].Data.(*taskData) + if td.IsWantBlock != expTd.IsWantBlock { + t.Fatalf("Expected IsWantBlock to be %t, received %t", expTd.IsWantBlock, td.IsWantBlock) + } + if task.Work != expTasks[i].Work { + t.Fatalf("Expected Size to be %d, received %d", expTasks[i].Work, task.Work) + } + } + } + + // second want-block (DONT_HAVE) should be ignored + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, []peertask.Task{wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, []peertask.Task{wantBlockDontHave}) + // want-block with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, []peertask.Task{wantBlockDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, []peertask.Task{wantBlockDontHave, wantHave}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, []peertask.Task{wantHaveDontHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, []peertask.Task{wantHaveDontHave}) + // want-block with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, []peertask.Task{wantHaveDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, []peertask.Task{wantHaveDontHave, wantHave}) + + // want-block (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, []peertask.Task{wantBlock}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, []peertask.Task{wantBlock}) + // second want-block with size should be ignored + runTestCase([]peertask.Task{wantBlock, wantBlock}, []peertask.Task{wantBlock}) + // want-have with size should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, []peertask.Task{wantBlock}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, []peertask.Task{wantHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, []peertask.Task{wantHave}) + // second want-have with size should be ignored + runTestCase([]peertask.Task{wantHave, wantHave}, []peertask.Task{wantHave}) + // want-block with size should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, []peertask.Task{wantHave, wantBlock}) +} + +func cloneTasks(tasks []peertask.Task) []peertask.Task { + var cp []peertask.Task + for _, t := range tasks { + td := 
t.Data.(*taskData) + cp = append(cp, peertask.Task{ + Topic: t.Topic, + Priority: t.Priority, + Work: t.Work, + Data: &taskData{ + IsWantBlock: td.IsWantBlock, + BlockSize: td.BlockSize, + HaveBlock: td.HaveBlock, + SendDontHave: td.SendDontHave, + }, + }) + } + return cp +} diff --git a/bitswap/server/server.go b/bitswap/server/server.go new file mode 100644 index 0000000000..c5ff40c9bf --- /dev/null +++ b/bitswap/server/server.go @@ -0,0 +1,560 @@ +package server + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + "github.com/ipfs/boxo/bitswap/internal/defaults" + "github.com/ipfs/boxo/bitswap/message" + pb "github.com/ipfs/boxo/bitswap/message/pb" + bmetrics "github.com/ipfs/boxo/bitswap/metrics" + bsnet "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/bitswap/server/internal/decision" + "github.com/ipfs/boxo/bitswap/tracer" + blocks "github.com/ipfs/boxo/blocks" + blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + "github.com/ipfs/go-metrics-interface" + process "github.com/jbenet/goprocess" + procctx "github.com/jbenet/goprocess/context" + "github.com/libp2p/go-libp2p/core/peer" + "go.uber.org/zap" +) + +var provideKeysBufferSize = 2048 + +var log = logging.Logger("bitswap-server") +var sflog = log.Desugar() + +const provideWorkerMax = 6 + +type Option func(*Server) + +type Server struct { + sentHistogram metrics.Histogram + sendTimeHistogram metrics.Histogram + + // the engine is the bit of logic that decides who to send which blocks to + engine *decision.Engine + + // network delivers messages on behalf of the session + network bsnet.BitSwapNetwork + + // External statistics interface + tracer tracer.Tracer + + // Counters for various statistics + counterLk sync.Mutex + counters Stat + + // the total number of simultaneous threads sending outgoing messages + taskWorkerCount int + + process process.Process + + // newBlocks is a channel for newly added blocks to be provided to the + // network. 
blocks pushed down this channel get buffered and fed to the + // provideKeys channel later on to avoid too much network activity + newBlocks chan cid.Cid + // provideKeys directly feeds provide workers + provideKeys chan cid.Cid + + // Extra options to pass to the decision manager + engineOptions []decision.Option + + // the size of channel buffer to use + hasBlockBufferSize int + // whether or not to make provide announcements + provideEnabled bool +} + +func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Server { + ctx, cancel := context.WithCancel(ctx) + + px := process.WithTeardown(func() error { + return nil + }) + go func() { + <-px.Closing() // process closes first + cancel() + }() + + s := &Server{ + sentHistogram: bmetrics.SentHist(ctx), + sendTimeHistogram: bmetrics.SendTimeHist(ctx), + taskWorkerCount: defaults.BitswapTaskWorkerCount, + network: network, + process: px, + provideEnabled: true, + hasBlockBufferSize: defaults.HasBlockBufferSize, + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + } + s.newBlocks = make(chan cid.Cid, s.hasBlockBufferSize) + + for _, o := range options { + o(s) + } + + s.engine = decision.NewEngine( + ctx, + bstore, + network.ConnectionManager(), + network.Self(), + s.engineOptions..., + ) + s.engineOptions = nil + + s.startWorkers(ctx, px) + + return s +} + +func TaskWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) + } + return func(bs *Server) { + bs.taskWorkerCount = count + } +} + +func WithTracer(tap tracer.Tracer) Option { + return func(bs *Server) { + bs.tracer = tap + } +} + +// ProvideEnabled is an option for enabling/disabling provide announcements +func ProvideEnabled(enabled bool) Option { + return func(bs *Server) { + bs.provideEnabled = enabled + } +} + +func WithPeerBlockRequestFilter(pbrf decision.PeerBlockRequestFilter) Option { + o := decision.WithPeerBlockRequestFilter(pbrf) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// WithTaskComparator configures custom task prioritization logic. +func WithTaskComparator(comparator decision.TaskComparator) Option { + o := decision.WithTaskComparator(comparator) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// Configures the engine to use the given score decision logic. +func WithScoreLedger(scoreLedger decision.ScoreLedger) Option { + o := decision.WithScoreLedger(scoreLedger) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// LedgerForPeer returns aggregated data about blocks swapped and communication +// with a given peer. +func (bs *Server) LedgerForPeer(p peer.ID) *decision.Receipt { + return bs.engine.LedgerForPeer(p) +} + +// EngineTaskWorkerCount sets the number of worker threads used inside the engine +func EngineTaskWorkerCount(count int) Option { + o := decision.WithTaskWorkerCount(count) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// SetSendDontHaves indicates what to do when the engine receives a want-block +// for a block that is not in the blockstore. Either +// - Send a DONT_HAVE message +// - Simply don't respond +// This option is only used for testing. 
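+//
+// A hypothetical test-only usage sketch:
+//
+//	bs := server.New(ctx, network, bstore, server.SetSendDontHaves(false))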
+func SetSendDontHaves(send bool) Option {
+ o := decision.WithSetSendDontHave(send)
+ return func(bs *Server) {
+ bs.engineOptions = append(bs.engineOptions, o)
+ }
+}
+
+// EngineBlockstoreWorkerCount sets the number of worker threads used for
+// blockstore operations in the decision engine
+func EngineBlockstoreWorkerCount(count int) Option {
+ o := decision.WithBlockstoreWorkerCount(count)
+ return func(bs *Server) {
+ bs.engineOptions = append(bs.engineOptions, o)
+ }
+}
+
+func WithTargetMessageSize(tms int) Option {
+ o := decision.WithTargetMessageSize(tms)
+ return func(bs *Server) {
+ bs.engineOptions = append(bs.engineOptions, o)
+ }
+}
+
+// MaxOutstandingBytesPerPeer describes approximately how much work we are willing to have outstanding to a peer at any
+// given time. Setting it to 0 will disable any limiting.
+func MaxOutstandingBytesPerPeer(count int) Option {
+ o := decision.WithMaxOutstandingBytesPerPeer(count)
+ return func(bs *Server) {
+ bs.engineOptions = append(bs.engineOptions, o)
+ }
+}
+
+// MaxQueuedWantlistEntriesPerPeer limits how many individual entries each peer is allowed to send.
+// If a peer sends us more than this, we will truncate the newest entries.
+// It defaults to defaults.MaxQueuedWantlistEntiresPerPeer.
+func MaxQueuedWantlistEntriesPerPeer(count uint) Option {
+ o := decision.WithMaxQueuedWantlistEntriesPerPeer(count)
+ return func(bs *Server) {
+ bs.engineOptions = append(bs.engineOptions, o)
+ }
+}
+
+// MaxCidSize limits how big CIDs we are willing to serve.
+// We will ignore CIDs over this limit.
+// It defaults to [defaults.MaxCidSize].
+// If it is 0 no limit is applied.
+func MaxCidSize(n uint) Option {
+ o := decision.WithMaxCidSize(n)
+ return func(bs *Server) {
+ bs.engineOptions = append(bs.engineOptions, o)
+ }
+}
+
+// HasBlockBufferSize configures how big the new blocks buffer should be.
+func HasBlockBufferSize(count int) Option {
+ if count < 0 {
+ panic("cannot have negative buffer size")
+ }
+ return func(bs *Server) {
+ bs.hasBlockBufferSize = count
+ }
+}
+
+// WantlistForPeer returns the currently understood list of blocks requested by a
+// given peer.
+func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid {
+ var out []cid.Cid
+ for _, e := range bs.engine.WantlistForPeer(p) {
+ out = append(out, e.Cid)
+ }
+ return out
+}
+
+func (bs *Server) startWorkers(ctx context.Context, px process.Process) {
+ bs.engine.StartWorkers(ctx, px)
+
+ // Start up workers to handle requests from other nodes for the data on this node
+ for i := 0; i < bs.taskWorkerCount; i++ {
+ i := i
+ px.Go(func(px process.Process) {
+ bs.taskWorker(ctx, i)
+ })
+ }
+
+ if bs.provideEnabled {
+ // Start up a worker to manage sending out provides messages
+ px.Go(func(px process.Process) {
+ bs.provideCollector(ctx)
+ })
+
+ // Spawn up multiple workers to handle incoming blocks
+ // consider increasing number if providing blocks bottlenecks
+ // file transfers
+ px.Go(bs.provideWorker)
+ }
+}
+
+func (bs *Server) taskWorker(ctx context.Context, id int) {
+ defer log.Debug("bitswap task worker shutting down...")
+ log := log.With("ID", id)
+ for {
+ log.Debug("Bitswap.TaskWorker.Loop")
+ select {
+ case nextEnvelope := <-bs.engine.Outbox():
+ select {
+ case envelope, ok := <-nextEnvelope:
+ if !ok {
+ continue
+ }
+
+ start := time.Now()
+
+ // TODO: Only record message as sent if there was no error?
+ // Ideally, yes. But we'd need some way to trigger a retry and/or drop
+ // the peer.
+ bs.engine.MessageSent(envelope.Peer, envelope.Message)
+ if bs.tracer != nil {
+ bs.tracer.MessageSent(envelope.Peer, envelope.Message)
+ }
+ bs.sendBlocks(ctx, envelope)
+
+ dur := time.Since(start)
+ bs.sendTimeHistogram.Observe(dur.Seconds())
+
+ case <-ctx.Done():
+ return
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (bs *Server) logOutgoingBlocks(env *decision.Envelope) {
+ if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil {
+ return
+ }
+
+ self := bs.network.Self()
+
+ for _, blockPresence := range env.Message.BlockPresences() {
+ c := blockPresence.Cid
+ switch blockPresence.Type {
+ case pb.Message_Have:
+ log.Debugw("sent message",
+ "type", "HAVE",
+ "cid", c,
+ "local", self,
+ "to", env.Peer,
+ )
+ case pb.Message_DontHave:
+ log.Debugw("sent message",
+ "type", "DONT_HAVE",
+ "cid", c,
+ "local", self,
+ "to", env.Peer,
+ )
+ default:
+ panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type))
+ }
+
+ }
+ for _, block := range env.Message.Blocks() {
+ log.Debugw("sent message",
+ "type", "BLOCK",
+ "cid", block.Cid(),
+ "local", self,
+ "to", env.Peer,
+ )
+ }
+}
+
+func (bs *Server) sendBlocks(ctx context.Context, env *decision.Envelope) {
+ // Blocks need to be sent synchronously to maintain proper backpressure
+ // throughout the network stack
+ defer env.Sent()
+
+ err := bs.network.SendMessage(ctx, env.Peer, env.Message)
+ if err != nil {
+ log.Debugw("failed to send blocks message",
+ "peer", env.Peer,
+ "error", err,
+ )
+ return
+ }
+
+ bs.logOutgoingBlocks(env)
+
+ dataSent := 0
+ blocks := env.Message.Blocks()
+ for _, b := range blocks {
+ dataSent += len(b.RawData())
+ }
+ bs.counterLk.Lock()
+ bs.counters.BlocksSent += uint64(len(blocks))
+ bs.counters.DataSent += uint64(dataSent)
+ bs.counterLk.Unlock()
+ bs.sentHistogram.Observe(float64(env.Message.Size()))
+ log.Debugw("sent message", "peer", env.Peer)
+}
+
+type Stat struct {
+ Peers []string
+ ProvideBufLen int
+ BlocksSent uint64
+ DataSent uint64
+}
+
+// Stat returns aggregated statistics about bitswap operations
+func (bs *Server) Stat() (Stat, error) {
+ bs.counterLk.Lock()
+ s := bs.counters
+ bs.counterLk.Unlock()
+ s.ProvideBufLen = len(bs.newBlocks)
+
+ peers := bs.engine.Peers()
+ peersStr := make([]string, len(peers))
+ for i, p := range peers {
+ peersStr[i] = p.Pretty()
+ }
+ sort.Strings(peersStr)
+ s.Peers = peersStr
+
+ return s, nil
+}
+
+// NotifyNewBlocks announces the existence of blocks to this bitswap service. The
+// service will potentially notify its peers.
+// Bitswap itself doesn't store new blocks. It's the caller's responsibility to ensure
+// that those blocks are available in the blockstore before calling this function.
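+//
+// A sketch of the expected call order (bstore and blks are assumed to exist):
+//
+//	if err := bstore.PutMany(ctx, blks); err != nil {
+//		return err
+//	}
+//	if err := bs.NotifyNewBlocks(ctx, blks...); err != nil {
+//		return err
+//	}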
+func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error {
+ select {
+ case <-bs.process.Closing():
+ return errors.New("bitswap is closed")
+ default:
+ }
+
+ // Send wanted blocks to decision engine
+ bs.engine.NotifyNewBlocks(blks)
+
+ // If the reprovider is enabled, send block to reprovider
+ if bs.provideEnabled {
+ for _, blk := range blks {
+ select {
+ case bs.newBlocks <- blk.Cid():
+ // send block off to be reprovided
+ case <-bs.process.Closing():
+ return bs.process.Close()
+ }
+ }
+ }
+
+ return nil
+}
+
+func (bs *Server) provideCollector(ctx context.Context) {
+ defer close(bs.provideKeys)
+ var toProvide []cid.Cid
+ var nextKey cid.Cid
+ var keysOut chan cid.Cid
+
+ for {
+ select {
+ case blkey, ok := <-bs.newBlocks:
+ if !ok {
+ log.Debug("newBlocks channel closed")
+ return
+ }
+
+ if keysOut == nil {
+ nextKey = blkey
+ keysOut = bs.provideKeys
+ } else {
+ toProvide = append(toProvide, blkey)
+ }
+ case keysOut <- nextKey:
+ if len(toProvide) > 0 {
+ nextKey = toProvide[0]
+ toProvide = toProvide[1:]
+ } else {
+ keysOut = nil
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (bs *Server) provideWorker(px process.Process) {
+ // FIXME: OnClosingContext returns a _custom_ context type.
+ // Unfortunately, deriving a new cancelable context from this custom
+ // type fires off a goroutine. To work around this, we create a single
+ // cancelable context up-front and derive all sub-contexts from that.
+ //
+ // See: https://github.com/ipfs/go-ipfs/issues/5810
+ ctx := procctx.OnClosingContext(px)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ limit := make(chan struct{}, provideWorkerMax)
+
+ limitedGoProvide := func(k cid.Cid, wid int) {
+ defer func() {
+ // replace token when done
+ <-limit
+ }()
+
+ log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k)
+ defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k)
+
+ ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx
+ defer cancel()
+
+ if err := bs.network.Provide(ctx, k); err != nil {
+ log.Warn(err)
+ }
+ }
+
+ // worker spawner, reads from bs.provideKeys until it closes, spawning a
+ // _ratelimited_ number of workers to handle each key.
+ for wid := 2; ; wid++ {
+ log.Debug("Bitswap.ProvideWorker.Loop")
+
+ select {
+ case <-px.Closing():
+ return
+ case k, ok := <-bs.provideKeys:
+ if !ok {
+ log.Debug("provideKeys channel closed")
+ return
+ }
+ select {
+ case <-px.Closing():
+ return
+ case limit <- struct{}{}:
+ go limitedGoProvide(k, wid)
+ }
+ }
+ }
+}
+
+func (bs *Server) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) {
+ // This call records changes to wantlists, blocks received,
+ // and number of bytes transferred.
+ mustKillConnection := bs.engine.MessageReceived(ctx, p, incoming)
+ if mustKillConnection {
+ bs.network.DisconnectFrom(ctx, p)
+ }
+ // TODO: this is bad, and could be easily abused.
+ // Should only track *useful* messages in ledger
+
+ if bs.tracer != nil {
+ bs.tracer.MessageReceived(p, incoming)
+ }
+}
+
+// ReceivedBlocks notifies the decision engine that a peer is behaving well
+// and gave us useful data, potentially increasing its score and making us
+// send them more data in exchange.
+func (bs *Server) ReceivedBlocks(from peer.ID, blks []blocks.Block) {
+ bs.engine.ReceivedBlocks(from, blks)
+}
+
+func (*Server) ReceiveError(err error) {
+ log.Infof("Bitswap Server ReceiveError: %s", err)
+ // TODO log the network error
+ // TODO bubble the network error up to the parent context/error logger
+}
+func (bs *Server) PeerConnected(p peer.ID) {
+ bs.engine.PeerConnected(p)
+}
+func (bs *Server) PeerDisconnected(p peer.ID) {
+ bs.engine.PeerDisconnected(p)
+}
+
+// Close is called to shut down the server.
+func (bs *Server) Close() error {
+ return bs.process.Close()
+}
diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go
new file mode 100644
index 0000000000..5a052b8314
--- /dev/null
+++ b/bitswap/testinstance/testinstance.go
@@ -0,0 +1,132 @@
+package testsession
+
+import (
+ "context"
+ "time"
+
+ "github.com/ipfs/boxo/bitswap"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ ds "github.com/ipfs/go-datastore"
+ delayed "github.com/ipfs/go-datastore/delayed"
+ ds_sync "github.com/ipfs/go-datastore/sync"
+ delay "github.com/ipfs/go-ipfs-delay"
+ tnet "github.com/libp2p/go-libp2p-testing/net"
+ p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// NewTestInstanceGenerator generates a new InstanceGenerator for the given
+// testnet
+func NewTestInstanceGenerator(net tn.Network, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) InstanceGenerator {
+ ctx, cancel := context.WithCancel(context.Background())
+ return InstanceGenerator{
+ net: net,
+ seq: 0,
+ ctx: ctx, // TODO take ctx as param to Next, Instances
+ cancel: cancel,
+ bsOptions: bsOptions,
+ netOptions: netOptions,
+ }
+}
+
+// InstanceGenerator generates new test instances of bitswap+dependencies
+type InstanceGenerator struct {
+ seq int
+ net tn.Network
+ ctx context.Context
+ cancel context.CancelFunc
+ bsOptions []bitswap.Option
+ netOptions []bsnet.NetOpt
+}
+
+// Close closes the global context, shutting down all test instances
+func (g *InstanceGenerator) Close() error {
+ g.cancel()
+ return nil // for Closer interface
+}
+
+// Next generates a new instance of bitswap + dependencies
+func (g *InstanceGenerator) Next() Instance {
+ g.seq++
+ p, err := p2ptestutil.RandTestBogusIdentity()
+ if err != nil {
+ panic("FIXME") // TODO change signature
+ }
+ return NewInstance(g.ctx, g.net, p, g.netOptions, g.bsOptions)
+}
+
+// Instances creates N test instances of bitswap + dependencies and connects
+// them to each other
+func (g *InstanceGenerator) Instances(n int) []Instance {
+ var instances []Instance
+ for j := 0; j < n; j++ {
+ inst := g.Next()
+ instances = append(instances, inst)
+ }
+ ConnectInstances(instances)
+ return instances
+}
+
+// ConnectInstances connects the given instances to each other
+func ConnectInstances(instances []Instance) {
+ for i, inst := range instances {
+ for j := i + 1; j < len(instances); j++ {
+ oinst := instances[j]
+ err := inst.Adapter.ConnectTo(context.Background(), oinst.Peer)
+ if err != nil {
+ panic(err.Error())
+ }
+ }
+ }
+}
+
+// Instance is a test instance of bitswap + dependencies for integration testing
+type Instance struct {
+ Peer peer.ID
+ Exchange *bitswap.Bitswap
+ blockstore blockstore.Blockstore
+ Adapter bsnet.BitSwapNetwork
+ blockstoreDelay delay.D
+}
+
+// Blockstore returns the block store for this test instance
+func (i *Instance) Blockstore() blockstore.Blockstore {
+ return i.blockstore
+}
+
+// SetBlockstoreLatency customizes the artificial delay on receiving blocks
+// from a blockstore test instance.
+func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration {
+ return i.blockstoreDelay.Set(t)
+}
+
+// NewInstance creates a test bitswap instance.
+//
+// NB: It's easy to make mistakes by providing the same peer ID to two different
+// instances. To safeguard, use the InstanceGenerator to generate instances. It's
+// just a much better idea.
+func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) Instance {
+ bsdelay := delay.Fixed(0)
+
+ adapter := net.Adapter(p, netOptions...)
+ dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay))
+
+ bstore, err := blockstore.CachedBlockstore(ctx,
+ blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)),
+ blockstore.DefaultCacheOpts())
+ if err != nil {
+ panic(err.Error()) // FIXME perhaps change signature and return error.
+ }
+
+ bs := bitswap.New(ctx, adapter, bstore, bsOptions...)
+
+ return Instance{
+ Adapter: adapter,
+ Peer: p.ID(),
+ Exchange: bs,
+ blockstore: bstore,
+ blockstoreDelay: bsdelay,
+ }
+}
diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go
new file mode 100644
index 0000000000..ec28185185
--- /dev/null
+++ b/bitswap/testnet/interface.go
@@ -0,0 +1,16 @@
+package bitswap
+
+import (
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+
+ tnet "github.com/libp2p/go-libp2p-testing/net"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Network is an interface for generating bitswap network interfaces
+// based on a test network.
+type Network interface {
+ Adapter(tnet.Identity, ...bsnet.NetOpt) bsnet.BitSwapNetwork
+
+ HasPeer(peer.ID) bool
+}
diff --git a/bitswap/testnet/internet_latency_delay_generator.go b/bitswap/testnet/internet_latency_delay_generator.go
new file mode 100644
index 0000000000..25b9f5b80d
--- /dev/null
+++ b/bitswap/testnet/internet_latency_delay_generator.go
@@ -0,0 +1,63 @@
+package bitswap
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/ipfs/go-ipfs-delay"
+)
+
+var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// InternetLatencyDelayGenerator generates three clusters of delays,
+// typical of the type of peers you would encounter on the internet.
+// Given a base delay time T, the wait time generated will be either:
+// 1. A normal distribution around the base time
+// 2. A normal distribution around the base time plus a "medium" delay
+// 3. A normal distribution around the base time plus a "large" delay
+// The sizes of the medium & large delays are determined when the generator
+// is constructed, as well as the relative percentages with which delays fall
+// into each of the three different clusters, and the standard deviation for
+// the normal distribution.
+// This can be used to generate a number of scenarios typical of latency
+// distribution among peers on the internet.
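+//
+// A usage sketch with assumed parameters (a 1s "medium" and 3s "large" extra
+// delay at 20%/40% probability, 100ms standard deviation):
+//
+//	gen := InternetLatencyDelayGenerator(time.Second, 3*time.Second, 0.2, 0.4, 100*time.Millisecond, nil)
+//	wait := gen.NextWaitTime(30 * time.Millisecond)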
+func InternetLatencyDelayGenerator( + mediumDelay time.Duration, + largeDelay time.Duration, + percentMedium float64, + percentLarge float64, + std time.Duration, + rng *rand.Rand) delay.Generator { + if rng == nil { + rng = sharedRNG + } + + return &internetLatencyDelayGenerator{ + mediumDelay: mediumDelay, + largeDelay: largeDelay, + percentLarge: percentLarge, + percentMedium: percentMedium, + std: std, + rng: rng, + } +} + +type internetLatencyDelayGenerator struct { + mediumDelay time.Duration + largeDelay time.Duration + percentLarge float64 + percentMedium float64 + std time.Duration + rng *rand.Rand +} + +func (d *internetLatencyDelayGenerator) NextWaitTime(t time.Duration) time.Duration { + clusterDistribution := d.rng.Float64() + baseDelay := time.Duration(d.rng.NormFloat64()*float64(d.std)) + t + if clusterDistribution < d.percentLarge { + return baseDelay + d.largeDelay + } else if clusterDistribution < d.percentMedium+d.percentLarge { + return baseDelay + d.mediumDelay + } + return baseDelay +} diff --git a/bitswap/testnet/internet_latency_delay_generator_test.go b/bitswap/testnet/internet_latency_delay_generator_test.go new file mode 100644 index 0000000000..dcd6a92b53 --- /dev/null +++ b/bitswap/testnet/internet_latency_delay_generator_test.go @@ -0,0 +1,69 @@ +package bitswap + +import ( + "math" + "math/rand" + "testing" + "time" +) + +const testSeed = 99 + +func TestInternetLatencyDelayNextWaitTimeDistribution(t *testing.T) { + initialValue := 1000 * time.Millisecond + deviation := 100 * time.Millisecond + mediumDelay := 1000 * time.Millisecond + largeDelay := 3000 * time.Millisecond + percentMedium := 0.2 + percentLarge := 0.4 + buckets := make(map[string]int) + internetLatencyDistributionDelay := InternetLatencyDelayGenerator( + mediumDelay, + largeDelay, + percentMedium, + percentLarge, + deviation, + rand.New(rand.NewSource(testSeed))) + + buckets["fast"] = 0 + buckets["medium"] = 0 + buckets["slow"] = 0 + buckets["outside_1_deviation"] = 0 + + // strategy here is rather than mock randomness, just use enough samples to + // get approximately the distribution you'd expect + for i := 0; i < 10000; i++ { + next := internetLatencyDistributionDelay.NextWaitTime(initialValue) + if math.Abs((next - initialValue).Seconds()) <= deviation.Seconds() { + buckets["fast"]++ + } else if math.Abs((next - initialValue - mediumDelay).Seconds()) <= deviation.Seconds() { + buckets["medium"]++ + } else if math.Abs((next - initialValue - largeDelay).Seconds()) <= deviation.Seconds() { + buckets["slow"]++ + } else { + buckets["outside_1_deviation"]++ + } + } + totalInOneDeviation := float64(10000 - buckets["outside_1_deviation"]) + oneDeviationPercentage := totalInOneDeviation / 10000 + fastPercentageResult := float64(buckets["fast"]) / totalInOneDeviation + mediumPercentageResult := float64(buckets["medium"]) / totalInOneDeviation + slowPercentageResult := float64(buckets["slow"]) / totalInOneDeviation + + // see 68-95-99 rule for normal distributions + if math.Abs(oneDeviationPercentage-0.6827) >= 0.1 { + t.Fatal("Failed to distribute values normally based on standard deviation") + } + + if math.Abs(fastPercentageResult+percentMedium+percentLarge-1) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around fast delay time") + } + + if math.Abs(mediumPercentageResult-percentMedium) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around medium delay time") + } + + if math.Abs(slowPercentageResult-percentLarge) >= 0.1 { + t.Fatal("Incorrect percentage of values 
distributed around slow delay time") + } +} diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go new file mode 100644 index 0000000000..dba784ec37 --- /dev/null +++ b/bitswap/testnet/network_test.go @@ -0,0 +1,104 @@ +package bitswap + +import ( + "context" + "sync" + "testing" + + bsmsg "github.com/ipfs/boxo/bitswap/message" + bsnet "github.com/ipfs/boxo/bitswap/network" + + blocks "github.com/ipfs/boxo/blocks" + mockrouting "github.com/ipfs/boxo/routing/mock" + delay "github.com/ipfs/go-ipfs-delay" + + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestSendMessageAsyncButWaitForResponse(t *testing.T) { + net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) + responderPeer := tnet.RandIdentityOrFatal(t) + waiter := net.Adapter(tnet.RandIdentityOrFatal(t)) + responder := net.Adapter(responderPeer) + + var wg sync.WaitGroup + + wg.Add(1) + + expectedStr := "received async" + + responder.Start(lambda(func( + ctx context.Context, + fromWaiter peer.ID, + msgFromWaiter bsmsg.BitSwapMessage) { + + msgToWaiter := bsmsg.New(true) + msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) + err := waiter.SendMessage(ctx, fromWaiter, msgToWaiter) + if err != nil { + t.Error(err) + } + })) + t.Cleanup(responder.Stop) + + waiter.Start(lambda(func( + ctx context.Context, + fromResponder peer.ID, + msgFromResponder bsmsg.BitSwapMessage) { + + // TODO assert that this came from the correct peer and that the message contents are as expected + ok := false + for _, b := range msgFromResponder.Blocks() { + if string(b.RawData()) == expectedStr { + wg.Done() + ok = true + } + } + + if !ok { + t.Fatal("Message not received from the responder") + } + })) + t.Cleanup(waiter.Stop) + + messageSentAsync := bsmsg.New(true) + messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) + errSending := waiter.SendMessage( + context.Background(), responderPeer.ID(), messageSentAsync) + if errSending != nil { + t.Fatal(errSending) + } + + wg.Wait() // until waiter delegate function is executed +} + +type receiverFunc func(ctx context.Context, p peer.ID, + incoming bsmsg.BitSwapMessage) + +// lambda returns a Receiver instance given a receiver function +func lambda(f receiverFunc) bsnet.Receiver { + return &lambdaImpl{ + f: f, + } +} + +type lambdaImpl struct { + f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) +} + +func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, + p peer.ID, incoming bsmsg.BitSwapMessage) { + lam.f(ctx, p, incoming) +} + +func (lam *lambdaImpl) ReceiveError(err error) { + // TODO log error +} + +func (lam *lambdaImpl) PeerConnected(p peer.ID) { + // TODO +} +func (lam *lambdaImpl) PeerDisconnected(peer.ID) { + // TODO +} diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go new file mode 100644 index 0000000000..e4df19699f --- /dev/null +++ b/bitswap/testnet/peernet.go @@ -0,0 +1,44 @@ +package bitswap + +import ( + "context" + + bsnet "github.com/ipfs/boxo/bitswap/network" + + mockrouting "github.com/ipfs/boxo/routing/mock" + ds "github.com/ipfs/go-datastore" + + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" + mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" +) + +type peernet struct { + mockpeernet.Mocknet + routingserver mockrouting.Server +} + +// StreamNet is a testnet that uses libp2p's MockNet +func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) { + return 
&peernet{net, rs}, nil
+}
+
+func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork {
+	client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address())
+	if err != nil {
+		panic(err.Error())
+	}
+	routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
+	return bsnet.NewFromIpfsHost(client, routing, opts...)
+}
+
+func (pn *peernet) HasPeer(p peer.ID) bool {
+	for _, member := range pn.Mocknet.Peers() {
+		if p == member {
+			return true
+		}
+	}
+	return false
+}
+
+var _ Network = (*peernet)(nil)
diff --git a/bitswap/testnet/rate_limit_generators.go b/bitswap/testnet/rate_limit_generators.go
new file mode 100644
index 0000000000..2c4a1cd563
--- /dev/null
+++ b/bitswap/testnet/rate_limit_generators.go
@@ -0,0 +1,42 @@
+package bitswap
+
+import (
+	"math/rand"
+)
+
+type fixedRateLimitGenerator struct {
+	rateLimit float64
+}
+
+// FixedRateLimitGenerator returns a rate limit generator that always generates
+// the specified rate limit in bytes/sec.
+func FixedRateLimitGenerator(rateLimit float64) RateLimitGenerator {
+	return &fixedRateLimitGenerator{rateLimit}
+}
+
+func (rateLimitGenerator *fixedRateLimitGenerator) NextRateLimit() float64 {
+	return rateLimitGenerator.rateLimit
+}
+
+type variableRateLimitGenerator struct {
+	rateLimit float64
+	std       float64
+	rng       *rand.Rand
+}
+
+// VariableRateLimitGenerator returns a rate limit generator that produces
+// rate limits following a normal distribution.
+func VariableRateLimitGenerator(rateLimit float64, std float64, rng *rand.Rand) RateLimitGenerator {
+	if rng == nil {
+		rng = sharedRNG
+	}
+
+	return &variableRateLimitGenerator{
+		std:       std,
+		rng:       rng,
+		rateLimit: rateLimit,
+	}
+}
+
+func (rateLimitGenerator *variableRateLimitGenerator) NextRateLimit() float64 {
+	return rateLimitGenerator.rng.NormFloat64()*rateLimitGenerator.std + rateLimitGenerator.rateLimit
+}
diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go
new file mode 100644
index 0000000000..0deb1b1ab0
--- /dev/null
+++ b/bitswap/testnet/virtual.go
@@ -0,0 +1,428 @@
+package bitswap
+
+import (
+	"context"
+	"errors"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	bsmsg "github.com/ipfs/boxo/bitswap/message"
+	bsnet "github.com/ipfs/boxo/bitswap/network"
+
+	mockrouting "github.com/ipfs/boxo/routing/mock"
+	cid "github.com/ipfs/go-cid"
+	delay "github.com/ipfs/go-ipfs-delay"
+
+	tnet "github.com/libp2p/go-libp2p-testing/net"
+	"github.com/libp2p/go-libp2p/core/connmgr"
+	"github.com/libp2p/go-libp2p/core/peer"
+	protocol "github.com/libp2p/go-libp2p/core/protocol"
+	"github.com/libp2p/go-libp2p/core/routing"
+	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
+)
+
+// VirtualNetwork generates a new testnet instance - a fake network that
+// is used to simulate sending messages.
+func VirtualNetwork(rs mockrouting.Server, d delay.D) Network {
+	return &network{
+		latencies:          make(map[peer.ID]map[peer.ID]time.Duration),
+		clients:            make(map[peer.ID]*receiverQueue),
+		delay:              d,
+		routingserver:      rs,
+		isRateLimited:      false,
+		rateLimitGenerator: nil,
+		conns:              make(map[string]struct{}),
+	}
+}
+
+// RateLimitGenerator is an interface for generating rate limits across peers
+type RateLimitGenerator interface {
+	NextRateLimit() float64
+}
+
+// RateLimitedVirtualNetwork generates a testnet instance where nodes are rate
+// limited in their upload/download speed.
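+//
+// A sketch of how the rate limit generators compose with this constructor
+// (the values are invented for illustration): a network with a fixed 10ms
+// latency and per-pair bandwidth drawn from a normal distribution around
+// 1 MiB/s.
+//
+//	rng := rand.New(rand.NewSource(42)) // deterministic for reproducible tests
+//	net := RateLimitedVirtualNetwork(
+//		mockrouting.NewServer(),
+//		delay.Fixed(10*time.Millisecond),
+//		VariableRateLimitGenerator(1<<20, 1<<18, rng), // ~1 MiB/s ± 256 KiB/s
+//	)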
+func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { + return &network{ + latencies: make(map[peer.ID]map[peer.ID]time.Duration), + rateLimiters: make(map[peer.ID]map[peer.ID]*mocknet.RateLimiter), + clients: make(map[peer.ID]*receiverQueue), + delay: d, + routingserver: rs, + isRateLimited: true, + rateLimitGenerator: rateLimitGenerator, + conns: make(map[string]struct{}), + } +} + +type network struct { + mu sync.Mutex + latencies map[peer.ID]map[peer.ID]time.Duration + rateLimiters map[peer.ID]map[peer.ID]*mocknet.RateLimiter + clients map[peer.ID]*receiverQueue + routingserver mockrouting.Server + delay delay.D + isRateLimited bool + rateLimitGenerator RateLimitGenerator + conns map[string]struct{} +} + +type message struct { + from peer.ID + msg bsmsg.BitSwapMessage + shouldSend time.Time +} + +// receiverQueue queues up a set of messages to be sent, and sends them *in +// order* with their delays respected as much as sending them in order allows +// for +type receiverQueue struct { + receiver *networkClient + queue []*message + active bool + lk sync.Mutex +} + +func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { + n.mu.Lock() + defer n.mu.Unlock() + + s := bsnet.Settings{ + SupportedProtocols: []protocol.ID{ + bsnet.ProtocolBitswap, + bsnet.ProtocolBitswapOneOne, + bsnet.ProtocolBitswapOneZero, + bsnet.ProtocolBitswapNoVers, + }, + } + for _, opt := range opts { + opt(&s) + } + + client := &networkClient{ + local: p.ID(), + network: n, + routing: n.routingserver.Client(p), + supportedProtocols: s.SupportedProtocols, + } + n.clients[p.ID()] = &receiverQueue{receiver: client} + return client +} + +func (n *network) HasPeer(p peer.ID) bool { + n.mu.Lock() + defer n.mu.Unlock() + + _, found := n.clients[p] + return found +} + +// TODO should this be completely asynchronous? +// TODO what does the network layer do with errors received from services? +func (n *network) SendMessage( + ctx context.Context, + from peer.ID, + to peer.ID, + mes bsmsg.BitSwapMessage) error { + + mes = mes.Clone() + + n.mu.Lock() + defer n.mu.Unlock() + + latencies, ok := n.latencies[from] + if !ok { + latencies = make(map[peer.ID]time.Duration) + n.latencies[from] = latencies + } + + latency, ok := latencies[to] + if !ok { + latency = n.delay.NextWaitTime() + latencies[to] = latency + } + + var bandwidthDelay time.Duration + if n.isRateLimited { + rateLimiters, ok := n.rateLimiters[from] + if !ok { + rateLimiters = make(map[peer.ID]*mocknet.RateLimiter) + n.rateLimiters[from] = rateLimiters + } + + rateLimiter, ok := rateLimiters[to] + if !ok { + rateLimiter = mocknet.NewRateLimiter(n.rateLimitGenerator.NextRateLimit()) + rateLimiters[to] = rateLimiter + } + + size := mes.ToProtoV1().Size() + bandwidthDelay = rateLimiter.Limit(size) + } else { + bandwidthDelay = 0 + } + + receiver, ok := n.clients[to] + if !ok { + return errors.New("cannot locate peer on network") + } + + // nb: terminate the context since the context wouldn't actually be passed + // over the network in a real scenario + + msg := &message{ + from: from, + msg: mes, + shouldSend: time.Now().Add(latency).Add(bandwidthDelay), + } + receiver.enqueue(msg) + + return nil +} + +var _ bsnet.Receiver = (*networkClient)(nil) + +type networkClient struct { + // These need to be at the top of the struct (allocated on the heap) for alignment on 32bit platforms. 
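+	// (sync/atomic's 64-bit operations require 64-bit alignment, and the
+	// start of a heap allocation is where Go guaranteed it on 32-bit
+	// platforms before the Go 1.19 atomic types; hence the field ordering.)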
+ stats bsnet.Stats + + local peer.ID + receivers []bsnet.Receiver + network *network + routing routing.Routing + supportedProtocols []protocol.ID +} + +func (nc *networkClient) ReceiveMessage(ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) { + for _, v := range nc.receivers { + v.ReceiveMessage(ctx, sender, incoming) + } +} + +func (nc *networkClient) ReceiveError(e error) { + for _, v := range nc.receivers { + v.ReceiveError(e) + } +} + +func (nc *networkClient) PeerConnected(p peer.ID) { + for _, v := range nc.receivers { + v.PeerConnected(p) + } +} +func (nc *networkClient) PeerDisconnected(p peer.ID) { + for _, v := range nc.receivers { + v.PeerDisconnected(p) + } +} + +func (nc *networkClient) Self() peer.ID { + return nc.local +} + +func (nc *networkClient) Ping(ctx context.Context, p peer.ID) ping.Result { + return ping.Result{RTT: nc.Latency(p)} +} + +func (nc *networkClient) Latency(p peer.ID) time.Duration { + nc.network.mu.Lock() + defer nc.network.mu.Unlock() + return nc.network.latencies[nc.local][p] +} + +func (nc *networkClient) SendMessage( + ctx context.Context, + to peer.ID, + message bsmsg.BitSwapMessage) error { + if err := nc.network.SendMessage(ctx, nc.local, to, message); err != nil { + return err + } + atomic.AddUint64(&nc.stats.MessagesSent, 1) + return nil +} + +func (nc *networkClient) Stats() bsnet.Stats { + return bsnet.Stats{ + MessagesRecvd: atomic.LoadUint64(&nc.stats.MessagesRecvd), + MessagesSent: atomic.LoadUint64(&nc.stats.MessagesSent), + } +} + +// FindProvidersAsync returns a channel of providers for the given key. +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + // NB: this function duplicates the AddrInfo -> ID transformation in the + // bitswap network adapter. Not to worry. This network client will be + // deprecated once the ipfsnet.Mock is added. The code below is only + // temporary. + + out := make(chan peer.ID) + go func() { + defer close(out) + providers := nc.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + select { + case <-ctx.Done(): + case out <- info.ID: + } + } + }() + return out +} + +func (nc *networkClient) ConnectionManager() connmgr.ConnManager { + return &connmgr.NullConnMgr{} +} + +type messagePasser struct { + net *networkClient + target peer.ID + local peer.ID + ctx context.Context +} + +func (mp *messagePasser) SendMsg(ctx context.Context, m bsmsg.BitSwapMessage) error { + return mp.net.SendMessage(ctx, mp.target, m) +} + +func (mp *messagePasser) Close() error { + return nil +} + +func (mp *messagePasser) Reset() error { + return nil +} + +var oldProtos = map[protocol.ID]struct{}{ + bsnet.ProtocolBitswapNoVers: {}, + bsnet.ProtocolBitswapOneZero: {}, + bsnet.ProtocolBitswapOneOne: {}, +} + +func (mp *messagePasser) SupportsHave() bool { + protos := mp.net.network.clients[mp.target].receiver.supportedProtocols + for _, proto := range protos { + if _, ok := oldProtos[proto]; !ok { + return true + } + } + return false +} + +func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID, opts *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { + return &messagePasser{ + net: nc, + target: p, + local: nc.local, + ctx: ctx, + }, nil +} + +// Provide provides the key to the network. 
+func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { + return nc.routing.Provide(ctx, k, true) +} + +func (nc *networkClient) Start(r ...bsnet.Receiver) { + nc.receivers = r +} + +func (nc *networkClient) Stop() { +} + +func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { + nc.network.mu.Lock() + otherClient, ok := nc.network.clients[p] + if !ok { + nc.network.mu.Unlock() + return errors.New("no such peer in network") + } + + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; ok { + nc.network.mu.Unlock() + // log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") + return nil + } + nc.network.conns[tag] = struct{}{} + nc.network.mu.Unlock() + + otherClient.receiver.PeerConnected(nc.local) + nc.PeerConnected(p) + return nil +} + +func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { + nc.network.mu.Lock() + defer nc.network.mu.Unlock() + + otherClient, ok := nc.network.clients[p] + if !ok { + return errors.New("no such peer in network") + } + + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; !ok { + // Already disconnected + return nil + } + delete(nc.network.conns, tag) + + otherClient.receiver.PeerDisconnected(nc.local) + nc.PeerDisconnected(p) + return nil +} + +func (rq *receiverQueue) enqueue(m *message) { + rq.lk.Lock() + defer rq.lk.Unlock() + rq.queue = append(rq.queue, m) + if !rq.active { + rq.active = true + go rq.process() + } +} + +func (rq *receiverQueue) Swap(i, j int) { + rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i] +} + +func (rq *receiverQueue) Len() int { + return len(rq.queue) +} + +func (rq *receiverQueue) Less(i, j int) bool { + return rq.queue[i].shouldSend.UnixNano() < rq.queue[j].shouldSend.UnixNano() +} + +func (rq *receiverQueue) process() { + for { + rq.lk.Lock() + sort.Sort(rq) + if len(rq.queue) == 0 { + rq.active = false + rq.lk.Unlock() + return + } + m := rq.queue[0] + if time.Until(m.shouldSend).Seconds() < 0.1 { + rq.queue = rq.queue[1:] + rq.lk.Unlock() + time.Sleep(time.Until(m.shouldSend)) + atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) + rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) + } else { + rq.lk.Unlock() + time.Sleep(100 * time.Millisecond) + } + } +} + +func tagForPeers(a, b peer.ID) string { + if a < b { + return string(a + b) + } + return string(b + a) +} diff --git a/bitswap/tracer/tracer.go b/bitswap/tracer/tracer.go new file mode 100644 index 0000000000..421212adf3 --- /dev/null +++ b/bitswap/tracer/tracer.go @@ -0,0 +1,13 @@ +package tracer + +import ( + bsmsg "github.com/ipfs/boxo/bitswap/message" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// Tracer provides methods to access all messages sent and received by Bitswap. +// This interface can be used to implement various statistics (this is original intent). 
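+//
+// A minimal implementation sketch (hypothetical, not part of this package):
+// a tracer that just counts traffic.
+//
+//	type countingTracer struct {
+//		mu             sync.Mutex
+//		sent, received int
+//	}
+//
+//	func (t *countingTracer) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) {
+//		t.mu.Lock()
+//		t.received++
+//		t.mu.Unlock()
+//	}
+//
+//	func (t *countingTracer) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) {
+//		t.mu.Lock()
+//		t.sent++
+//		t.mu.Unlock()
+//	}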
+type Tracer interface {
+	MessageReceived(peer.ID, bsmsg.BitSwapMessage)
+	MessageSent(peer.ID, bsmsg.BitSwapMessage)
+}
diff --git a/bitswap/wantlist/forward.go b/bitswap/wantlist/forward.go
new file mode 100644
index 0000000000..077ef9a590
--- /dev/null
+++ b/bitswap/wantlist/forward.go
@@ -0,0 +1,23 @@
+package wantlist
+
+import (
+	"github.com/ipfs/boxo/bitswap/client/wantlist"
+	"github.com/ipfs/go-cid"
+)
+
+type (
+	// Deprecated: use wantlist.Entry instead
+	Entry = wantlist.Entry
+	// Deprecated: use wantlist.Wantlist instead
+	Wantlist = wantlist.Wantlist
+)
+
+// Deprecated: use wantlist.New instead
+func New() *Wantlist {
+	return wantlist.New()
+}
+
+// Deprecated: use wantlist.NewRefEntry instead
+func NewRefEntry(c cid.Cid, p int32) Entry {
+	return wantlist.NewRefEntry(c, p)
+}
diff --git a/blocks/blocks.go b/blocks/blocks.go
new file mode 100644
index 0000000000..770c9a38fc
--- /dev/null
+++ b/blocks/blocks.go
@@ -0,0 +1,82 @@
+// Package blocks contains the lowest level of IPLD data structures.
+// A block is raw data accompanied by a CID. The CID contains the multihash
+// corresponding to the block.
+package blocks
+
+import (
+	"errors"
+	"fmt"
+
+	u "github.com/ipfs/boxo/util"
+	cid "github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+)
+
+// ErrWrongHash is returned when the Cid of a block does not match the hash
+// expected from its contents. It is currently used only when debugging.
+var ErrWrongHash = errors.New("data did not match given hash")
+
+// Block provides an abstraction for block implementations.
+type Block interface {
+	RawData() []byte
+	Cid() cid.Cid
+	String() string
+	Loggable() map[string]interface{}
+}
+
+// A BasicBlock is a singular block of data in ipfs. It implements the Block
+// interface.
+type BasicBlock struct {
+	cid  cid.Cid
+	data []byte
+}
+
+// NewBlock creates a Block object from opaque data. It will hash the data.
+func NewBlock(data []byte) *BasicBlock {
+	// TODO: fix assumptions
+	return &BasicBlock{data: data, cid: cid.NewCidV0(u.Hash(data))}
+}
+
+// NewBlockWithCid creates a new block when the hash of the data
+// is already known. This is used to save time in situations where
+// we are able to be confident that the data is correct.
+func NewBlockWithCid(data []byte, c cid.Cid) (*BasicBlock, error) {
+	if u.Debug {
+		chkc, err := c.Prefix().Sum(data)
+		if err != nil {
+			return nil, err
+		}
+
+		if !chkc.Equals(c) {
+			return nil, ErrWrongHash
+		}
+	}
+	return &BasicBlock{data: data, cid: c}, nil
+}
+
+// Multihash returns the hash contained in the block CID.
+func (b *BasicBlock) Multihash() mh.Multihash {
+	return b.cid.Hash()
+}
+
+// RawData returns the block raw contents as a byte slice.
+func (b *BasicBlock) RawData() []byte {
+	return b.data
+}
+
+// Cid returns the content identifier of the block.
+func (b *BasicBlock) Cid() cid.Cid {
+	return b.cid
+}
+
+// String provides a human-readable representation of the block CID.
+func (b *BasicBlock) String() string {
+	return fmt.Sprintf("[Block %s]", b.Cid())
+}
+
+// Loggable returns a go-log loggable item.
+func (b *BasicBlock) Loggable() map[string]interface{} { + return map[string]interface{}{ + "block": b.Cid().String(), + } +} diff --git a/blocks/blocks_test.go b/blocks/blocks_test.go new file mode 100644 index 0000000000..d0c636f116 --- /dev/null +++ b/blocks/blocks_test.go @@ -0,0 +1,98 @@ +package blocks + +import ( + "bytes" + "testing" + + u "github.com/ipfs/boxo/util" + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +func TestBlocksBasic(t *testing.T) { + + // Test empty data + empty := []byte{} + NewBlock(empty) + + // Test nil case + NewBlock(nil) + + // Test some data + NewBlock([]byte("Hello world!")) +} + +func TestData(t *testing.T) { + data := []byte("some data") + block := NewBlock(data) + + if !bytes.Equal(block.RawData(), data) { + t.Error("data is wrong") + } +} + +func TestHash(t *testing.T) { + data := []byte("some other data") + block := NewBlock(data) + + hash, err := mh.Sum(data, mh.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(block.Multihash(), hash) { + t.Error("wrong multihash") + } +} + +func TestCid(t *testing.T) { + data := []byte("yet another data") + block := NewBlock(data) + c := block.Cid() + + if !bytes.Equal(block.Multihash(), c.Hash()) { + t.Error("key contains wrong data") + } +} + +func TestManualHash(t *testing.T) { + oldDebugState := u.Debug + defer (func() { + u.Debug = oldDebugState + })() + + data := []byte("I can't figure out more names .. data") + hash, err := mh.Sum(data, mh.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + c := cid.NewCidV0(hash) + + u.Debug = false + block, err := NewBlockWithCid(data, c) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(block.Multihash(), hash) { + t.Error("wrong multihash") + } + + data[5] = byte((uint32(data[5]) + 5) % 256) // Transfrom hash to be different + block, err = NewBlockWithCid(data, c) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(block.Multihash(), hash) { + t.Error("wrong multihash") + } + + u.Debug = true + + _, err = NewBlockWithCid(data, c) + if err != ErrWrongHash { + t.Fatal(err) + } +} diff --git a/blockservice/LICENSE b/blockservice/LICENSE deleted file mode 100644 index 7d5dcac4d2..0000000000 --- a/blockservice/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2018 Juan Batiz-Benet - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/blockservice/README.md b/blockservice/README.md deleted file mode 100644 index d36c5cc779..0000000000 --- a/blockservice/README.md +++ /dev/null @@ -1,36 +0,0 @@ -go-blockservice -================== - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) -[![Coverage Status](https://codecov.io/gh/ipfs/go-block-format/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-block-format/branch/master) -[![Build Status](https://circleci.com/gh/ipfs/go-blockservice.svg?style=svg)](https://circleci.com/gh/ipfs/go-blockservice) - -> go-blockservice provides a seamless interface to both local and remote storage backends. - -## Lead Maintainer - -[Steven Allen](https://github.com/Stebalien) - -## Table of Contents - -- [TODO](#todo) -- [Contribute](#contribute) -- [License](#license) - -## TODO - -The interfaces here really would like to be merged with the blockstore interfaces. -The 'dagservice' constructor currently takes a blockservice, but it would be really nice -if it could just take a blockstore, and have this package implement a blockstore. - -## Contribute - -PRs are welcome! - -Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - -## License - -MIT © Juan Batiz-Benet diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 8594e253a1..27db7d4a08 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -11,15 +11,15 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - blocks "github.com/ipfs/go-block-format" + blocks "github.com/ipfs/boxo/blocks" + blockstore "github.com/ipfs/boxo/blockstore" + exchange "github.com/ipfs/boxo/exchange" + "github.com/ipfs/boxo/verifcid" cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" - "github.com/ipfs/go-verifcid" - "github.com/ipfs/go-blockservice/internal" + "github.com/ipfs/boxo/blockservice/internal" ) var logger = logging.Logger("blockservice") diff --git a/blockservice/blockservice_test.go b/blockservice/blockservice_test.go index 846ae71695..60522ced4b 100644 --- a/blockservice/blockservice_test.go +++ b/blockservice/blockservice_test.go @@ -4,14 +4,14 @@ import ( "context" "testing" - blocks "github.com/ipfs/go-block-format" + blocks "github.com/ipfs/boxo/blocks" + blockstore "github.com/ipfs/boxo/blockstore" + exchange "github.com/ipfs/boxo/exchange" + offline "github.com/ipfs/boxo/exchange/offline" cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" butil "github.com/ipfs/go-ipfs-blocksutil" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - offline "github.com/ipfs/go-ipfs-exchange-offline" ipld "github.com/ipfs/go-ipld-format" ) diff --git a/blockservice/test/blocks_test.go b/blockservice/test/blocks_test.go index c9e2faee78..311e34afcc 100644 --- a/blockservice/test/blocks_test.go +++ b/blockservice/test/blocks_test.go @@ -7,15 +7,15 @@ import ( "testing" "time" - . "github.com/ipfs/go-blockservice" + . 
"github.com/ipfs/boxo/blockservice" - blocks "github.com/ipfs/go-block-format" + blocks "github.com/ipfs/boxo/blocks" + blockstore "github.com/ipfs/boxo/blockstore" + offline "github.com/ipfs/boxo/exchange/offline" + u "github.com/ipfs/boxo/util" cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - offline "github.com/ipfs/go-ipfs-exchange-offline" - u "github.com/ipfs/go-ipfs-util" ) func newObject(data []byte) blocks.Block { diff --git a/blockservice/test/mock.go b/blockservice/test/mock.go index d55be8a3f4..fa6469fb6d 100644 --- a/blockservice/test/mock.go +++ b/blockservice/test/mock.go @@ -1,11 +1,11 @@ package bstest import ( - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" - "github.com/ipfs/go-blockservice" + testinstance "github.com/ipfs/boxo/bitswap/testinstance" + tn "github.com/ipfs/boxo/bitswap/testnet" + "github.com/ipfs/boxo/blockservice" + mockrouting "github.com/ipfs/boxo/routing/mock" delay "github.com/ipfs/go-ipfs-delay" - mockrouting "github.com/ipfs/go-ipfs-routing/mock" ) // Mocks returns |n| connected mock Blockservices diff --git a/blockstore/arc_cache.go b/blockstore/arc_cache.go new file mode 100644 index 0000000000..9ae0a3e068 --- /dev/null +++ b/blockstore/arc_cache.go @@ -0,0 +1,408 @@ +package blockstore + +import ( + "context" + "sort" + "sync" + + lru "github.com/hashicorp/golang-lru" + blocks "github.com/ipfs/boxo/blocks" + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + metrics "github.com/ipfs/go-metrics-interface" +) + +type cacheHave bool +type cacheSize int + +type lock struct { + mu sync.RWMutex + refcnt int +} + +// arccache wraps a BlockStore with an Adaptive Replacement Cache (ARC) that +// does not store the actual blocks, just metadata about them: existence and +// size. This provides block access-time improvements, allowing +// to short-cut many searches without querying the underlying datastore. 
+type arccache struct { + lklk sync.Mutex + lks map[string]*lock + + cache *lru.TwoQueueCache + + blockstore Blockstore + viewer Viewer + + hits metrics.Counter + total metrics.Counter +} + +var _ Blockstore = (*arccache)(nil) +var _ Viewer = (*arccache)(nil) + +func newARCCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*arccache, error) { + cache, err := lru.New2Q(lruSize) + if err != nil { + return nil, err + } + c := &arccache{cache: cache, blockstore: bs, lks: make(map[string]*lock)} + c.hits = metrics.NewCtx(ctx, "arc.hits_total", "Number of ARC cache hits").Counter() + c.total = metrics.NewCtx(ctx, "arc_total", "Total number of ARC cache requests").Counter() + if v, ok := bs.(Viewer); ok { + c.viewer = v + } + return c, nil +} + +func (b *arccache) lock(k string, write bool) { + b.lklk.Lock() + lk, ok := b.lks[k] + if !ok { + lk = new(lock) + b.lks[k] = lk + } + lk.refcnt++ + b.lklk.Unlock() + if write { + lk.mu.Lock() + } else { + lk.mu.RLock() + } +} + +func (b *arccache) unlock(key string, write bool) { + b.lklk.Lock() + lk := b.lks[key] + lk.refcnt-- + if lk.refcnt == 0 { + delete(b.lks, key) + } + b.lklk.Unlock() + if write { + lk.mu.Unlock() + } else { + lk.mu.RUnlock() + } +} + +func cacheKey(k cid.Cid) string { + return string(k.Hash()) +} + +func (b *arccache) DeleteBlock(ctx context.Context, k cid.Cid) error { + if !k.Defined() { + return nil + } + + key := cacheKey(k) + + if has, _, ok := b.queryCache(key); ok && !has { + return nil + } + + b.lock(key, true) + defer b.unlock(key, true) + + err := b.blockstore.DeleteBlock(ctx, k) + if err == nil { + b.cacheHave(key, false) + } else { + b.cacheInvalidate(key) + } + return err +} + +func (b *arccache) Has(ctx context.Context, k cid.Cid) (bool, error) { + if !k.Defined() { + logger.Error("undefined cid in arccache") + // Return cache invalid so the call to blockstore happens + // in case of invalid key and correct error is created. + return false, nil + } + + key := cacheKey(k) + + if has, _, ok := b.queryCache(key); ok { + return has, nil + } + + b.lock(key, false) + defer b.unlock(key, false) + + has, err := b.blockstore.Has(ctx, k) + if err != nil { + return false, err + } + b.cacheHave(key, has) + return has, nil +} + +func (b *arccache) GetSize(ctx context.Context, k cid.Cid) (int, error) { + if !k.Defined() { + return -1, ipld.ErrNotFound{Cid: k} + } + + key := cacheKey(k) + + if has, blockSize, ok := b.queryCache(key); ok { + if !has { + // don't have it, return + return -1, ipld.ErrNotFound{Cid: k} + } + if blockSize >= 0 { + // have it and we know the size + return blockSize, nil + } + // we have it but don't know the size, ask the datastore. + } + + b.lock(key, false) + defer b.unlock(key, false) + + blockSize, err := b.blockstore.GetSize(ctx, k) + if ipld.IsNotFound(err) { + b.cacheHave(key, false) + } else if err == nil { + b.cacheSize(key, blockSize) + } + return blockSize, err +} + +func (b *arccache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { + // shortcircuit and fall back to Get if the underlying store + // doesn't support Viewer. + if b.viewer == nil { + blk, err := b.Get(ctx, k) + if err != nil { + return err + } + return callback(blk.RawData()) + } + + if !k.Defined() { + return ipld.ErrNotFound{Cid: k} + } + + key := cacheKey(k) + + if has, _, ok := b.queryCache(key); ok && !has { + // short circuit if the cache deterministically tells us the item + // doesn't exist. 
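+	// (A cached "have=false" entry records a previously confirmed absence,
+	// so the not-found error can be returned without taking the per-key
+	// lock or touching the datastore.)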
+ return ipld.ErrNotFound{Cid: k} + } + + b.lock(key, false) + defer b.unlock(key, false) + + var cberr error + var size int + if err := b.viewer.View(ctx, k, func(buf []byte) error { + size = len(buf) + cberr = callback(buf) + return nil + }); err != nil { + if ipld.IsNotFound(err) { + b.cacheHave(key, false) + } + return err + } + + b.cacheSize(key, size) + + return cberr +} + +func (b *arccache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { + if !k.Defined() { + return nil, ipld.ErrNotFound{Cid: k} + } + + key := cacheKey(k) + + if has, _, ok := b.queryCache(key); ok && !has { + return nil, ipld.ErrNotFound{Cid: k} + } + + b.lock(key, false) + defer b.unlock(key, false) + + bl, err := b.blockstore.Get(ctx, k) + if bl == nil && ipld.IsNotFound(err) { + b.cacheHave(key, false) + } else if bl != nil { + b.cacheSize(key, len(bl.RawData())) + } + return bl, err +} + +func (b *arccache) Put(ctx context.Context, bl blocks.Block) error { + key := cacheKey(bl.Cid()) + + if has, _, ok := b.queryCache(key); ok && has { + return nil + } + + b.lock(key, true) + defer b.unlock(key, true) + + err := b.blockstore.Put(ctx, bl) + if err == nil { + b.cacheSize(key, len(bl.RawData())) + } else { + b.cacheInvalidate(key) + } + return err +} + +type keyedBlocks struct { + keys []string + blocks []blocks.Block +} + +func (b *keyedBlocks) Len() int { + return len(b.keys) +} + +func (b *keyedBlocks) Less(i, j int) bool { + return b.keys[i] < b.keys[j] +} + +func (b *keyedBlocks) Swap(i, j int) { + b.keys[i], b.keys[j] = b.keys[j], b.keys[i] + b.blocks[i], b.blocks[j] = b.blocks[j], b.blocks[i] +} + +func (b *keyedBlocks) append(key string, blk blocks.Block) { + b.keys = append(b.keys, key) + b.blocks = append(b.blocks, blk) +} + +func (b *keyedBlocks) isEmpty() bool { + return len(b.keys) == 0 +} + +func (b *keyedBlocks) sortAndDedup() { + if b.isEmpty() { + return + } + + sort.Sort(b) + + // https://github.com/golang/go/wiki/SliceTricks#in-place-deduplicate-comparable + j := 0 + for i := 1; i < len(b.keys); i++ { + if b.keys[j] == b.keys[i] { + continue + } + j++ + b.keys[j] = b.keys[i] + b.blocks[j] = b.blocks[i] + } + + b.keys = b.keys[:j+1] + b.blocks = b.blocks[:j+1] +} + +func newKeyedBlocks(cap int) *keyedBlocks { + return &keyedBlocks{ + keys: make([]string, 0, cap), + blocks: make([]blocks.Block, 0, cap), + } +} + +func (b *arccache) PutMany(ctx context.Context, bs []blocks.Block) error { + good := newKeyedBlocks(len(bs)) + for _, blk := range bs { + // call put on block if result is inconclusive or we are sure that + // the block isn't in storage + key := cacheKey(blk.Cid()) + if has, _, ok := b.queryCache(key); !ok || (ok && !has) { + good.append(key, blk) + } + } + + if good.isEmpty() { + return nil + } + + good.sortAndDedup() + + for _, key := range good.keys { + b.lock(key, true) + } + + defer func() { + for _, key := range good.keys { + b.unlock(key, true) + } + }() + + err := b.blockstore.PutMany(ctx, good.blocks) + if err != nil { + return err + } + for i, key := range good.keys { + b.cacheSize(key, len(good.blocks[i].RawData())) + } + + return nil +} + +func (b *arccache) HashOnRead(enabled bool) { + b.blockstore.HashOnRead(enabled) +} + +func (b *arccache) cacheHave(key string, have bool) { + b.cache.Add(key, cacheHave(have)) +} + +func (b *arccache) cacheSize(key string, blockSize int) { + b.cache.Add(key, cacheSize(blockSize)) +} + +func (b *arccache) cacheInvalidate(key string) { + b.cache.Remove(key) +} + +// queryCache checks if the CID is in the cache. 
If so, it returns:
+//
+//   - exists (bool): whether the CID is known to exist or not.
+//   - size (int): the size if cached, or -1 if not cached.
+//   - ok (bool): whether present in the cache.
+//
+// When ok is false, the answer is inconclusive and the caller must ignore the
+// other two return values. Querying the underlying store is necessary.
+//
+// When ok is true, exists carries the correct answer, and size carries the
+// size, if known, or -1 if not.
+func (b *arccache) queryCache(k string) (exists bool, size int, ok bool) {
+	b.total.Inc()
+
+	h, ok := b.cache.Get(k)
+	if ok {
+		b.hits.Inc()
+		switch h := h.(type) {
+		case cacheHave:
+			return bool(h), -1, true
+		case cacheSize:
+			return true, int(h), true
+		}
+	}
+	return false, -1, false
+}
+
+func (b *arccache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return b.blockstore.AllKeysChan(ctx)
+}
+
+func (b *arccache) GCLock(ctx context.Context) Unlocker {
+	return b.blockstore.(GCBlockstore).GCLock(ctx)
+}
+
+func (b *arccache) PinLock(ctx context.Context) Unlocker {
+	return b.blockstore.(GCBlockstore).PinLock(ctx)
+}
+
+func (b *arccache) GCRequested(ctx context.Context) bool {
+	return b.blockstore.(GCBlockstore).GCRequested(ctx)
+}
diff --git a/blockstore/arc_cache_test.go b/blockstore/arc_cache_test.go
new file mode 100644
index 0000000000..f5a3e7078e
--- /dev/null
+++ b/blockstore/arc_cache_test.go
@@ -0,0 +1,399 @@
+package blockstore
+
+import (
+	"context"
+	"io"
+	"math/rand"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	syncds "github.com/ipfs/go-datastore/sync"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+var exampleBlock = blocks.NewBlock([]byte("foo"))
+
+func testArcCached(ctx context.Context, bs Blockstore) (*arccache, error) {
+	if ctx == nil {
+		ctx = context.TODO()
+	}
+	opts := DefaultCacheOpts()
+	opts.HasBloomFilterSize = 0
+	opts.HasBloomFilterHashes = 0
+	bbs, err := CachedBlockstore(ctx, bs, opts)
+	if err == nil {
+		return bbs.(*arccache), nil
+	}
+	return nil, err
+}
+
+func createStores(t testing.TB) (*arccache, Blockstore, *callbackDatastore) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	bs := NewBlockstore(syncds.MutexWrap(cd))
+	arc, err := testArcCached(context.TODO(), bs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return arc, bs, cd
+}
+
+func trap(message string, cd *callbackDatastore, t *testing.T) {
+	cd.SetFunc(func() {
+		t.Fatal(message)
+	})
+}
+func untrap(cd *callbackDatastore) {
+	cd.SetFunc(func() {})
+}
+
+func TestRemoveCacheEntryOnDelete(t *testing.T) {
+	arc, _, cd := createStores(t)
+
+	arc.Put(bg, exampleBlock)
+
+	cd.Lock()
+	writeHitTheDatastore := false
+	cd.Unlock()
+
+	cd.SetFunc(func() {
+		writeHitTheDatastore = true
+	})
+
+	arc.DeleteBlock(bg, exampleBlock.Cid())
+	arc.Put(bg, exampleBlock)
+	if !writeHitTheDatastore {
+		t.Fail()
+	}
+}
+
+func TestElideDuplicateWrite(t *testing.T) {
+	arc, _, cd := createStores(t)
+
+	arc.Put(bg, exampleBlock)
+	trap("write hit datastore", cd, t)
+	arc.Put(bg, exampleBlock)
+}
+
+func TestHasRequestTriggersCache(t *testing.T) {
+	arc, _, cd := createStores(t)
+
+	arc.Has(bg, exampleBlock.Cid())
+	trap("has hit datastore", cd, t)
+	if has, err := arc.Has(bg, exampleBlock.Cid()); has || err != nil {
+		t.Fatal("has was true but there is no such block")
+	}
+
+	untrap(cd)
+	err := arc.Put(bg, exampleBlock)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	trap("has hit datastore", cd, t)
+
+	if has, err := arc.Has(bg, exampleBlock.Cid()); !has || err != nil {
+		t.Fatal("has returned invalid result")
+	}
+}
+
+func TestGetFillsCache(t *testing.T) {
+	arc, _, cd := createStores(t)
+
+	if bl, err := arc.Get(bg, exampleBlock.Cid()); bl != nil || err == nil {
+		t.Fatal("block was found or there was no error")
+	}
+
+	trap("has hit datastore", cd, t)
+
+	if has, err := arc.Has(bg, exampleBlock.Cid()); has || err != nil {
+		t.Fatal("has was true but there is no such block")
+	}
+	if _, err := arc.GetSize(bg, exampleBlock.Cid()); !ipld.IsNotFound(err) {
+		t.Fatal("getsize was true but there is no such block")
+	}
+
+	untrap(cd)
+
+	if err := arc.Put(bg, exampleBlock); err != nil {
+		t.Fatal(err)
+	}
+
+	trap("has hit datastore", cd, t)
+
+	if has, err := arc.Has(bg, exampleBlock.Cid()); !has || err != nil {
+		t.Fatal("has returned invalid result")
+	}
+	if blockSize, err := arc.GetSize(bg, exampleBlock.Cid()); blockSize == -1 || err != nil {
+		t.Fatal("getsize returned invalid result", blockSize, err)
+	}
+}
+
+func TestGetAndDeleteFalseShortCircuit(t *testing.T) {
+	arc, _, cd := createStores(t)
+
+	arc.Has(bg, exampleBlock.Cid())
+	arc.GetSize(bg, exampleBlock.Cid())
+
+	trap("get hit datastore", cd, t)
+
+	if bl, err := arc.Get(bg, exampleBlock.Cid()); bl != nil || !ipld.IsNotFound(err) {
+		t.Fatal("get returned invalid result")
+	}
+
+	if arc.DeleteBlock(bg, exampleBlock.Cid()) != nil {
+		t.Fatal("expected deletes to be idempotent")
+	}
+}
+
+func TestArcCreationFailure(t *testing.T) {
+	if arc, err := newARCCachedBS(context.TODO(), nil, -1); arc != nil || err == nil {
+		t.Fatal("expected error and no cache")
+	}
+}
+
+func TestInvalidKey(t *testing.T) {
+	arc, _, _ := createStores(t)
+
+	bl, err := arc.Get(bg, cid.Cid{})
+
+	if bl != nil {
+		t.Fatal("blocks should be nil")
+	}
+	if err == nil {
+		t.Fatal("expected error")
+	}
+}
+
+func TestHasAfterSuccessfulGetIsCached(t *testing.T) {
+	arc, bs, cd := createStores(t)
+
+	bs.Put(bg, exampleBlock)
+
+	arc.Get(bg, exampleBlock.Cid())
+
+	trap("has hit datastore", cd, t)
+	arc.Has(bg, exampleBlock.Cid())
+}
+
+func TestGetSizeAfterSuccessfulGetIsCached(t *testing.T) {
+	arc, bs, cd := createStores(t)
+
+	bs.Put(bg, exampleBlock)
+
+	arc.Get(bg, exampleBlock.Cid())
+
+	trap("has hit datastore", cd, t)
+	arc.GetSize(bg, exampleBlock.Cid())
+}
+
+func TestGetSizeAfterSuccessfulHas(t *testing.T) {
+	arc, bs, _ := createStores(t)
+
+	bs.Put(bg, exampleBlock)
+	has, err := arc.Has(bg, exampleBlock.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !has {
+		t.Fatal("expected to have block")
+	}
+
+	if size, err := arc.GetSize(bg, exampleBlock.Cid()); err != nil {
+		t.Fatal(err)
+	} else if size != len(exampleBlock.RawData()) {
+		t.Fatalf("expected size %d, got %d", len(exampleBlock.RawData()), size)
+	}
+}
+
+func TestGetSizeMissingZeroSizeBlock(t *testing.T) {
+	arc, bs, cd := createStores(t)
+	emptyBlock := blocks.NewBlock([]byte{})
+	missingBlock := blocks.NewBlock([]byte("missingBlock"))
+
+	bs.Put(bg, emptyBlock)
+
+	arc.Get(bg, emptyBlock.Cid())
+
+	trap("has hit datastore", cd, t)
+	if blockSize, err := arc.GetSize(bg, emptyBlock.Cid()); blockSize != 0 || err != nil {
+		t.Fatal("getsize returned invalid result")
+	}
+	untrap(cd)
+
+	arc.Get(bg, missingBlock.Cid())
+
+	trap("has hit datastore", cd, t)
+	if _, err := arc.GetSize(bg, missingBlock.Cid()); !ipld.IsNotFound(err) {
+		t.Fatal("getsize returned invalid result")
+	}
+}
+
+func TestDifferentKeyObjectsWork(t *testing.T) {
+	arc, bs, cd := createStores(t)
+
+	bs.Put(bg, exampleBlock)
+
+	arc.Get(bg, exampleBlock.Cid())
+
+	trap("has hit datastore", cd, t)
+	cidstr := exampleBlock.Cid().String()
+
+	ncid, err := cid.Decode(cidstr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	arc.Has(bg, ncid)
+}
+
+func TestPutManyCaches(t *testing.T) {
+	t.Run("happy path PutMany", func(t *testing.T) {
+		arc, _, cd := createStores(t)
+		arc.PutMany(bg, []blocks.Block{exampleBlock})
+
+		trap("has hit datastore", cd, t)
+		arc.Has(bg, exampleBlock.Cid())
+		arc.GetSize(bg, exampleBlock.Cid())
+		untrap(cd)
+		arc.DeleteBlock(bg, exampleBlock.Cid())
+
+		arc.Put(bg, exampleBlock)
+		trap("PutMany has hit datastore", cd, t)
+		arc.PutMany(bg, []blocks.Block{exampleBlock})
+	})
+
+	t.Run("PutMany with duplicates", func(t *testing.T) {
+		arc, _, cd := createStores(t)
+		arc.PutMany(bg, []blocks.Block{exampleBlock, exampleBlock})
+
+		trap("has hit datastore", cd, t)
+		arc.Has(bg, exampleBlock.Cid())
+		arc.GetSize(bg, exampleBlock.Cid())
+		untrap(cd)
+		arc.DeleteBlock(bg, exampleBlock.Cid())
+
+		arc.Put(bg, exampleBlock)
+		trap("PutMany has hit datastore", cd, t)
+		arc.PutMany(bg, []blocks.Block{exampleBlock})
+	})
+}
+
+func BenchmarkARCCacheConcurrentOps(b *testing.B) {
+	// ~4k blocks seems high enough to be realistic,
+	// but low enough to cause collisions.
+	// Keep it as a power of 2, to simplify code below.
+	const numBlocks = 4 << 10
+
+	dummyBlocks := make([]blocks.Block, numBlocks)
+
+	{
+		// scope dummyRand to prevent its unsafe concurrent use below
+		dummyRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+		for i := range dummyBlocks {
+			dummy := make([]byte, 32)
+			if _, err := io.ReadFull(dummyRand, dummy); err != nil {
+				b.Fatal(err)
+			}
+			dummyBlocks[i] = blocks.NewBlock(dummy)
+		}
+	}
+
+	// Each test begins with half the blocks present in the cache.
+	// This allows test cases to have both hits and misses,
+	// regardless of whether or not they do Puts.
+	putHalfBlocks := func(arc *arccache) {
+		for i, block := range dummyBlocks {
+			if i%2 == 0 {
+				if err := arc.Put(bg, block); err != nil {
+					b.Fatal(err)
+				}
+			}
+		}
+	}
+
+	// We always mix just two operations at a time.
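+	// (Keeping it to two ops makes the opCounts balance check at the end of
+	// the benchmark meaningful: a fair RNG should pick each op roughly
+	// equally often.)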
+ const numOps = 2 + var testOps = []struct { + name string + ops [numOps]func(*arccache, blocks.Block) + }{ + {"PutDelete", [...]func(*arccache, blocks.Block){ + func(arc *arccache, block blocks.Block) { + arc.Put(bg, block) + }, + func(arc *arccache, block blocks.Block) { + arc.DeleteBlock(bg, block.Cid()) + }, + }}, + {"GetDelete", [...]func(*arccache, blocks.Block){ + func(arc *arccache, block blocks.Block) { + arc.Get(bg, block.Cid()) + }, + func(arc *arccache, block blocks.Block) { + arc.DeleteBlock(bg, block.Cid()) + }, + }}, + {"GetPut", [...]func(*arccache, blocks.Block){ + func(arc *arccache, block blocks.Block) { + arc.Get(bg, block.Cid()) + }, + func(arc *arccache, block blocks.Block) { + arc.Put(bg, block) + }, + }}, + } + + for _, test := range testOps { + test := test // prevent reuse of the range var + b.Run(test.name, func(b *testing.B) { + arc, _, _ := createStores(b) + putHalfBlocks(arc) + var opCounts [numOps]uint64 + + b.ResetTimer() + b.ReportAllocs() + + b.RunParallel(func(pb *testing.PB) { + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + for pb.Next() { + n := rnd.Int63() + blockIdx := n % numBlocks // lower bits decide the block + opIdx := (n / numBlocks) % numOps // higher bits decide what operation + + block := dummyBlocks[blockIdx] + op := test.ops[opIdx] + op(arc, block) + + atomic.AddUint64(&opCounts[opIdx], 1) + } + }) + + // We expect each op to fire roughly an equal amount of times. + // Error otherwise, as that likely means the logic is wrong. + var minIdx, maxIdx int + var minCount, maxCount uint64 + for opIdx, count := range opCounts { + if minCount == 0 || count < minCount { + minIdx = opIdx + minCount = count + } + if maxCount == 0 || count > maxCount { + maxIdx = opIdx + maxCount = count + } + } + // Skip this check if we ran few times, to avoid false positives. + if maxCount > 100 { + ratio := float64(maxCount) / float64(minCount) + if maxRatio := 2.0; ratio > maxRatio { + b.Fatalf("op %d ran %fx as many times as %d", maxIdx, ratio, minIdx) + } + } + + }) + } +} diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go new file mode 100644 index 0000000000..f82c875650 --- /dev/null +++ b/blockstore/blockstore.go @@ -0,0 +1,346 @@ +// Package blockstore implements a thin wrapper over a datastore, giving a +// clean interface for Getting and Putting block objects. +package blockstore + +import ( + "context" + "errors" + "sync" + "sync/atomic" + + blocks "github.com/ipfs/boxo/blocks" + dshelp "github.com/ipfs/boxo/datastore/dshelp" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dsns "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log" + uatomic "go.uber.org/atomic" +) + +var logger = logging.Logger("blockstore") + +// BlockPrefix namespaces blockstore datastores +var BlockPrefix = ds.NewKey("blocks") + +// ErrHashMismatch is an error returned when the hash of a block +// is different than expected. +var ErrHashMismatch = errors.New("block in storage has different hash than requested") + +// Blockstore wraps a Datastore block-centered methods and provides a layer +// of abstraction which allows to add different caching strategies. 
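+//
+// A minimal round trip, as exercised by the tests in this package (a sketch
+// only; error handling elided):
+//
+//	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+//	blk := blocks.NewBlock([]byte("some data"))
+//	err := bs.Put(ctx, blk)
+//	got, err := bs.Get(ctx, blk.Cid()) // same raw data as blk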
+type Blockstore interface {
+	DeleteBlock(context.Context, cid.Cid) error
+	Has(context.Context, cid.Cid) (bool, error)
+	Get(context.Context, cid.Cid) (blocks.Block, error)
+
+	// GetSize returns the size of the block mapped to the given CID
+	GetSize(context.Context, cid.Cid) (int, error)
+
+	// Put puts a given block to the underlying datastore
+	Put(context.Context, blocks.Block) error
+
+	// PutMany puts a slice of blocks at the same time using batching
+	// capabilities of the underlying datastore whenever possible.
+	PutMany(context.Context, []blocks.Block) error
+
+	// AllKeysChan returns a channel from which
+	// the CIDs in the Blockstore can be read. It should respect
+	// the given context, closing the channel if it becomes Done.
+	AllKeysChan(ctx context.Context) (<-chan cid.Cid, error)
+
+	// HashOnRead specifies if every read block should be
+	// rehashed to make sure it matches its CID.
+	HashOnRead(enabled bool)
+}
+
+// Viewer can be implemented by blockstores that offer zero-copy access to
+// values.
+//
+// Callers of View must not mutate or retain the byte slice, as it could be
+// an mmapped memory region, or a pooled byte buffer.
+//
+// View is especially suitable for deserialising in place.
+//
+// The callback will be called only if the query operation is successful (and
+// the block is found); otherwise, the error will be propagated. Errors returned
+// by the callback will be propagated as well.
+type Viewer interface {
+	View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error
+}
+
+// GCLocker abstracts the functionality to lock a blockstore when performing
+// garbage-collection operations.
+type GCLocker interface {
+	// GCLock locks the blockstore for garbage collection. No operations
+	// that expect to finish with a pin should occur simultaneously.
+	// Reading during GC is safe, and requires no lock.
+	GCLock(context.Context) Unlocker
+
+	// PinLock locks the blockstore for sequences of puts expected to finish
+	// with a pin (before GC). Multiple put->pin sequences can write through
+	// at the same time, but no GC should happen simultaneously.
+	// Reading during Pinning is safe, and requires no lock.
+	PinLock(context.Context) Unlocker
+
+	// GCRequested returns true if GCLock has been called and is waiting to
+	// take the lock.
+	GCRequested(context.Context) bool
+}
+
+// GCBlockstore is a blockstore that can safely run garbage-collection
+// operations.
+type GCBlockstore interface {
+	Blockstore
+	GCLocker
+}
+
+// NewGCBlockstore returns a default implementation of GCBlockstore
+// using the given Blockstore and GCLocker.
+func NewGCBlockstore(bs Blockstore, gcl GCLocker) GCBlockstore {
+	return gcBlockstore{bs, gcl}
+}
+
+type gcBlockstore struct {
+	Blockstore
+	GCLocker
+}
+
+// Option is an option for the default Blockstore implementation.
+type Option struct {
+	f func(bs *blockstore)
+}
+
+// WriteThrough skips checking if the blockstore already has a block before
+// writing it.
+func WriteThrough() Option {
+	return Option{
+		func(bs *blockstore) {
+			bs.writeThrough = true
+		},
+	}
+}
+
+// NoPrefix avoids wrapping the blockstore into the BlockPrefix namespace
+// ("/blocks"), so keys will not be modified in any way.
+func NoPrefix() Option {
+	return Option{
+		func(bs *blockstore) {
+			bs.noPrefix = true
+		},
+	}
+}
+
+// NewBlockstore returns a default Blockstore implementation
+// using the provided datastore.Batching backend.
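+//
+// Options sketch (both options are defined above): skip the duplicate-write
+// Has check and keep raw, unprefixed keys.
+//
+//	bs := NewBlockstore(d, WriteThrough(), NoPrefix())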
+func NewBlockstore(d ds.Batching, opts ...Option) Blockstore { + bs := &blockstore{ + datastore: d, + rehash: uatomic.NewBool(false), + } + + for _, o := range opts { + o.f(bs) + } + + if !bs.noPrefix { + bs.datastore = dsns.Wrap(bs.datastore, BlockPrefix) + } + return bs +} + +// NewBlockstoreNoPrefix returns a default Blockstore implementation +// using the provided datastore.Batching backend. +// This constructor does not modify input keys in any way +// +// Deprecated: Use NewBlockstore with the NoPrefix option instead. +func NewBlockstoreNoPrefix(d ds.Batching) Blockstore { + return NewBlockstore(d, NoPrefix()) +} + +type blockstore struct { + datastore ds.Batching + + rehash *uatomic.Bool + writeThrough bool + noPrefix bool +} + +func (bs *blockstore) HashOnRead(enabled bool) { + bs.rehash.Store(enabled) +} + +func (bs *blockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { + if !k.Defined() { + logger.Error("undefined cid in blockstore") + return nil, ipld.ErrNotFound{Cid: k} + } + bdata, err := bs.datastore.Get(ctx, dshelp.MultihashToDsKey(k.Hash())) + if err == ds.ErrNotFound { + return nil, ipld.ErrNotFound{Cid: k} + } + if err != nil { + return nil, err + } + if bs.rehash.Load() { + rbcid, err := k.Prefix().Sum(bdata) + if err != nil { + return nil, err + } + + if !rbcid.Equals(k) { + return nil, ErrHashMismatch + } + + return blocks.NewBlockWithCid(bdata, rbcid) + } + return blocks.NewBlockWithCid(bdata, k) +} + +func (bs *blockstore) Put(ctx context.Context, block blocks.Block) error { + k := dshelp.MultihashToDsKey(block.Cid().Hash()) + + // Has is cheaper than Put, so see if we already have it + if !bs.writeThrough { + exists, err := bs.datastore.Has(ctx, k) + if err == nil && exists { + return nil // already stored. + } + } + return bs.datastore.Put(ctx, k, block.RawData()) +} + +func (bs *blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + if len(blocks) == 1 { + // performance fast-path + return bs.Put(ctx, blocks[0]) + } + + t, err := bs.datastore.Batch(ctx) + if err != nil { + return err + } + for _, b := range blocks { + k := dshelp.MultihashToDsKey(b.Cid().Hash()) + + if !bs.writeThrough { + exists, err := bs.datastore.Has(ctx, k) + if err == nil && exists { + continue + } + } + + err = t.Put(ctx, k, b.RawData()) + if err != nil { + return err + } + } + return t.Commit(ctx) +} + +func (bs *blockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { + return bs.datastore.Has(ctx, dshelp.MultihashToDsKey(k.Hash())) +} + +func (bs *blockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { + size, err := bs.datastore.GetSize(ctx, dshelp.MultihashToDsKey(k.Hash())) + if err == ds.ErrNotFound { + return -1, ipld.ErrNotFound{Cid: k} + } + return size, err +} + +func (bs *blockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { + return bs.datastore.Delete(ctx, dshelp.MultihashToDsKey(k.Hash())) +} + +// AllKeysChan runs a query for keys from the blockstore. +// this is very simplistic, in the future, take dsq.Query as a param? +// +// AllKeysChan respects context. +func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + + // KeysOnly, because that would be _a lot_ of data. 
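+	// (A values query would stream every block's contents from the
+	// datastore just to enumerate CIDs; keys alone are enough here.)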
+// AllKeysChan runs a query for keys from the blockstore.
+// This is very simplistic; in the future, this may take a dsq.Query as a param.
+//
+// AllKeysChan respects context.
+func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+
+	// KeysOnly, because that would be _a lot_ of data.
+	q := dsq.Query{KeysOnly: true}
+	res, err := bs.datastore.Query(ctx, q)
+	if err != nil {
+		return nil, err
+	}
+
+	output := make(chan cid.Cid, dsq.KeysOnlyBufSize)
+	go func() {
+		defer func() {
+			res.Close() // ensure exit (signals early exit, too)
+			close(output)
+		}()
+
+		for {
+			e, ok := res.NextSync()
+			if !ok {
+				return
+			}
+			if e.Error != nil {
+				logger.Errorf("blockstore.AllKeysChan got err: %s", e.Error)
+				return
+			}
+
+			// Extract the binary multihash from the datastore key.
+			bk, err := dshelp.BinaryFromDsKey(ds.RawKey(e.Key))
+			if err != nil {
+				logger.Warningf("error parsing key from binary: %s", err)
+				continue
+			}
+			k := cid.NewCidV1(cid.Raw, bk)
+			select {
+			case <-ctx.Done():
+				return
+			case output <- k:
+			}
+		}
+	}()
+
+	return output, nil
+}
+
+// NewGCLocker returns a default implementation of
+// GCLocker using standard [RW] mutexes.
+func NewGCLocker() GCLocker {
+	return &gclocker{}
+}
+
+type gclocker struct {
+	lk    sync.RWMutex
+	gcreq int32
+}
+
+// Unlocker represents an object which can Unlock
+// something.
+type Unlocker interface {
+	Unlock(context.Context)
+}
+
+type unlocker struct {
+	unlock func()
+}
+
+func (u *unlocker) Unlock(_ context.Context) {
+	u.unlock()
+	u.unlock = nil // ensure it's not called twice
+}
+
+func (bs *gclocker) GCLock(_ context.Context) Unlocker {
+	atomic.AddInt32(&bs.gcreq, 1)
+	bs.lk.Lock()
+	atomic.AddInt32(&bs.gcreq, -1)
+	return &unlocker{bs.lk.Unlock}
+}
+
+func (bs *gclocker) PinLock(_ context.Context) Unlocker {
+	bs.lk.RLock()
+	return &unlocker{bs.lk.RUnlock}
+}
+
+func (bs *gclocker) GCRequested(_ context.Context) bool {
+	return atomic.LoadInt32(&bs.gcreq) > 0
+}
diff --git a/blockstore/blockstore_test.go b/blockstore/blockstore_test.go
new file mode 100644
index 0000000000..6231e119cd
--- /dev/null
+++ b/blockstore/blockstore_test.go
@@ -0,0 +1,333 @@
+package blockstore
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"testing"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	u "github.com/ipfs/boxo/util"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	dsq "github.com/ipfs/go-datastore/query"
+	ds_sync "github.com/ipfs/go-datastore/sync"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+func TestGetWhenKeyNotPresent(t *testing.T) {
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	c := cid.NewCidV0(u.Hash([]byte("stuff")))
+	bl, err := bs.Get(bg, c)
+
+	if bl != nil {
+		t.Error("nil block expected")
+	}
+	if err == nil {
+		t.Error("error expected, got nil")
+	}
+}
+
+func TestGetWhenKeyIsNil(t *testing.T) {
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	_, err := bs.Get(bg, cid.Cid{})
+	if !ipld.IsNotFound(err) {
+		t.Fail()
+	}
+}
+
+func TestPutThenGetBlock(t *testing.T) {
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	block := blocks.NewBlock([]byte("some data"))
+
+	err := bs.Put(bg, block)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	blockFromBlockstore, err := bs.Get(bg, block.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(block.RawData(), blockFromBlockstore.RawData()) {
+		t.Fail()
+	}
+}
+
+func TestCidv0v1(t *testing.T) {
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	block := blocks.NewBlock([]byte("some data"))
+
+	err := bs.Put(bg, block)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	blockFromBlockstore, err := bs.Get(bg, cid.NewCidV1(cid.DagProtobuf, block.Cid().Hash()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(block.RawData(), blockFromBlockstore.RawData()) {
+		t.Fail()
+	}
+}
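
AllKeysChan's context handling (above) means a consumer can stop early without leaking the producer goroutine: cancelling the context unblocks the channel send and the deferred Close runs. A hedged sketch; `enough` is a hypothetical predicate, not part of this patch:

```go
// Sketch: stop enumerating early; cancel() lets the producer goroutine exit.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ch, err := bs.AllKeysChan(ctx)
if err != nil {
	return err
}
for c := range ch {
	if enough(c) { // hypothetical stop condition
		cancel() // producer observes ctx.Done() and closes the channel
		break
	}
}
```
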
+func TestPutThenGetSizeBlock(t *testing.T) {
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	block := blocks.NewBlock([]byte("some data"))
+	missingBlock := blocks.NewBlock([]byte("missingBlock"))
+	emptyBlock := blocks.NewBlock([]byte{})
+
+	err := bs.Put(bg, block)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	blockSize, err := bs.GetSize(bg, block.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(block.RawData()) != blockSize {
+		t.Fail()
+	}
+
+	err = bs.Put(bg, emptyBlock)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if blockSize, err := bs.GetSize(bg, emptyBlock.Cid()); blockSize != 0 || err != nil {
+		t.Fatal(err)
+	}
+
+	if blockSize, err := bs.GetSize(bg, missingBlock.Cid()); blockSize != -1 || err == nil {
+		t.Fatal("getsize returned invalid result")
+	}
+}
+
+type countHasDS struct {
+	ds.Datastore
+	hasCount int
+}
+
+func (ds *countHasDS) Has(ctx context.Context, key ds.Key) (exists bool, err error) {
+	ds.hasCount++
+	return ds.Datastore.Has(ctx, key)
+}
+
+func TestPutUsesHas(t *testing.T) {
+	// Some datastores rely on the implementation detail that Put checks Has
+	// first, to avoid overriding existing objects' metadata. This test ensures
+	// that Blockstore continues to behave this way.
+	// Please ping https://github.com/ipfs/boxo/blockstore/pull/47 if this
+	// behavior is being removed.
+	ds := &countHasDS{
+		Datastore: ds.NewMapDatastore(),
+	}
+	bs := NewBlockstore(ds_sync.MutexWrap(ds))
+	bl := blocks.NewBlock([]byte("some data"))
+	if err := bs.Put(bg, bl); err != nil {
+		t.Fatal(err)
+	}
+	if err := bs.Put(bg, bl); err != nil {
+		t.Fatal(err)
+	}
+	if ds.hasCount != 2 {
+		t.Errorf("Blockstore did not call Has before attempting Put, this breaks compatibility")
+	}
+}
+
+func TestHashOnRead(t *testing.T) {
+	originalDebug := u.Debug
+	defer (func() {
+		u.Debug = originalDebug
+	})()
+	u.Debug = false
+
+	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+	bl := blocks.NewBlock([]byte("some data"))
+	blBad, err := blocks.NewBlockWithCid([]byte("some other data"), bl.Cid())
+	if err != nil {
+		t.Fatal("debug is off, still got an error")
+	}
+	bl2 := blocks.NewBlock([]byte("some other data"))
+	bs.Put(bg, blBad)
+	bs.Put(bg, bl2)
+	bs.HashOnRead(true)
+
+	if _, err := bs.Get(bg, bl.Cid()); err != ErrHashMismatch {
+		t.Fatalf("expected '%v' got '%v'\n", ErrHashMismatch, err)
+	}
+
+	if b, err := bs.Get(bg, bl2.Cid()); err != nil || b.String() != bl2.String() {
+		t.Fatal("got wrong blocks")
+	}
+}
+
+func newBlockStoreWithKeys(t *testing.T, d ds.Datastore, N int) (Blockstore, []cid.Cid) {
+	if d == nil {
+		d = ds.NewMapDatastore()
+	}
+	bs := NewBlockstore(ds_sync.MutexWrap(d))
+
+	keys := make([]cid.Cid, N)
+	for i := 0; i < N; i++ {
+		block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
+		err := bs.Put(bg, block)
+		if err != nil {
+			t.Fatal(err)
+		}
+		keys[i] = block.Cid()
+	}
+	return bs, keys
+}
+
+func collect(ch <-chan cid.Cid) []cid.Cid {
+	var keys []cid.Cid
+	for k := range ch {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func TestAllKeysSimple(t *testing.T) {
+	bs, keys := newBlockStoreWithKeys(t, nil, 100)
+
+	ctx := context.Background()
+	ch, err := bs.AllKeysChan(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	keys2 := collect(ch)
+
+	// for _, k2 := range keys2 {
+	//	t.Log("found ", k2.B58String())
+	// }
+
+	expectMatches(t, keys, keys2)
+}
+
+func TestAllKeysRespectsContext(t *testing.T) {
+	N := 100
+
+	d := &queryTestDS{ds: ds.NewMapDatastore()}
+	bs, _ := newBlockStoreWithKeys(t, d, N)
+
+	started := make(chan struct{}, 1)
+	done := make(chan 
struct{}, 1) + errors := make(chan error, 100) + + getKeys := func(ctx context.Context) { + started <- struct{}{} + ch, err := bs.AllKeysChan(ctx) // once without cancelling + if err != nil { + errors <- err + } + _ = collect(ch) + done <- struct{}{} + errors <- nil // a nil one to signal break + } + + var results dsq.Results + var resultsmu = make(chan struct{}) + resultChan := make(chan dsq.Result) + d.SetFunc(func(q dsq.Query) (dsq.Results, error) { + results = dsq.ResultsWithChan(q, resultChan) + resultsmu <- struct{}{} + return results, nil + }) + + go getKeys(context.Background()) + + // make sure it's waiting. + <-started + <-resultsmu + select { + case <-done: + t.Fatal("sync is wrong") + case <-results.Process().Closing(): + t.Fatal("should not be closing") + case <-results.Process().Closed(): + t.Fatal("should not be closed") + default: + } + + e := dsq.Entry{Key: BlockPrefix.ChildString("foo").String()} + resultChan <- dsq.Result{Entry: e} // let it go. + close(resultChan) + <-done // should be done now. + <-results.Process().Closed() // should be closed now + + // print any errors + for err := range errors { + if err == nil { + break + } + t.Error(err) + } + +} + +func expectMatches(t *testing.T, expect, actual []cid.Cid) { + t.Helper() + + if len(expect) != len(actual) { + t.Errorf("expect and actual differ: %d != %d", len(expect), len(actual)) + } + + actualSet := make(map[string]bool, len(actual)) + for _, k := range actual { + actualSet[string(k.Hash())] = true + } + + for _, ek := range expect { + if !actualSet[string(ek.Hash())] { + t.Error("expected key not found: ", ek) + } + } +} + +type queryTestDS struct { + cb func(q dsq.Query) (dsq.Results, error) + ds ds.Datastore +} + +func (c *queryTestDS) SetFunc(f func(dsq.Query) (dsq.Results, error)) { c.cb = f } + +func (c *queryTestDS) Put(ctx context.Context, key ds.Key, value []byte) (err error) { + return c.ds.Put(ctx, key, value) +} + +func (c *queryTestDS) Get(ctx context.Context, key ds.Key) (value []byte, err error) { + return c.ds.Get(ctx, key) +} + +func (c *queryTestDS) Has(ctx context.Context, key ds.Key) (exists bool, err error) { + return c.ds.Has(ctx, key) +} + +func (c *queryTestDS) GetSize(ctx context.Context, key ds.Key) (size int, err error) { + return c.ds.GetSize(ctx, key) +} + +func (c *queryTestDS) Delete(ctx context.Context, key ds.Key) (err error) { + return c.ds.Delete(ctx, key) +} + +func (c *queryTestDS) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) { + if c.cb != nil { + return c.cb(q) + } + return c.ds.Query(ctx, q) +} + +func (c *queryTestDS) Sync(ctx context.Context, key ds.Key) error { + return c.ds.Sync(ctx, key) +} + +func (c *queryTestDS) Batch(_ context.Context) (ds.Batch, error) { + return ds.NewBasicBatch(c), nil +} +func (c *queryTestDS) Close() error { + return nil +} diff --git a/blockstore/bloom_cache.go b/blockstore/bloom_cache.go new file mode 100644 index 0000000000..a6ef19acf9 --- /dev/null +++ b/blockstore/bloom_cache.go @@ -0,0 +1,231 @@ +package blockstore + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + bloom "github.com/ipfs/bbloom" + blocks "github.com/ipfs/boxo/blocks" + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + metrics "github.com/ipfs/go-metrics-interface" +) + +// bloomCached returns a Blockstore that caches Has requests using a Bloom +// filter. bloomSize is size of bloom filter in bytes. hashCount specifies the +// number of hashing functions in the bloom filter (usually known as k). 
+func bloomCached(ctx context.Context, bs Blockstore, bloomSize, hashCount int) (*bloomcache, error) {
+	bl, err := bloom.New(float64(bloomSize), float64(hashCount))
+	if err != nil {
+		return nil, err
+	}
+	bc := &bloomcache{
+		blockstore: bs,
+		bloom:      bl,
+		hits: metrics.NewCtx(ctx, "bloom.hits_total",
+			"Number of cache hits in bloom cache").Counter(),
+		total: metrics.NewCtx(ctx, "bloom_total",
+			"Total number of requests to bloom cache").Counter(),
+		buildChan: make(chan struct{}),
+	}
+	if v, ok := bs.(Viewer); ok {
+		bc.viewer = v
+	}
+	go func() {
+		err := bc.build(ctx)
+		if err != nil {
+			select {
+			case <-ctx.Done():
+				logger.Warning("Cache rebuild closed by context finishing: ", err)
+			default:
+				logger.Error(err)
+			}
+			return
+		}
+		if metrics.Active() {
+			fill := metrics.NewCtx(ctx, "bloom_fill_ratio",
+				"Ratio of bloom filter fullness (updated once a minute)").Gauge()
+
+			t := time.NewTicker(1 * time.Minute)
+			defer t.Stop()
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-t.C:
+					fill.Set(bc.bloom.FillRatioTS())
+				}
+			}
+		}
+	}()
+	return bc, nil
+}
+
+type bloomcache struct {
+	active int32
+
+	bloom    *bloom.Bloom
+	buildErr error
+
+	buildChan  chan struct{}
+	blockstore Blockstore
+	viewer     Viewer
+
+	// Statistics
+	hits  metrics.Counter
+	total metrics.Counter
+}
+
+var _ Blockstore = (*bloomcache)(nil)
+var _ Viewer = (*bloomcache)(nil)
+
+func (b *bloomcache) BloomActive() bool {
+	return atomic.LoadInt32(&b.active) != 0
+}
+
+func (b *bloomcache) Wait(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-b.buildChan:
+		return b.buildErr
+	}
+}
+
+func (b *bloomcache) build(ctx context.Context) error {
+	evt := logger.EventBegin(ctx, "bloomcache.build")
+	defer evt.Done()
+	defer close(b.buildChan)
+
+	ch, err := b.blockstore.AllKeysChan(ctx)
+	if err != nil {
+		b.buildErr = fmt.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err)
+		return b.buildErr
+	}
+	for {
+		select {
+		case key, ok := <-ch:
+			if !ok {
+				atomic.StoreInt32(&b.active, 1)
+				return nil
+			}
+			b.bloom.AddTS(key.Hash()) // Use binary key, the more compact the better
+		case <-ctx.Done():
+			b.buildErr = ctx.Err()
+			return b.buildErr
+		}
+	}
+}
+
+func (b *bloomcache) DeleteBlock(ctx context.Context, k cid.Cid) error {
+	if has, ok := b.hasCached(k); ok && !has {
+		return nil
+	}
+
+	return b.blockstore.DeleteBlock(ctx, k)
+}
+
+// hasCached checks the bloom filter.
+// If ok == false, the answer is inconclusive.
+// If ok == true, has answers the question: is the block contained?
+func (b *bloomcache) hasCached(k cid.Cid) (has bool, ok bool) {
+	b.total.Inc()
+	if !k.Defined() {
+		logger.Error("undefined in bloom cache")
+		// Return "cache invalid", so that the call for an invalid key
+		// is forwarded deeper to the blockstore.
+		return false, false
+	}
+	if b.BloomActive() {
+		blr := b.bloom.HasTS(k.Hash())
+		if !blr { // "not contained" is the only conclusive answer the bloom filter gives
+			b.hits.Inc()
+			return false, true
+		}
+	}
+	return false, false
+}
+
+func (b *bloomcache) Has(ctx context.Context, k cid.Cid) (bool, error) {
+	if has, ok := b.hasCached(k); ok {
+		return has, nil
+	}
+
+	return b.blockstore.Has(ctx, k)
+}
+
+func (b *bloomcache) GetSize(ctx context.Context, k cid.Cid) (int, error) {
+	if has, ok := b.hasCached(k); ok && !has {
+		return -1, ipld.ErrNotFound{Cid: k}
+	}
+
+	return b.blockstore.GetSize(ctx, k)
+}
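
Because the filter is built asynchronously from AllKeysChan, Has only becomes conclusive once the build completes. Callers that want deterministic cache behaviour can wait for it, as the tests further below do. A hedged sketch; since the *bloomcache type is unexported, this pattern only applies within this package:

```go
// Sketch: wait for the bloom filter build before relying on cached answers.
cbs, err := CachedBlockstore(ctx, bs, DefaultCacheOpts())
if err != nil {
	return err
}
if bc, ok := cbs.(*bloomcache); ok {
	if err := bc.Wait(ctx); err != nil {
		// Build failed or ctx expired: Has still works, it just always
		// falls through to the wrapped blockstore.
	}
}
```
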
+func (b *bloomcache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
+	if b.viewer == nil {
+		blk, err := b.Get(ctx, k)
+		if err != nil {
+			return err
+		}
+		return callback(blk.RawData())
+	}
+
+	if has, ok := b.hasCached(k); ok && !has {
+		return ipld.ErrNotFound{Cid: k}
+	}
+	return b.viewer.View(ctx, k, callback)
+}
+
+func (b *bloomcache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+	if has, ok := b.hasCached(k); ok && !has {
+		return nil, ipld.ErrNotFound{Cid: k}
+	}
+
+	return b.blockstore.Get(ctx, k)
+}
+
+func (b *bloomcache) Put(ctx context.Context, bl blocks.Block) error {
+	// See comment in PutMany
+	err := b.blockstore.Put(ctx, bl)
+	if err == nil {
+		b.bloom.AddTS(bl.Cid().Hash())
+	}
+	return err
+}
+
+func (b *bloomcache) PutMany(ctx context.Context, bs []blocks.Block) error {
+	// The bloom cache gives a conclusive result only if a key is not
+	// contained. To reduce the number of puts we would need conclusive
+	// information that a block is already contained, so PutMany can't be
+	// improved with the bloom cache and we just do a passthrough.
+	err := b.blockstore.PutMany(ctx, bs)
+	if err != nil {
+		return err
+	}
+	for _, bl := range bs {
+		b.bloom.AddTS(bl.Cid().Hash())
+	}
+	return nil
+}
+
+func (b *bloomcache) HashOnRead(enabled bool) {
+	b.blockstore.HashOnRead(enabled)
+}
+
+func (b *bloomcache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return b.blockstore.AllKeysChan(ctx)
+}
+
+func (b *bloomcache) GCLock(ctx context.Context) Unlocker {
+	return b.blockstore.(GCBlockstore).GCLock(ctx)
+}
+
+func (b *bloomcache) PinLock(ctx context.Context) Unlocker {
+	return b.blockstore.(GCBlockstore).PinLock(ctx)
+}
+
+func (b *bloomcache) GCRequested(ctx context.Context) bool {
+	return b.blockstore.(GCBlockstore).GCRequested(ctx)
+}
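
View above shows the Viewer fallback pattern this package uses in several wrappers (idstore, later in this patch, does the same): try the zero-copy path when the inner store supports it, otherwise fall back to Get. A minimal sketch of consuming a store through that interface:

```go
// Sketch: zero-copy read through Viewer, with a Get fallback.
// The data slice passed to the callback must not be retained.
func blockLen(ctx context.Context, bs Blockstore, c cid.Cid) (n int, err error) {
	if v, ok := bs.(Viewer); ok {
		err = v.View(ctx, c, func(data []byte) error {
			n = len(data) // inspect in place, no copy
			return nil
		})
		return n, err
	}
	blk, err := bs.Get(ctx, c) // fallback copies the block out
	if err != nil {
		return 0, err
	}
	return len(blk.RawData()), nil
}
```
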
diff --git a/blockstore/bloom_cache_test.go b/blockstore/bloom_cache_test.go
new file mode 100644
index 0000000000..137e450f46
--- /dev/null
+++ b/blockstore/bloom_cache_test.go
@@ -0,0 +1,215 @@
+package blockstore
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	ds "github.com/ipfs/go-datastore"
+	dsq "github.com/ipfs/go-datastore/query"
+	syncds "github.com/ipfs/go-datastore/sync"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+var bg = context.Background()
+
+func testBloomCached(ctx context.Context, bs Blockstore) (*bloomcache, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	opts := DefaultCacheOpts()
+	opts.HasARCCacheSize = 0
+	bbs, err := CachedBlockstore(ctx, bs, opts)
+	if err == nil {
+		return bbs.(*bloomcache), nil
+	}
+	return nil, err
+}
+
+func TestPutManyAddsToBloom(t *testing.T) {
+	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	cachedbs, err := testBloomCached(ctx, bs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cachedbs.Wait(ctx); err != nil {
+		t.Fatalf("Failed while waiting for the filter to build: %d", cachedbs.bloom.ElementsAdded())
+	}
+
+	block1 := blocks.NewBlock([]byte("foo"))
+	block2 := blocks.NewBlock([]byte("bar"))
+	emptyBlock := blocks.NewBlock([]byte{})
+
+	cachedbs.PutMany(bg, []blocks.Block{block1, emptyBlock})
+	has, err := cachedbs.Has(bg, block1.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	blockSize, err := cachedbs.GetSize(bg, block1.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if blockSize == -1 || !has {
+		t.Fatal("added block is reported missing")
+	}
+
+	has, err = cachedbs.Has(bg, block2.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	blockSize, err = cachedbs.GetSize(bg, block2.Cid())
+	if err != nil && !ipld.IsNotFound(err) {
+		t.Fatal(err)
+	}
+	if blockSize > -1 || has {
+		t.Fatal("not added block is reported to be in blockstore")
+	}
+
+	has, err = cachedbs.Has(bg, emptyBlock.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	blockSize, err = cachedbs.GetSize(bg, emptyBlock.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if blockSize != 0 || !has {
+		t.Fatal("added block is reported missing")
+	}
+}
+
+func TestReturnsErrorWhenSizeNegative(t *testing.T) {
+	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
+	_, err := bloomCached(context.Background(), bs, -1, 1)
+	if err == nil {
+		t.Fail()
+	}
+}
+
+func TestHasIsBloomCached(t *testing.T) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	bs := NewBlockstore(syncds.MutexWrap(cd))
+
+	for i := 0; i < 1000; i++ {
+		bs.Put(bg, blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	cachedbs, err := testBloomCached(ctx, bs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cachedbs.Wait(ctx); err != nil {
+		t.Fatalf("Failed while waiting for the filter to build: %d", cachedbs.bloom.ElementsAdded())
+	}
+
+	cacheFails := 0
+	cd.SetFunc(func() {
+		cacheFails++
+	})
+
+	for i := 0; i < 1000; i++ {
+		cachedbs.Has(bg, blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Cid())
+	}
+
+	if float64(cacheFails)/float64(1000) > float64(0.05) {
+		t.Fatalf("Bloom filter has cache miss rate of more than 5%%")
+	}
+
+	cacheFails = 0
+	block := blocks.NewBlock([]byte("newBlock"))
+
+	cachedbs.PutMany(bg, []blocks.Block{block})
+	if cacheFails != 2 {
+		t.Fatalf("expected two datastore hits: %d", cacheFails)
+	}
+	cachedbs.Put(bg, block)
+	if cacheFails != 3 {
+		t.Fatalf("expected datastore hit: %d", cacheFails)
+	}
+
+	if has, err := cachedbs.Has(bg, block.Cid()); !has || err != nil {
+		t.Fatal("has gave wrong response")
+	}
+
+	bl, err := cachedbs.Get(bg, block.Cid())
+	if bl.String() != block.String() {
+		t.Fatal("block data doesn't match")
+	}
+
+	if err != nil {
+		t.Fatal("there shouldn't be an error")
+	}
+}
+
+var _ ds.Batching = (*callbackDatastore)(nil)
+
+type callbackDatastore struct {
+	sync.Mutex
+	f  func()
+	ds ds.Datastore
+}
+
+func (c *callbackDatastore) SetFunc(f func()) {
+	c.Lock()
+	defer c.Unlock()
+	c.f = f
+}
+
+func (c *callbackDatastore) CallF() {
+	c.Lock()
+	defer c.Unlock()
+	c.f()
+}
+
+func (c *callbackDatastore) Put(ctx context.Context, key ds.Key, value []byte) (err error) {
+	c.CallF()
+	return c.ds.Put(ctx, key, value)
+}
+
+func (c *callbackDatastore) Get(ctx context.Context, key ds.Key) (value []byte, err error) {
+	c.CallF()
+	return c.ds.Get(ctx, key)
+}
+
+func (c *callbackDatastore) Has(ctx context.Context, key ds.Key) (exists bool, err error) {
+	c.CallF()
+	return c.ds.Has(ctx, key)
+}
+
+func (c *callbackDatastore) GetSize(ctx context.Context, key ds.Key) (size int, err error) {
+	c.CallF()
+	return c.ds.GetSize(ctx, key)
+}
+
+func (c *callbackDatastore) Close() error {
+	return nil
+}
+
+func (c *callbackDatastore) Delete(ctx context.Context, key ds.Key) (err error) {
+	c.CallF()
+	return c.ds.Delete(ctx, key)
+}
+
+func (c *callbackDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
+	c.CallF()
+	return c.ds.Query(ctx, q)
+}
+
+func (c *callbackDatastore) Sync(ctx context.Context, key ds.Key) error {
+	c.CallF()
+	return c.ds.Sync(ctx, key)
+}
+
+func (c *callbackDatastore) Batch(_ context.Context) (ds.Batch, error) {
+	return ds.NewBasicBatch(c), nil
+}
diff --git a/blockstore/caching.go b/blockstore/caching.go
new file mode 100644
index 0000000000..798b84ce2b
--- /dev/null
+++ b/blockstore/caching.go
@@ -0,0 +1,55 @@
+package blockstore
+
+import (
+	"context"
+	"errors"
+
+	metrics "github.com/ipfs/go-metrics-interface"
+)
+
+// CacheOpts wraps options for CachedBlockstore().
+// Next to each option is its approximate memory usage per unit.
+type CacheOpts struct {
+	HasBloomFilterSize   int // 1 byte
+	HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
+	HasARCCacheSize      int // 32 bytes
+}
+
+// DefaultCacheOpts returns a CacheOpts initialized with default values.
+func DefaultCacheOpts() CacheOpts {
+	return CacheOpts{
+		HasBloomFilterSize:   512 << 10,
+		HasBloomFilterHashes: 7,
+		HasARCCacheSize:      64 << 10,
+	}
+}
+
+// CachedBlockstore returns a blockstore wrapped in an ARCCache and
+// then in a bloom filter cache, if the options indicate it.
+func CachedBlockstore(
+	ctx context.Context,
+	bs Blockstore,
+	opts CacheOpts) (cbs Blockstore, err error) {
+	cbs = bs
+
+	if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 ||
+		opts.HasARCCacheSize < 0 {
+		return nil, errors.New("all options for cache need to be non-negative")
+	}
+
+	if opts.HasBloomFilterSize != 0 && opts.HasBloomFilterHashes == 0 {
+		return nil, errors.New("bloom filter hash count can't be 0 when there is size set")
+	}
+
+	ctx = metrics.CtxSubScope(ctx, "bs.cache")
+
+	if opts.HasARCCacheSize > 0 {
+		cbs, err = newARCCachedBS(ctx, cbs, opts.HasARCCacheSize)
+	}
+	if opts.HasBloomFilterSize != 0 {
+		// *8 because of bytes to bits conversion
+		cbs, err = bloomCached(ctx, cbs, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
+	}
+
+	return cbs, err
+}
diff --git a/blockstore/caching_test.go b/blockstore/caching_test.go
new file mode 100644
index 0000000000..16066ad18c
--- /dev/null
+++ b/blockstore/caching_test.go
@@ -0,0 +1,38 @@
+package blockstore
+
+import (
+	"context"
+	"testing"
+)
+
+func TestCachingOptsLessThanZero(t *testing.T) {
+	opts := DefaultCacheOpts()
+	opts.HasARCCacheSize = -1
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("wrong ARC setting was not detected")
+	}
+
+	opts = DefaultCacheOpts()
+	opts.HasBloomFilterSize = -1
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("negative bloom size was not detected")
+	}
+
+	opts = DefaultCacheOpts()
+	opts.HasBloomFilterHashes = -1
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("negative hashes setting was not detected")
+	}
+}
+
+func TestBloomHashesAtZero(t *testing.T) {
+	opts := DefaultCacheOpts()
+	opts.HasBloomFilterHashes = 0
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("zero hashes setting with positive size was not detected")
+	}
+}
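
The CacheOpts fields above map directly onto the two cache layers. A hedged configuration sketch; the sizes are illustrative, not recommendations:

```go
// Sketch: bloom-filter-only caching; disable the ARC layer entirely.
opts := DefaultCacheOpts()
opts.HasARCCacheSize = 0            // 0 disables the ARC cache
opts.HasBloomFilterSize = 256 << 10 // filter size in bytes (converted to bits internally)
opts.HasBloomFilterHashes = 7       // k hash functions, per the field comment above
cbs, err := CachedBlockstore(ctx, bs, opts)
```
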
diff --git a/blockstore/idstore.go b/blockstore/idstore.go
new file mode 100644
index 0000000000..04128ebe8d
--- /dev/null
+++ b/blockstore/idstore.go
@@ -0,0 +1,123 @@
+package blockstore
+
+import (
+	"context"
+	"io"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+)
+
+// idstore wraps a Blockstore to add support for identity hashes
+type idstore struct {
+	bs     Blockstore
+	viewer Viewer
+}
+
+var _ Blockstore = (*idstore)(nil)
+var _ Viewer = (*idstore)(nil)
+var _ io.Closer = (*idstore)(nil)
+
+func NewIdStore(bs Blockstore) Blockstore {
+	ids := &idstore{bs: bs}
+	if v, ok := bs.(Viewer); ok {
+		ids.viewer = v
+	}
+	return ids
+}
+
+func extractContents(k cid.Cid) (bool, []byte) {
+	// Pre-check by calling Prefix(); this is much faster than extracting the hash.
+	if k.Prefix().MhType != mh.IDENTITY {
+		return false, nil
+	}
+
+	dmh, err := mh.Decode(k.Hash())
+	if err != nil || dmh.Code != mh.IDENTITY {
+		return false, nil
+	}
+	return true, dmh.Digest
+}
+
+func (b *idstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
+	isId, _ := extractContents(k)
+	if isId {
+		return nil
+	}
+	return b.bs.DeleteBlock(ctx, k)
+}
+
+func (b *idstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
+	isId, _ := extractContents(k)
+	if isId {
+		return true, nil
+	}
+	return b.bs.Has(ctx, k)
+}
+
+func (b *idstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
+	if b.viewer == nil {
+		blk, err := b.Get(ctx, k)
+		if err != nil {
+			return err
+		}
+		return callback(blk.RawData())
+	}
+	isId, bdata := extractContents(k)
+	if isId {
+		return callback(bdata)
+	}
+	return b.viewer.View(ctx, k, callback)
+}
+
+func (b *idstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
+	isId, bdata := extractContents(k)
+	if isId {
+		return len(bdata), nil
+	}
+	return b.bs.GetSize(ctx, k)
+}
+
+func (b *idstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+	isId, bdata := extractContents(k)
+	if isId {
+		return blocks.NewBlockWithCid(bdata, k)
+	}
+	return b.bs.Get(ctx, k)
+}
+
+func (b *idstore) Put(ctx context.Context, bl blocks.Block) error {
+	isId, _ := extractContents(bl.Cid())
+	if isId {
+		return nil
+	}
+	return b.bs.Put(ctx, bl)
+}
+
+func (b *idstore) PutMany(ctx context.Context, bs []blocks.Block) error {
+	toPut := make([]blocks.Block, 0, len(bs))
+	for _, bl := range bs {
+		isId, _ := extractContents(bl.Cid())
+		if isId {
+			continue
+		}
+		toPut = append(toPut, bl)
+	}
+	return b.bs.PutMany(ctx, toPut)
+}
+
+func (b *idstore) HashOnRead(enabled bool) {
+	b.bs.HashOnRead(enabled)
+}
+
+func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return b.bs.AllKeysChan(ctx)
+}
+
+func (b *idstore) Close() error {
+	if c, ok := b.bs.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
diff --git a/blockstore/idstore_test.go b/blockstore/idstore_test.go
new file mode 100644
index 0000000000..1f55dcb0b6
--- /dev/null
+++ b/blockstore/idstore_test.go
@@ -0,0 +1,162 @@
+package blockstore
+
+import (
+	"context"
+	"testing"
+
+	blk "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	mh "github.com/multiformats/go-multihash"
+)
+
+func createTestStores() (Blockstore, *callbackDatastore) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	ids := NewIdStore(NewBlockstore(cd))
+	return ids, cd
+}
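
Identity multihashes embed the payload in the CID itself, which is why idstore can answer reads and drop writes without ever touching the datastore. A small sketch in the style of the test below:

```go
// Sketch: an identity CID carries its own payload.
ic, _ := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte("tiny payload"))
ids, _ := createTestStores()
blk, err := ids.Get(bg, ic) // served from the CID; the datastore is never queried
// err == nil && string(blk.RawData()) == "tiny payload", without any Put
```
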
+func TestIdStore(t *testing.T) {
+	idhash1, _ := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte("idhash1"))
+	idblock1, _ := blk.NewBlockWithCid([]byte("idhash1"), idhash1)
+	hash1, _ := cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Sum([]byte("hash1"))
+	block1, _ := blk.NewBlockWithCid([]byte("hash1"), hash1)
+	emptyHash, _ := cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Sum([]byte("emptyHash"))
+	emptyBlock, _ := blk.NewBlockWithCid([]byte{}, emptyHash)
+
+	ids, cb := createTestStores()
+
+	have, _ := ids.Has(bg, idhash1)
+	if !have {
+		t.Fatal("Has() failed on idhash")
+	}
+
+	_, err := ids.Get(bg, idhash1)
+	if err != nil {
+		t.Fatalf("Get() failed on idhash: %v", err)
+	}
+
+	noop := func() {}
+	failIfPassThrough := func() {
+		t.Fatal("operation on identity hash passed through to datastore")
+	}
+
+	cb.f = failIfPassThrough
+	err = ids.Put(bg, idblock1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cb.f = noop
+	err = ids.Put(bg, block1)
+	if err != nil {
+		t.Fatalf("Put() failed on normal block: %v", err)
+	}
+
+	have, _ = ids.Has(bg, hash1)
+	if !have {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	blockSize, _ := ids.GetSize(bg, hash1)
+	if blockSize == -1 {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	_, err = ids.Get(bg, hash1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ids.Put(bg, emptyBlock)
+	if err != nil {
+		t.Fatalf("Put() failed on normal block: %v", err)
+	}
+
+	have, _ = ids.Has(bg, emptyHash)
+	if !have {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	blockSize, _ = ids.GetSize(bg, emptyHash)
+	if blockSize != 0 {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	cb.f = failIfPassThrough
+	err = ids.DeleteBlock(bg, idhash1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cb.f = noop
+	err = ids.DeleteBlock(bg, hash1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	have, _ = ids.Has(bg, hash1)
+	if have {
+		t.Fatal("normal block not deleted from datastore")
+	}
+
+	blockSize, _ = ids.GetSize(bg, hash1)
+	if blockSize > -1 {
+		t.Fatal("normal block not deleted from datastore")
+	}
+
+	err = ids.DeleteBlock(bg, emptyHash)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	idhash2, _ := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte("idhash2"))
+	idblock2, _ := blk.NewBlockWithCid([]byte("idhash2"), idhash2)
+	hash2, _ := cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Sum([]byte("hash2"))
+	block2, _ := blk.NewBlockWithCid([]byte("hash2"), hash2)
+
+	cb.f = failIfPassThrough
+	err = ids.PutMany(bg, []blk.Block{idblock1, idblock2})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	opCount := 0
+	cb.f = func() {
+		opCount++
+	}
+
+	err = ids.PutMany(bg, []blk.Block{block1, block2})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if opCount != 4 {
+		// one call to Has and one to Put for each CID
+		t.Fatalf("expected exactly 4 operations got %d", opCount)
+	}
+
+	opCount = 0
+	err = ids.PutMany(bg, []blk.Block{idblock1, block1})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if opCount != 1 {
+		// just one call to Put for the normal (non-id) block
+		t.Fatalf("expected exactly 1 operation got %d", opCount)
+	}
+
+	ch, err := ids.AllKeysChan(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	cnt := 0
+	for c := range ch {
+		cnt++
+		if c.Prefix().MhType == mh.IDENTITY {
+			t.Fatalf("block with identity hash found in blockstore")
+		}
+	}
+	if cnt != 2 {
+		t.Fatalf("expected exactly two keys returned by AllKeysChan got %d", cnt)
+	}
+}
diff --git a/chunker/benchmark_test.go b/chunker/benchmark_test.go
new file mode 100644
index 0000000000..5069b06536
--- /dev/null
+++ b/chunker/benchmark_test.go
@@ -0,0 +1,59 @@
+package chunk
+
+import (
+	"bytes"
+	"io"
+	"math/rand"
+	"testing"
+)
+
+type newSplitter func(io.Reader) Splitter
+
+type bencSpec struct {
+	size int
+	name string
+}
+
+var bSizes = []bencSpec{
+	{1 << 10, "1K"},
+	{1 << 20, "1M"},
+	{16 << 20, "16M"},
+	{100 << 20, "100M"},
+}
+
+func benchmarkChunker(b *testing.B, ns newSplitter) {
+	for _, s := range bSizes {
+		s := s
+		b.Run(s.name, func(b *testing.B) {
+			benchmarkChunkerSize(b, ns, s.size)
+		})
+	}
+}
+
+func benchmarkChunkerSize(b *testing.B, ns newSplitter, size int) {
+	rng := rand.New(rand.NewSource(1))
+	data := make([]byte, size)
+	rng.Read(data)
+
+	b.SetBytes(int64(size))
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	var res uint64
+
+	for i := 0; i < b.N; i++ {
+		r := ns(bytes.NewReader(data))
+
+		for {
+			chunk, err := r.NextBytes()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				b.Fatal(err)
+ } + res = res + uint64(len(chunk)) + } + } + Res = Res + res +} diff --git a/chunker/buzhash.go b/chunker/buzhash.go new file mode 100644 index 0000000000..83ab019dd4 --- /dev/null +++ b/chunker/buzhash.go @@ -0,0 +1,151 @@ +package chunk + +import ( + "io" + "math/bits" + + pool "github.com/libp2p/go-buffer-pool" +) + +const ( + buzMin = 128 << 10 + buzMax = 512 << 10 + buzMask = 1<<17 - 1 +) + +type Buzhash struct { + r io.Reader + buf []byte + n int + + err error +} + +func NewBuzhash(r io.Reader) *Buzhash { + return &Buzhash{ + r: r, + buf: pool.Get(buzMax), + } +} + +func (b *Buzhash) Reader() io.Reader { + return b.r +} + +func (b *Buzhash) NextBytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + + n, err := io.ReadFull(b.r, b.buf[b.n:]) + if err != nil { + if err == io.ErrUnexpectedEOF || err == io.EOF { + buffered := b.n + n + if buffered < buzMin { + b.err = io.EOF + // Read nothing? Don't return an empty block. + if buffered == 0 { + pool.Put(b.buf) + b.buf = nil + return nil, b.err + } + res := make([]byte, buffered) + copy(res, b.buf) + + pool.Put(b.buf) + b.buf = nil + return res, nil + } + } else { + b.err = err + pool.Put(b.buf) + b.buf = nil + return nil, err + } + } + + i := buzMin - 32 + + var state uint32 = 0 + + if buzMin > len(b.buf) { + panic("this is impossible") + } + + for ; i < buzMin; i++ { + state = bits.RotateLeft32(state, 1) + state = state ^ bytehash[b.buf[i]] + } + + { + max := b.n + n - 32 - 1 + + buf := b.buf + bufshf := b.buf[32:] + i = buzMin - 32 + _ = buf[max] + _ = bufshf[max] + + for ; i <= max; i++ { + if state&buzMask == 0 { + break + } + state = bits.RotateLeft32(state, 1) ^ + bytehash[buf[i]] ^ + bytehash[bufshf[i]] + } + i += 32 + } + + res := make([]byte, i) + copy(res, b.buf) + + b.n = copy(b.buf, b.buf[i:b.n+n]) + + return res, nil +} + +var bytehash = [256]uint32{ + 0x6236e7d5, 0x10279b0b, 0x72818182, 0xdc526514, 0x2fd41e3d, 0x777ef8c8, + 0x83ee5285, 0x2c8f3637, 0x2f049c1a, 0x57df9791, 0x9207151f, 0x9b544818, + 0x74eef658, 0x2028ca60, 0x0271d91a, 0x27ae587e, 0xecf9fa5f, 0x236e71cd, + 0xf43a8a2e, 0xbb13380, 0x9e57912c, 0x89a26cdb, 0x9fcf3d71, 0xa86da6f1, + 0x9c49f376, 0x346aecc7, 0xf094a9ee, 0xea99e9cb, 0xb01713c6, 0x88acffb, + 0x2960a0fb, 0x344a626c, 0x7ff22a46, 0x6d7a1aa5, 0x6a714916, 0x41d454ca, + 0x8325b830, 0xb65f563, 0x447fecca, 0xf9d0ea5e, 0xc1d9d3d4, 0xcb5ec574, + 0x55aae902, 0x86edc0e7, 0xd3a9e33, 0xe70dc1e1, 0xe3c5f639, 0x9b43140a, + 0xc6490ac5, 0x5e4030fb, 0x8e976dd5, 0xa87468ea, 0xf830ef6f, 0xcc1ed5a5, + 0x611f4e78, 0xddd11905, 0xf2613904, 0x566c67b9, 0x905a5ccc, 0x7b37b3a4, + 0x4b53898a, 0x6b8fd29d, 0xaad81575, 0x511be414, 0x3cfac1e7, 0x8029a179, + 0xd40efeda, 0x7380e02, 0xdc9beffd, 0x2d049082, 0x99bc7831, 0xff5002a8, + 0x21ce7646, 0x1cd049b, 0xf43994f, 0xc3c6c5a5, 0xbbda5f50, 0xec15ec7, + 0x9adb19b6, 0xc1e80b9, 0xb9b52968, 0xae162419, 0x2542b405, 0x91a42e9d, + 0x6be0f668, 0x6ed7a6b9, 0xbc2777b4, 0xe162ce56, 0x4266aad5, 0x60fdb704, + 0x66f832a5, 0x9595f6ca, 0xfee83ced, 0x55228d99, 0x12bf0e28, 0x66896459, + 0x789afda, 0x282baa8, 0x2367a343, 0x591491b0, 0x2ff1a4b1, 0x410739b6, + 0x9b7055a0, 0x2e0eb229, 0x24fc8252, 0x3327d3df, 0xb0782669, 0x1c62e069, + 0x7f503101, 0xf50593ae, 0xd9eb275d, 0xe00eb678, 0x5917ccde, 0x97b9660a, + 0xdd06202d, 0xed229e22, 0xa9c735bf, 0xd6316fe6, 0x6fc72e4c, 0x206dfa2, + 0xd6b15c5a, 0x69d87b49, 0x9c97745, 0x13445d61, 0x35a975aa, 0x859aa9b9, + 0x65380013, 0xd1fb6391, 0xc29255fd, 0x784a3b91, 0xb9e74c26, 0x63ce4d40, + 0xc07cbe9e, 0xe6e4529e, 0xfb3632f, 0x9438d9c9, 0x682f94a8, 0xf8fd4611, + 
0x257ec1ed, 0x475ce3d6, 0x60ee2db1, 0x2afab002, 0x2b9e4878, 0x86b340de, + 0x1482fdca, 0xfe41b3bf, 0xd4a412b0, 0xe09db98c, 0xc1af5d53, 0x7e55e25f, + 0xd3346b38, 0xb7a12cbd, 0x9c6827ba, 0x71f78bee, 0x8c3a0f52, 0x150491b0, + 0xf26de912, 0x233e3a4e, 0xd309ebba, 0xa0a9e0ff, 0xca2b5921, 0xeeb9893c, + 0x33829e88, 0x9870cc2a, 0x23c4b9d0, 0xeba32ea3, 0xbdac4d22, 0x3bc8c44c, + 0x1e8d0397, 0xf9327735, 0x783b009f, 0xeb83742, 0x2621dc71, 0xed017d03, + 0x5c760aa1, 0x5a69814b, 0x96e3047f, 0xa93c9cde, 0x615c86f5, 0xb4322aa5, + 0x4225534d, 0xd2e2de3, 0xccfccc4b, 0xbac2a57, 0xf0a06d04, 0xbc78d737, + 0xf2d1f766, 0xf5a7953c, 0xbcdfda85, 0x5213b7d5, 0xbce8a328, 0xd38f5f18, + 0xdb094244, 0xfe571253, 0x317fa7ee, 0x4a324f43, 0x3ffc39d9, 0x51b3fa8e, + 0x7a4bee9f, 0x78bbc682, 0x9f5c0350, 0x2fe286c, 0x245ab686, 0xed6bf7d7, + 0xac4988a, 0x3fe010fa, 0xc65fe369, 0xa45749cb, 0x2b84e537, 0xde9ff363, + 0x20540f9a, 0xaa8c9b34, 0x5bc476b3, 0x1d574bd7, 0x929100ad, 0x4721de4d, + 0x27df1b05, 0x58b18546, 0xb7e76764, 0xdf904e58, 0x97af57a1, 0xbd4dc433, + 0xa6256dfd, 0xf63998f3, 0xf1e05833, 0xe20acf26, 0xf57fd9d6, 0x90300b4d, + 0x89df4290, 0x68d01cbc, 0xcf893ee3, 0xcc42a046, 0x778e181b, 0x67265c76, + 0xe981a4c4, 0x82991da1, 0x708f7294, 0xe6e2ae62, 0xfc441870, 0x95e1b0b6, + 0x445f825, 0x5a93b47f, 0x5e9cf4be, 0x84da71e7, 0x9d9582b0, 0x9bf835ef, + 0x591f61e2, 0x43325985, 0x5d2de32e, 0x8d8fbf0f, 0x95b30f38, 0x7ad5b6e, + 0x4e934edf, 0x3cd4990e, 0x9053e259, 0x5c41857d} diff --git a/chunker/buzhash_norace_test.go b/chunker/buzhash_norace_test.go new file mode 100644 index 0000000000..50dc0e5ce2 --- /dev/null +++ b/chunker/buzhash_norace_test.go @@ -0,0 +1,14 @@ +//go:build !race + +package chunk + +import ( + "testing" +) + +func TestFuzzBuzhashChunking(t *testing.T) { + buf := make([]byte, 1024*1024*16) + for i := 0; i < 100; i++ { + testBuzhashChunking(t, buf) + } +} diff --git a/chunker/buzhash_test.go b/chunker/buzhash_test.go new file mode 100644 index 0000000000..fe6de4434e --- /dev/null +++ b/chunker/buzhash_test.go @@ -0,0 +1,91 @@ +package chunk + +import ( + "bytes" + "io" + "testing" + + util "github.com/ipfs/boxo/util" +) + +func testBuzhashChunking(t *testing.T, buf []byte) (chunkCount int) { + n, err := util.NewTimeSeededRand().Read(buf) + if n < len(buf) { + t.Fatalf("expected %d bytes, got %d", len(buf), n) + } + if err != nil { + t.Fatal(err) + } + + r := NewBuzhash(bytes.NewReader(buf)) + + var chunks [][]byte + + for { + chunk, err := r.NextBytes() + if err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + + chunks = append(chunks, chunk) + } + chunkCount += len(chunks) + + for i, chunk := range chunks { + if len(chunk) == 0 { + t.Fatalf("chunk %d/%d is empty", i+1, len(chunks)) + } + } + + for i, chunk := range chunks[:len(chunks)-1] { + if len(chunk) < buzMin { + t.Fatalf("chunk %d/%d is less than the minimum size", i+1, len(chunks)) + } + } + + unchunked := bytes.Join(chunks, nil) + if !bytes.Equal(unchunked, buf) { + t.Fatal("data was chunked incorrectly") + } + + return chunkCount +} + +func TestBuzhashChunking(t *testing.T) { + buf := make([]byte, 1024*1024*16) + count := testBuzhashChunking(t, buf) + t.Logf("average block size: %d\n", len(buf)/count) +} + +func TestBuzhashChunkReuse(t *testing.T) { + newBuzhash := func(r io.Reader) Splitter { + return NewBuzhash(r) + } + testReuse(t, newBuzhash) +} + +func BenchmarkBuzhash2(b *testing.B) { + benchmarkChunker(b, func(r io.Reader) Splitter { + return NewBuzhash(r) + }) +} + +func TestBuzhashBitsHashBias(t *testing.T) { + counts := make([]byte, 32) + 
for _, h := range bytehash { + for i := 0; i < 32; i++ { + if h&1 == 1 { + counts[i]++ + } + h = h >> 1 + } + } + for i, c := range counts { + if c != 128 { + t.Errorf("Bit balance in position %d broken, %d ones", i, c) + } + } +} diff --git a/chunker/gen/main.go b/chunker/gen/main.go new file mode 100644 index 0000000000..9d908544b8 --- /dev/null +++ b/chunker/gen/main.go @@ -0,0 +1,33 @@ +// This file generates bytehash LUT +package main + +import ( + "fmt" + "math/rand" +) + +const nRounds = 200 + +func main() { + rnd := rand.New(rand.NewSource(0)) + + lut := make([]uint32, 256) + for i := 0; i < 256/2; i++ { + lut[i] = 1<<32 - 1 + } + + for r := 0; r < nRounds; r++ { + for b := uint32(0); b < 32; b++ { + mask := uint32(1) << b + nmask := ^mask + for i, j := range rnd.Perm(256) { + li := lut[i] + lj := lut[j] + lut[i] = li&nmask | (lj & mask) + lut[j] = lj&nmask | (li & mask) + } + } + } + + fmt.Printf("%#v", lut) +} diff --git a/chunker/parse.go b/chunker/parse.go new file mode 100644 index 0000000000..486cd14adc --- /dev/null +++ b/chunker/parse.go @@ -0,0 +1,114 @@ +package chunk + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +const ( + // DefaultBlockSize is the chunk size that splitters produce (or aim to). + DefaultBlockSize int64 = 1024 * 256 + + // No leaf block should contain more than 1MiB of payload data ( wrapping overhead aside ) + // This effectively mandates the maximum chunk size + // See discussion at https://github.com/ipfs/boxo/chunker/pull/21#discussion_r369124879 for background + ChunkSizeLimit int = 1048576 +) + +var ( + ErrRabinMin = errors.New("rabin min must be greater than 16") + ErrSize = errors.New("chunker size must be greater than 0") + ErrSizeMax = fmt.Errorf("chunker parameters may not exceed the maximum chunk size of %d", ChunkSizeLimit) +) + +// FromString returns a Splitter depending on the given string: +// it supports "default" (""), "size-{size}", "rabin", "rabin-{blocksize}", +// "rabin-{min}-{avg}-{max}" and "buzhash". 
+func FromString(r io.Reader, chunker string) (Splitter, error) {
+	switch {
+	case chunker == "" || chunker == "default":
+		return DefaultSplitter(r), nil
+
+	case strings.HasPrefix(chunker, "size-"):
+		sizeStr := strings.Split(chunker, "-")[1]
+		size, err := strconv.Atoi(sizeStr)
+		if err != nil {
+			return nil, err
+		} else if size <= 0 {
+			return nil, ErrSize
+		} else if size > ChunkSizeLimit {
+			return nil, ErrSizeMax
+		}
+		return NewSizeSplitter(r, int64(size)), nil
+
+	case strings.HasPrefix(chunker, "rabin"):
+		return parseRabinString(r, chunker)
+
+	case chunker == "buzhash":
+		return NewBuzhash(r), nil
+
+	default:
+		return nil, fmt.Errorf("unrecognized chunker option: %s", chunker)
+	}
+}
+
+func parseRabinString(r io.Reader, chunker string) (Splitter, error) {
+	parts := strings.Split(chunker, "-")
+	switch len(parts) {
+	case 1:
+		return NewRabin(r, uint64(DefaultBlockSize)), nil
+	case 2:
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			return nil, err
+		} else if int(float32(size)*1.5) > ChunkSizeLimit { // FIXME - this will be addressed in a subsequent PR
+			return nil, ErrSizeMax
+		}
+		return NewRabin(r, uint64(size)), nil
+	case 4:
+		sub := strings.Split(parts[1], ":")
+		if len(sub) > 1 && sub[0] != "min" {
+			return nil, errors.New("first label must be min")
+		}
+		min, err := strconv.Atoi(sub[len(sub)-1])
+		if err != nil {
+			return nil, err
+		}
+		if min < 16 {
+			return nil, ErrRabinMin
+		}
+		sub = strings.Split(parts[2], ":")
+		if len(sub) > 1 && sub[0] != "avg" {
+			log.Error("sub == ", sub)
+			return nil, errors.New("second label must be avg")
+		}
+		avg, err := strconv.Atoi(sub[len(sub)-1])
+		if err != nil {
+			return nil, err
+		}
+
+		sub = strings.Split(parts[3], ":")
+		if len(sub) > 1 && sub[0] != "max" {
+			return nil, errors.New("final label must be max")
+		}
+		max, err := strconv.Atoi(sub[len(sub)-1])
+		if err != nil {
+			return nil, err
+		}
+
+		if min >= avg {
+			return nil, errors.New("incorrect format: rabin-min must be smaller than rabin-avg")
+		} else if avg >= max {
+			return nil, errors.New("incorrect format: rabin-avg must be smaller than rabin-max")
+		} else if max > ChunkSizeLimit {
+			return nil, ErrSizeMax
+		}
+
+		return NewRabinMinMax(r, uint64(min), uint64(avg), uint64(max)), nil
+	default:
+		return nil, errors.New("incorrect format (expected 'rabin', 'rabin-[avg]' or 'rabin-[min]-[avg]-[max]')")
+	}
+}
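
The accepted spec strings map directly onto the constructors above. A short usage sketch; `data` is an assumed input buffer:

```go
// Sketch: selecting a splitter from a spec string.
r := bytes.NewReader(data)
spl, err := FromString(r, "rabin-262144") // min/avg/max derived from the average
// also accepted: "", "default", "size-262144", "buzhash",
// and labeled forms like "rabin-min:16-avg:32-max:64" (labels are optional)
if err != nil {
	return err
}
chunk, err := spl.NextBytes() // read chunks until io.EOF
```
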
diff --git a/chunker/parse_test.go b/chunker/parse_test.go
new file mode 100644
index 0000000000..237a2b439a
--- /dev/null
+++ b/chunker/parse_test.go
@@ -0,0 +1,80 @@
+package chunk
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+)
+
+const (
+	testTwoThirdsOfChunkLimit = 2 * (float32(ChunkSizeLimit) / float32(3))
+)
+
+func TestParseRabin(t *testing.T) {
+	r := bytes.NewReader(randBuf(t, 1000))
+
+	_, err := FromString(r, "rabin-18-25-32")
+	if err != nil {
+		t.Error(err)
+	}
+
+	_, err = FromString(r, "rabin-15-23-31")
+	if err != ErrRabinMin {
+		t.Fatalf("Expected an 'ErrRabinMin' error, got: %#v", err)
+	}
+
+	_, err = FromString(r, "rabin-20-20-21")
+	if err == nil || err.Error() != "incorrect format: rabin-min must be smaller than rabin-avg" {
+		t.Fatalf("Expected an arg-out-of-order error, got: %#v", err)
+	}
+
+	_, err = FromString(r, "rabin-19-21-21")
+	if err == nil || err.Error() != "incorrect format: rabin-avg must be smaller than rabin-max" {
+		t.Fatalf("Expected an arg-out-of-order error, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-19-21-%d", ChunkSizeLimit))
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-19-21-%d", 1+ChunkSizeLimit))
+	if err != ErrSizeMax {
+		t.Fatalf("Expected 'ErrSizeMax', got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-%.0f", testTwoThirdsOfChunkLimit))
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-%.0f", 1+testTwoThirdsOfChunkLimit))
+	if err != ErrSizeMax {
+		t.Fatalf("Expected 'ErrSizeMax', got: %#v", err)
+	}
+}
+
+func TestParseSize(t *testing.T) {
+	r := bytes.NewReader(randBuf(t, 1000))
+
+	_, err := FromString(r, "size-0")
+	if err != ErrSize {
+		t.Fatalf("Expected an 'ErrSize' error, got: %#v", err)
+	}
+
+	_, err = FromString(r, "size-32")
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("size-%d", ChunkSizeLimit))
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("size-%d", 1+ChunkSizeLimit))
+	if err != ErrSizeMax {
+		t.Fatalf("Expected 'ErrSizeMax', got: %#v", err)
+	}
+}
diff --git a/chunker/rabin.go b/chunker/rabin.go
new file mode 100644
index 0000000000..4247057b2f
--- /dev/null
+++ b/chunker/rabin.go
@@ -0,0 +1,54 @@
+package chunk
+
+import (
+	"hash/fnv"
+	"io"
+
+	"github.com/whyrusleeping/chunker"
+)
+
+// IpfsRabinPoly is the irreducible polynomial of degree 53 used for Rabin.
+var IpfsRabinPoly = chunker.Pol(17437180132763653)
+
+// Rabin implements the Splitter interface and splits content with Rabin
+// fingerprints.
+type Rabin struct {
+	r      *chunker.Chunker
+	reader io.Reader
+}
+
+// NewRabin creates a new Rabin splitter with the given
+// average block size.
+func NewRabin(r io.Reader, avgBlkSize uint64) *Rabin {
+	min := avgBlkSize / 3
+	max := avgBlkSize + (avgBlkSize / 2)
+
+	return NewRabinMinMax(r, min, avgBlkSize, max)
+}
+
+// NewRabinMinMax returns a new Rabin splitter which uses
+// the given min, average and max block sizes.
+func NewRabinMinMax(r io.Reader, min, avg, max uint64) *Rabin {
+	h := fnv.New32a()
+	ch := chunker.New(r, IpfsRabinPoly, h, avg, min, max)
+
+	return &Rabin{
+		r:      ch,
+		reader: r,
+	}
+}
+
+// NextBytes reads the next bytes from the reader and returns a slice.
+func (r *Rabin) NextBytes() ([]byte, error) {
+	ch, err := r.r.Next()
+	if err != nil {
+		return nil, err
+	}
+
+	return ch.Data, nil
+}
+
+// Reader returns the io.Reader associated to this Splitter.
+func (r *Rabin) Reader() io.Reader { + return r.reader +} diff --git a/chunker/rabin_test.go b/chunker/rabin_test.go new file mode 100644 index 0000000000..70e1de5413 --- /dev/null +++ b/chunker/rabin_test.go @@ -0,0 +1,108 @@ +package chunk + +import ( + "bytes" + "fmt" + "io" + "testing" + + blocks "github.com/ipfs/boxo/blocks" + util "github.com/ipfs/boxo/util" +) + +func TestRabinChunking(t *testing.T) { + data := make([]byte, 1024*1024*16) + n, err := util.NewTimeSeededRand().Read(data) + if n < len(data) { + t.Fatalf("expected %d bytes, got %d", len(data), n) + } + if err != nil { + t.Fatal(err) + } + + r := NewRabin(bytes.NewReader(data), 1024*256) + + var chunks [][]byte + + for { + chunk, err := r.NextBytes() + if err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + + chunks = append(chunks, chunk) + } + + fmt.Printf("average block size: %d\n", len(data)/len(chunks)) + + unchunked := bytes.Join(chunks, nil) + if !bytes.Equal(unchunked, data) { + fmt.Printf("%d %d\n", len(unchunked), len(data)) + t.Fatal("data was chunked incorrectly") + } +} + +func chunkData(t *testing.T, newC newSplitter, data []byte) map[string]blocks.Block { + r := newC(bytes.NewReader(data)) + + blkmap := make(map[string]blocks.Block) + + for { + blk, err := r.NextBytes() + if err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + + b := blocks.NewBlock(blk) + blkmap[b.Cid().KeyString()] = b + } + + return blkmap +} + +func testReuse(t *testing.T, cr newSplitter) { + data := make([]byte, 1024*1024*16) + n, err := util.NewTimeSeededRand().Read(data) + if n < len(data) { + t.Fatalf("expected %d bytes, got %d", len(data), n) + } + if err != nil { + t.Fatal(err) + } + + ch1 := chunkData(t, cr, data[1000:]) + ch2 := chunkData(t, cr, data) + + var extra int + for k := range ch2 { + _, ok := ch1[k] + if !ok { + extra++ + } + } + + if extra > 2 { + t.Logf("too many spare chunks made: %d", extra) + } +} + +func TestRabinChunkReuse(t *testing.T) { + newRabin := func(r io.Reader) Splitter { + return NewRabin(r, 256*1024) + } + testReuse(t, newRabin) +} + +var Res uint64 + +func BenchmarkRabin(b *testing.B) { + benchmarkChunker(b, func(r io.Reader) Splitter { + return NewRabin(r, 256<<10) + }) +} diff --git a/chunker/splitting.go b/chunker/splitting.go new file mode 100644 index 0000000000..a137820ab1 --- /dev/null +++ b/chunker/splitting.go @@ -0,0 +1,102 @@ +// Package chunk implements streaming block splitters. +// Splitters read data from a reader and provide byte slices (chunks) +// The size and contents of these slices depend on the splitting method +// used. +package chunk + +import ( + "io" + + logging "github.com/ipfs/go-log" + pool "github.com/libp2p/go-buffer-pool" +) + +var log = logging.Logger("chunk") + +// A Splitter reads bytes from a Reader and creates "chunks" (byte slices) +// that can be used to build DAG nodes. +type Splitter interface { + Reader() io.Reader + NextBytes() ([]byte, error) +} + +// SplitterGen is a splitter generator, given a reader. +type SplitterGen func(r io.Reader) Splitter + +// DefaultSplitter returns a SizeSplitter with the DefaultBlockSize. +func DefaultSplitter(r io.Reader) Splitter { + return NewSizeSplitter(r, DefaultBlockSize) +} + +// SizeSplitterGen returns a SplitterGen function which will create +// a splitter with the given size when called. 
+func SizeSplitterGen(size int64) SplitterGen {
+	return func(r io.Reader) Splitter {
+		return NewSizeSplitter(r, size)
+	}
+}
+
+// Chan returns a channel that receives each of the chunks produced
+// by a splitter, along with another one for errors.
+func Chan(s Splitter) (<-chan []byte, <-chan error) {
+	out := make(chan []byte)
+	errs := make(chan error, 1)
+	go func() {
+		defer close(out)
+		defer close(errs)
+
+		// all-chunks loop (keep creating chunks)
+		for {
+			b, err := s.NextBytes()
+			if err != nil {
+				errs <- err
+				return
+			}
+
+			out <- b
+		}
+	}()
+	return out, errs
+}
+
+type sizeSplitterv2 struct {
+	r    io.Reader
+	size uint32
+	err  error
+}
+
+// NewSizeSplitter returns a new size-based Splitter with the given block size.
+func NewSizeSplitter(r io.Reader, size int64) Splitter {
+	return &sizeSplitterv2{
+		r:    r,
+		size: uint32(size),
+	}
+}
+
+// NextBytes produces a new chunk.
+func (ss *sizeSplitterv2) NextBytes() ([]byte, error) {
+	if ss.err != nil {
+		return nil, ss.err
+	}
+
+	full := pool.Get(int(ss.size))
+	n, err := io.ReadFull(ss.r, full)
+	switch err {
+	case io.ErrUnexpectedEOF:
+		ss.err = io.EOF
+		small := make([]byte, n)
+		copy(small, full)
+		pool.Put(full)
+		return small, nil
+	case nil:
+		return full, nil
+	default:
+		pool.Put(full)
+		return nil, err
+	}
+}
+
+// Reader returns the io.Reader associated to this Splitter.
+func (ss *sizeSplitterv2) Reader() io.Reader {
+	return ss.r
+}
diff --git a/chunker/splitting_test.go b/chunker/splitting_test.go
new file mode 100644
index 0000000000..c53dfb4a75
--- /dev/null
+++ b/chunker/splitting_test.go
@@ -0,0 +1,126 @@
+package chunk
+
+import (
+	"bytes"
+	"io"
+	"testing"
+
+	u "github.com/ipfs/boxo/util"
+)
+
+func randBuf(t *testing.T, size int) []byte {
+	buf := make([]byte, size)
+	if _, err := u.NewTimeSeededRand().Read(buf); err != nil {
+		t.Fatal("failed to read enough randomness")
+	}
+	return buf
+}
+
+func copyBuf(buf []byte) []byte {
+	cpy := make([]byte, len(buf))
+	copy(cpy, buf)
+	return cpy
+}
+
+func TestSizeSplitterOverAllocate(t *testing.T) {
+	max := 1000
+	r := bytes.NewReader(randBuf(t, max))
+	chunksize := int64(1024 * 256)
+	splitter := NewSizeSplitter(r, chunksize)
+	chunk, err := splitter.NextBytes()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if cap(chunk) > len(chunk) {
+		t.Fatal("chunk capacity too large")
+	}
+}
+
+func TestSizeSplitterIsDeterministic(t *testing.T) {
+	if testing.Short() {
+		t.SkipNow()
+	}
+
+	test := func() {
+		bufR := randBuf(t, 10000000) // crank this up to satisfy yourself.
+		bufA := copyBuf(bufR)
+		bufB := copyBuf(bufR)
+
+		chunksA, _ := Chan(DefaultSplitter(bytes.NewReader(bufA)))
+		chunksB, _ := Chan(DefaultSplitter(bytes.NewReader(bufB)))
+
+		for n := 0; ; n++ {
+			a, moreA := <-chunksA
+			b, moreB := <-chunksB
+
+			if !moreA {
+				if moreB {
+					t.Fatal("A ended, B didn't.")
+				}
+				return
+			}
+
+			if !bytes.Equal(a, b) {
+				t.Fatalf("chunk %d not equal", n)
+			}
+		}
+	}
+
+	for run := 0; run < 1; run++ { // crank this up to satisfy yourself. 
+ test() + } +} + +func TestSizeSplitterFillsChunks(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + max := 10000000 + b := randBuf(t, max) + r := &clipReader{r: bytes.NewReader(b), size: 4000} + chunksize := int64(1024 * 256) + c, _ := Chan(NewSizeSplitter(r, chunksize)) + + sofar := 0 + whole := make([]byte, max) + for chunk := range c { + + bc := b[sofar : sofar+len(chunk)] + if !bytes.Equal(bc, chunk) { + t.Fatalf("chunk not correct: (sofar: %d) %d != %d, %v != %v", sofar, len(bc), len(chunk), bc[:100], chunk[:100]) + } + + copy(whole[sofar:], chunk) + + sofar += len(chunk) + if sofar != max && len(chunk) < int(chunksize) { + t.Fatal("sizesplitter split at a smaller size") + } + } + + if !bytes.Equal(b, whole) { + t.Fatal("splitter did not split right") + } +} + +type clipReader struct { + size int + r io.Reader +} + +func (s *clipReader) Read(buf []byte) (int, error) { + + // clip the incoming buffer to produce smaller chunks + if len(buf) > s.size { + buf = buf[:s.size] + } + + return s.r.Read(buf) +} + +func BenchmarkDefault(b *testing.B) { + benchmarkChunker(b, func(r io.Reader) Splitter { + return DefaultSplitter(r) + }) +} diff --git a/cmd/car/README.md b/cmd/car/README.md new file mode 100644 index 0000000000..995850fd73 --- /dev/null +++ b/cmd/car/README.md @@ -0,0 +1,38 @@ +car - The CLI tool +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![](https://img.shields.io/badge/project-ipld-orange.svg?style=flat-square)](https://github.com/ipld/ipld) +[![](https://img.shields.io/badge/matrix-%23ipld-blue.svg?style=flat-square)](https://matrix.to/#/#ipld:ipfs.io) + +> A CLI for interacting with car files + +## Usage + +``` +USAGE: + car [global options] command [command options] [arguments...] 
+ +COMMANDS: + compile compile a car file from a debug patch + create, c Create a car file + debug debug a car file + detach-index Detach an index to a detached file + extract, x Extract the contents of a car when the car encodes UnixFS data + filter, f Filter the CIDs in a car + get-block, gb Get a block out of a car + get-dag, gd Get a dag out of a car + index, i write out the car with an index + inspect verifies a car and prints a basic report about its contents + list, l, ls List the CIDs in a car + root Get the root CID of a car + verify, v Verify a CAR is wellformed + help, h Shows a list of commands or help for one command +``` + +## Install + +To install the latest version of `car`, run: +```shell script +go install github.com/ipld/go-car/cmd/car@latest +``` diff --git a/cmd/car/car.go b/cmd/car/car.go new file mode 100644 index 0000000000..d2356484cc --- /dev/null +++ b/cmd/car/car.go @@ -0,0 +1,218 @@ +package main + +import ( + "log" + "os" + + "github.com/multiformats/go-multicodec" + "github.com/urfave/cli/v2" +) + +func main() { os.Exit(main1()) } + +func main1() int { + app := &cli.App{ + Name: "car", + Usage: "Utility for working with car files", + Commands: []*cli.Command{ + { + Name: "compile", + Usage: "compile a car file from a debug patch", + Action: CompileCar, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "output", + Aliases: []string{"o", "f"}, + Usage: "The file to write to", + TakesFile: true, + }, + }, + }, + { + Name: "create", + Usage: "Create a car file", + Aliases: []string{"c"}, + Action: CreateCar, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "file", + Aliases: []string{"f", "output", "o"}, + Usage: "The car file to write to", + TakesFile: true, + }, + &cli.IntFlag{ + Name: "version", + Value: 2, + Usage: "Write output as a v1 or v2 format car", + }, + }, + }, + { + Name: "debug", + Usage: "debug a car file", + Action: DebugCar, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "output", + Aliases: []string{"o", "f"}, + Usage: "The file to write to", + TakesFile: true, + }, + }, + }, + { + Name: "detach-index", + Usage: "Detach an index to a detached file", + Action: DetachCar, + Subcommands: []*cli.Command{{ + Name: "list", + Usage: "List a detached index", + Action: DetachCarList, + }}, + }, + { + Name: "extract", + Aliases: []string{"x"}, + Usage: "Extract the contents of a car when the car encodes UnixFS data", + Action: ExtractCar, + ArgsUsage: "[output directory|-]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "file", + Aliases: []string{"f"}, + Usage: "The car file to extract from, or stdin if omitted", + Required: false, + TakesFile: true, + }, + &cli.StringFlag{ + Name: "path", + Aliases: []string{"p"}, + Usage: "The unixfs path to extract", + Required: false, + }, + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "Include verbose information about extracted contents", + }, + }, + }, + { + Name: "filter", + Aliases: []string{"f"}, + Usage: "Filter the CIDs in a car", + Action: FilterCar, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "cid-file", + Usage: "A file to read CIDs from", + TakesFile: true, + }, + &cli.BoolFlag{ + Name: "append", + Usage: "Append cids to an existing output file", + }, + }, + }, + { + Name: "get-block", + Aliases: []string{"gb"}, + Usage: "Get a block out of a car", + Action: GetCarBlock, + }, + { + Name: "get-dag", + Aliases: []string{"gd"}, + Usage: "Get a dag out of a car", + Action: GetCarDag, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "selector", + Aliases: []string{"s"}, + Usage: "A 
selector over the dag", + }, + &cli.BoolFlag{ + Name: "strict", + Usage: "Fail if the selector finds links to blocks not in the original car", + }, + &cli.IntFlag{ + Name: "version", + Value: 2, + Usage: "Write output as a v1 or v2 format car", + }, + }, + }, + { + Name: "index", + Aliases: []string{"i"}, + Usage: "write out the car with an index", + Action: IndexCar, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "codec", + Aliases: []string{"c"}, + Usage: "The type of index to write", + Value: multicodec.CarMultihashIndexSorted.String(), + }, + &cli.IntFlag{ + Name: "version", + Value: 2, + Usage: "Write output as a v1 or v2 format car", + }, + }, + Subcommands: []*cli.Command{{ + Name: "create", + Usage: "Write out a detached index", + Action: CreateIndex, + }}, + }, + { + Name: "inspect", + Usage: "verifies a car and prints a basic report about its contents", + Action: InspectCar, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "full", + Value: false, + Usage: "Check that the block data hash digests match the CIDs", + }, + }, + }, + { + Name: "list", + Aliases: []string{"l", "ls"}, + Usage: "List the CIDs in a car", + Action: ListCar, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "Include verbose information about contained blocks", + }, + &cli.BoolFlag{ + Name: "unixfs", + Usage: "List unixfs filesystem from the root of the car", + }, + }, + }, + { + Name: "root", + Usage: "Get the root CID of a car", + Action: CarRoot, + }, + { + Name: "verify", + Aliases: []string{"v"}, + Usage: "Verify a CAR is wellformed", + Action: VerifyCar, + }, + }, + } + + err := app.Run(os.Args) + if err != nil { + log.Println(err) + return 1 + } + return 0 +} diff --git a/cmd/car/compile.go b/cmd/car/compile.go new file mode 100644 index 0000000000..ca97000cfb --- /dev/null +++ b/cmd/car/compile.go @@ -0,0 +1,463 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "regexp" + "strings" + "unicode/utf8" + + blocks "github.com/ipfs/boxo/blocks" + carv1 "github.com/ipfs/boxo/ipld/car" + "github.com/ipfs/boxo/ipld/car/util" + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec" + "github.com/ipld/go-ipld-prime/codec/dagjson" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/storage/memstore" + "github.com/polydawn/refmt/json" + "github.com/urfave/cli/v2" + "golang.org/x/exp/slices" +) + +var ( + plusLineRegex = regexp.MustCompile(`^\+\+\+ ([\w-]+) ([\S]+ )?([\w]+)$`) +) + +// Compile is a command to translate between a human-debuggable patch-like format and a car file. +func CompileCar(c *cli.Context) error { + var err error + inStream := os.Stdin + if c.Args().Len() >= 1 { + inStream, err = os.Open(c.Args().First()) + if err != nil { + return err + } + } + + //parse headers. 
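+	// The debug patch format read here mirrors what DebugCar writes below: a
+	// "car compile [--v2] <name>" header line, then zero or more "root <cid>"
+	// lines, one per root, up to the first "--- " block delimiter.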
+ br := bufio.NewReader(inStream) + header, _, err := br.ReadLine() + if err != nil { + return err + } + + v2 := strings.HasPrefix(string(header), "car compile --v2 ") + rest := strings.TrimPrefix(string(header), "car compile ") + if v2 { + rest = strings.TrimPrefix(rest, "--v2 ") + } + carName := strings.TrimSpace(rest) + + roots := make([]cid.Cid, 0) + for { + peek, err := br.Peek(4) + if err == io.EOF { + break + } else if err != nil { + return err + } + if bytes.Equal(peek, []byte("--- ")) { + break + } + rootLine, _, err := br.ReadLine() + if err != nil { + return err + } + if strings.HasPrefix(string(rootLine), "root ") { + var rCidS string + fmt.Sscanf(string(rootLine), "root %s", &rCidS) + rCid, err := cid.Parse(rCidS) + if err != nil { + return err + } + roots = append(roots, rCid) + } + } + + //parse blocks. + cidList := make([]cid.Cid, 0) + rawBlocks := make(map[cid.Cid][]byte) + rawCodecs := make(map[cid.Cid]string) + + for { + nextCid, mode, nextBlk, err := parsePatch(br) + if err == io.EOF { + break + } else if err != nil { + return err + } + rawBlocks[nextCid] = nextBlk + rawCodecs[nextCid] = mode + cidList = append(cidList, nextCid) + } + + // Re-create the original IPLD encoded blocks, but allowing for modifications of the + // patch data which may generate new CIDs; so we track the DAG relationships and + // rewrite CIDs in other referring where they get updated. + + // structure as a tree + childMap := make(map[cid.Cid][]cid.Cid) + for c := range rawBlocks { + if _, ok := childMap[c]; !ok { + childMap[c] = make([]cid.Cid, 0) + } + for d, blk := range rawBlocks { + if c.Equals(d) { + continue + } + if strings.Contains(string(blk), c.String()) { + if _, ok := childMap[d]; !ok { + childMap[d] = make([]cid.Cid, 0) + } + childMap[d] = append(childMap[d], c) + } else if strings.Contains(string(blk), string(c.Bytes())) { + if _, ok := childMap[d]; !ok { + childMap[d] = make([]cid.Cid, 0) + } + childMap[d] = append(childMap[d], c) + } + } + } + + // re-parse/re-build CIDs + outBlocks := make(map[cid.Cid][]byte) + for len(childMap) > 0 { + for origCid, kids := range childMap { + if len(kids) == 0 { + // compile to final cid + blk := rawBlocks[origCid] + finalCid, finalBlk, err := serializeBlock(c.Context, origCid.Prefix(), rawCodecs[origCid], blk) + if err != nil { + return err + } + outBlocks[finalCid] = finalBlk + idx := slices.Index(cidList, origCid) + cidList[idx] = finalCid + + // update other remaining nodes of the new cid. + for otherCid, otherKids := range childMap { + for i, otherKid := range otherKids { + if otherKid.Equals(origCid) { + if !finalCid.Equals(origCid) { + // update block + rawBlocks[otherCid] = bytes.ReplaceAll(rawBlocks[otherCid], origCid.Bytes(), finalCid.Bytes()) + rawBlocks[otherCid] = bytes.ReplaceAll(rawBlocks[otherCid], []byte(origCid.String()), []byte(finalCid.String())) + } + // remove from childMap + nok := append(otherKids[0:i], otherKids[i+1:]...) + childMap[otherCid] = nok + break // to next child map entry. 
+ } + } + } + + delete(childMap, origCid) + } + } + } + + if !v2 { + // write output + outStream := os.Stdout + if c.IsSet("output") { + outFileName := c.String("output") + if outFileName == "" { + outFileName = carName + } + outFile, err := os.Create(outFileName) + if err != nil { + return err + } + defer outFile.Close() + outStream = outFile + } + + if err := carv1.WriteHeader(&carv1.CarHeader{ + Roots: roots, + Version: 1, + }, outStream); err != nil { + return err + } + for c, blk := range outBlocks { + if err := util.LdWrite(outStream, c.Bytes(), blk); err != nil { + return err + } + } + } else { + outFileName := c.String("output") + if outFileName == "" { + outFileName = carName + } + + if outFileName == "-" && !c.IsSet("output") { + return fmt.Errorf("cannot stream carv2's to stdout") + } + bs, err := blockstore.OpenReadWrite(outFileName, roots) + if err != nil { + return err + } + for _, bc := range cidList { + blk := outBlocks[bc] + ob, _ := blocks.NewBlockWithCid(blk, bc) + bs.Put(c.Context, ob) + } + return bs.Finalize() + } + + return nil +} + +func serializeBlock(ctx context.Context, codec cid.Prefix, encoding string, raw []byte) (cid.Cid, []byte, error) { + ls := cidlink.DefaultLinkSystem() + store := memstore.Store{Bag: map[string][]byte{}} + ls.SetReadStorage(&store) + ls.SetWriteStorage(&store) + b := basicnode.Prototype.Any.NewBuilder() + if encoding == "dag-json" { + if err := dagjson.Decode(b, bytes.NewBuffer(raw)); err != nil { + return cid.Undef, nil, err + } + } else if encoding == "raw" { + if err := b.AssignBytes(raw); err != nil { + return cid.Undef, nil, err + } + } else { + return cid.Undef, nil, fmt.Errorf("unknown encoding: %s", encoding) + } + lnk, err := ls.Store(linking.LinkContext{Ctx: ctx}, cidlink.LinkPrototype{Prefix: codec}, b.Build()) + if err != nil { + return cid.Undef, nil, err + } + outCid := lnk.(cidlink.Link).Cid + outBytes, outErr := store.Get(ctx, outCid.KeyString()) + return outCid, outBytes, outErr +} + +// DebugCar is a command to translate between a car file, and a human-debuggable patch-like format. +func DebugCar(c *cli.Context) error { + var err error + inStream := os.Stdin + inFile := "-" + if c.Args().Len() >= 1 { + inFile = c.Args().First() + inStream, err = os.Open(inFile) + if err != nil { + return err + } + } + + rd, err := carv2.NewBlockReader(inStream) + if err != nil { + return err + } + + // patch the header. + outStream := os.Stdout + if c.IsSet("output") { + outFileName := c.String("output") + outFile, err := os.Create(outFileName) + if err != nil { + return err + } + defer outFile.Close() + outStream = outFile + } + + outStream.WriteString("car compile ") + if rd.Version == 2 { + outStream.WriteString("--v2 ") + } + + outStream.WriteString(inFile + "\n") + for _, rt := range rd.Roots { + fmt.Fprintf(outStream, "root %s\n", rt.String()) + } + + // patch each block. 
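+	// Each block is emitted as a diff-style chunk (see patch below):
+	//   --- <cid>
+	//   +++ <mode>[ (no-end-cr)] <cid>
+	//   @@ -0,<lines> +0,<lines> @@
+	// followed by the block body rendered as dag-json, or as raw bytes when
+	// the block is printable.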
+ nxt, err := rd.Next() + if err != nil { + return err + } + for nxt != nil { + chunk, err := patch(c.Context, nxt.Cid(), nxt.RawData()) + if err != nil { + return err + } + outStream.Write(chunk) + + nxt, err = rd.Next() + if err == io.EOF { + return nil + } + } + + return nil +} + +func patch(ctx context.Context, c cid.Cid, blk []byte) ([]byte, error) { + ls := cidlink.DefaultLinkSystem() + store := memstore.Store{Bag: map[string][]byte{}} + ls.SetReadStorage(&store) + ls.SetWriteStorage(&store) + store.Put(ctx, c.KeyString(), blk) + node, err := ls.Load(linking.LinkContext{Ctx: ctx}, cidlink.Link{Cid: c}, basicnode.Prototype.Any) + if err != nil { + return nil, fmt.Errorf("could not load block: %q", err) + } + + outMode := "dag-json" + if node.Kind() == datamodel.Kind_Bytes && isPrintable(node) { + outMode = "raw" + } + finalBuf := bytes.NewBuffer(nil) + + if outMode == "dag-json" { + opts := dagjson.EncodeOptions{ + EncodeLinks: true, + EncodeBytes: true, + MapSortMode: codec.MapSortMode_Lexical, + } + if err := dagjson.Marshal(node, json.NewEncoder(finalBuf, json.EncodeOptions{Line: []byte{'\n'}, Indent: []byte{'\t'}}), opts); err != nil { + return nil, err + } + } else if outMode == "raw" { + nb, err := node.AsBytes() + if err != nil { + return nil, err + } + finalBuf.Write(nb) + } + + // figure out number of lines. + lcnt := strings.Count(finalBuf.String(), "\n") + crStr := " (no-end-cr)" + if finalBuf.Bytes()[len(finalBuf.Bytes())-1] == '\n' { + crStr = "" + } + + outBuf := bytes.NewBuffer(nil) + outBuf.WriteString("--- " + c.String() + "\n") + outBuf.WriteString("+++ " + outMode + crStr + " " + c.String() + "\n") + outBuf.WriteString(fmt.Sprintf("@@ -%d,%d +%d,%d @@\n", 0, lcnt, 0, lcnt)) + outBuf.Write(finalBuf.Bytes()) + outBuf.WriteString("\n") + return outBuf.Bytes(), nil +} + +func isPrintable(n ipld.Node) bool { + b, err := n.AsBytes() + if err != nil { + return false + } + if !utf8.Valid(b) { + return false + } + if bytes.ContainsAny(b, string([]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x10, 0x11, 0x12, 0x13, 0x14, 0x16, 0x17, 0x18, 0x19, 0x1c, 0x1d, 0x1e, 0x1f})) { + return false + } + // check if would confuse the 'end of patch' checker. + if bytes.Contains(b, []byte("\n--- ")) { + return false + } + return true +} + +func parsePatch(br *bufio.Reader) (cid.Cid, string, []byte, error) { + // read initial line to parse CID. + l1, isPrefix, err := br.ReadLine() + if err != nil { + return cid.Undef, "", nil, err + } + if isPrefix { + return cid.Undef, "", nil, fmt.Errorf("unexpected long header l1") + } + var cs string + if _, err := fmt.Sscanf(string(l1), "--- %s", &cs); err != nil { + return cid.Undef, "", nil, fmt.Errorf("could not parse patch cid line (%s): %q", l1, err) + } + l2, isPrefix, err := br.ReadLine() + if err != nil { + return cid.Undef, "", nil, err + } + if isPrefix { + return cid.Undef, "", nil, fmt.Errorf("unexpected long header l2") + } + var mode string + var noEndReturn bool + matches := plusLineRegex.FindSubmatch(l2) + if len(matches) >= 2 { + mode = string(matches[1]) + } + if len(matches) < 2 || string(matches[len(matches)-1]) != cs { + return cid.Undef, "", nil, fmt.Errorf("mismatched cid lines: %v", string(l2)) + } + if len(matches[2]) > 0 { + noEndReturn = (string(matches[2]) == "(no-end-cr) ") + } + c, err := cid.Parse(cs) + if err != nil { + return cid.Undef, "", nil, err + } + + // skip over @@ line. 
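+	// The counts in the @@ line are informational only; parsing below relies
+	// on the "--- " delimiter of the next chunk (or EOF) to find the end of a
+	// block.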
+ l3, isPrefix, err := br.ReadLine() + if err != nil { + return cid.Undef, "", nil, err + } + if isPrefix { + return cid.Undef, "", nil, fmt.Errorf("unexpected long header l3") + } + if !strings.HasPrefix(string(l3), "@@") { + return cid.Undef, "", nil, fmt.Errorf("unexpected missing chunk prefix") + } + + // keep going until next chunk or end. + outBuf := bytes.NewBuffer(nil) + for { + peek, err := br.Peek(4) + if err != nil && err != io.EOF { + return cid.Undef, "", nil, err + } + if bytes.Equal(peek, []byte("--- ")) { + break + } + // accumulate to buffer. + l, err := br.ReadBytes('\n') + if l != nil { + outBuf.Write(l) + } + if err == io.EOF { + break + } else if err != nil { + return cid.Undef, "", nil, err + } + } + + ob := outBuf.Bytes() + + // remove the final line return + if len(ob) > 2 && bytes.Equal(ob[len(ob)-2:], []byte("\r\n")) { + ob = ob[:len(ob)-2] + } else if len(ob) > 1 && bytes.Equal(ob[len(ob)-1:], []byte("\n")) { + ob = ob[:len(ob)-1] + } + + if noEndReturn && len(ob) > 2 && bytes.Equal(ob[len(ob)-2:], []byte("\r\n")) { + ob = ob[:len(ob)-2] + } else if noEndReturn && len(ob) > 1 && bytes.Equal(ob[len(ob)-1:], []byte("\n")) { + ob = ob[:len(ob)-1] + } + + return c, mode, ob, nil +} diff --git a/cmd/car/create.go b/cmd/car/create.go new file mode 100644 index 0000000000..88f6b5be4f --- /dev/null +++ b/cmd/car/create.go @@ -0,0 +1,130 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "path" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-unixfsnode/data/builder" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/urfave/cli/v2" +) + +// CreateCar creates a car +func CreateCar(c *cli.Context) error { + var err error + if c.Args().Len() == 0 { + return fmt.Errorf("a source location to build the car from must be specified") + } + + if !c.IsSet("file") { + return fmt.Errorf("a file destination must be specified") + } + + // make a cid with the right length that we eventually will patch with the root. + hasher, err := multihash.GetHasher(multihash.SHA2_256) + if err != nil { + return err + } + digest := hasher.Sum([]byte{}) + hash, err := multihash.Encode(digest, multihash.SHA2_256) + if err != nil { + return err + } + proxyRoot := cid.NewCidV1(uint64(multicodec.DagPb), hash) + + options := []car.Option{} + switch c.Int("version") { + case 1: + options = []car.Option{blockstore.WriteAsCarV1(true)} + case 2: + // already the default + default: + return fmt.Errorf("invalid CAR version %d", c.Int("version")) + } + + cdest, err := blockstore.OpenReadWrite(c.String("file"), []cid.Cid{proxyRoot}, options...) + if err != nil { + return err + } + + // Write the unixfs blocks into the store. + root, err := writeFiles(c.Context, cdest, c.Args().Slice()...) + if err != nil { + return err + } + + if err := cdest.Finalize(); err != nil { + return err + } + // re-open/finalize with the final root. 
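+	// The CARv2 blockstore needs its roots up front, but the real root is
+	// only known once the UnixFS DAG has been written, so the same-length
+	// placeholder proxyRoot is swapped for the computed root in place.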
+	return car.ReplaceRootsInFile(c.String("file"), []cid.Cid{root})
+}
+
+func writeFiles(ctx context.Context, bs *blockstore.ReadWrite, paths ...string) (cid.Cid, error) {
+	ls := cidlink.DefaultLinkSystem()
+	ls.TrustedStorage = true
+	ls.StorageReadOpener = func(_ ipld.LinkContext, l ipld.Link) (io.Reader, error) {
+		cl, ok := l.(cidlink.Link)
+		if !ok {
+			return nil, fmt.Errorf("not a cidlink")
+		}
+		blk, err := bs.Get(ctx, cl.Cid)
+		if err != nil {
+			return nil, err
+		}
+		return bytes.NewBuffer(blk.RawData()), nil
+	}
+	ls.StorageWriteOpener = func(_ ipld.LinkContext) (io.Writer, ipld.BlockWriteCommitter, error) {
+		buf := bytes.NewBuffer(nil)
+		return buf, func(l ipld.Link) error {
+			cl, ok := l.(cidlink.Link)
+			if !ok {
+				return fmt.Errorf("not a cidlink")
+			}
+			blk, err := blocks.NewBlockWithCid(buf.Bytes(), cl.Cid)
+			if err != nil {
+				return err
+			}
+			return bs.Put(ctx, blk)
+		}, nil
+	}
+
+	topLevel := make([]dagpb.PBLink, 0, len(paths))
+	for _, p := range paths {
+		l, size, err := builder.BuildUnixFSRecursive(p, &ls)
+		if err != nil {
+			return cid.Undef, err
+		}
+		name := path.Base(p)
+		entry, err := builder.BuildUnixFSDirectoryEntry(name, int64(size), l)
+		if err != nil {
+			return cid.Undef, err
+		}
+		topLevel = append(topLevel, entry)
+	}
+
+	// make a directory for the file(s).
+
+	root, _, err := builder.BuildUnixFSDirectory(topLevel, &ls)
+	if err != nil {
+		return cid.Undef, err
+	}
+	rcl, ok := root.(cidlink.Link)
+	if !ok {
+		return cid.Undef, fmt.Errorf("could not interpret %s", root)
+	}
+
+	return rcl.Cid, nil
+}
diff --git a/cmd/car/detach.go b/cmd/car/detach.go
new file mode 100644
index 0000000000..d68593d160
--- /dev/null
+++ b/cmd/car/detach.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+	"fmt"
+	"io"
+	"os"
+
+	carv2 "github.com/ipfs/boxo/ipld/car/v2"
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/multiformats/go-multihash"
+	"github.com/urfave/cli/v2"
+)
+
+// DetachCar is a command to output the index part of a car.
+func DetachCar(c *cli.Context) error {
+	r, err := carv2.OpenReader(c.Args().Get(0))
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	if !r.Header.HasIndex() {
+		return fmt.Errorf("no index present")
+	}
+
+	outStream := os.Stdout
+	if c.Args().Len() >= 2 {
+		outStream, err = os.Create(c.Args().Get(1))
+		if err != nil {
+			return err
+		}
+	}
+	defer outStream.Close()
+
+	ir, err := r.IndexReader()
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(outStream, ir)
+	return err
+}
+
+// DetachCarList prints a list of what's found in a detached index.
+func DetachCarList(c *cli.Context) error { + var err error + + inStream := os.Stdin + if c.Args().Len() >= 1 { + inStream, err = os.Open(c.Args().First()) + if err != nil { + return err + } + defer inStream.Close() + } + + idx, err := index.ReadFrom(inStream) + if err != nil { + return err + } + + if iidx, ok := idx.(index.IterableIndex); ok { + err := iidx.ForEach(func(mh multihash.Multihash, offset uint64) error { + fmt.Printf("%s %d\n", mh, offset) + return nil + }) + if err != nil { + return err + } + return nil + } + + return fmt.Errorf("index of codec %s is not iterable", idx.Codec()) +} diff --git a/cmd/car/extract.go b/cmd/car/extract.go new file mode 100644 index 0000000000..9a80317501 --- /dev/null +++ b/cmd/car/extract.go @@ -0,0 +1,443 @@ +package main + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/ipfs/boxo/ipld/car/v2" + carstorage "github.com/ipfs/boxo/ipld/car/v2/storage" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-unixfsnode" + "github.com/ipfs/go-unixfsnode/data" + "github.com/ipfs/go-unixfsnode/file" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/storage" + "github.com/urfave/cli/v2" +) + +var ErrNotDir = fmt.Errorf("not a directory") + +// ExtractCar pulls files and directories out of a car +func ExtractCar(c *cli.Context) error { + outputDir, err := os.Getwd() + if err != nil { + return err + } + if c.Args().Present() { + outputDir = c.Args().First() + } + + var store storage.ReadableStorage + var roots []cid.Cid + + if c.String("file") == "" { + if f, ok := c.App.Reader.(*os.File); ok { + stat, err := f.Stat() + if err != nil { + return err + } + if (stat.Mode() & os.ModeCharDevice) != 0 { + // Is a terminal. In reality the user is unlikely to actually paste + // CAR data into this terminal, but this message may serve to make + // them aware that they can/should pipe data into this command. 
+ stopKeys := "Ctrl+D" + if runtime.GOOS == "windows" { + stopKeys = "Ctrl+Z, Enter" + } + fmt.Fprintf(c.App.ErrWriter, "Reading from stdin; use %s to end\n", stopKeys) + } + } + var err error + store, roots, err = NewStdinReadStorage(c.App.Reader) + if err != nil { + return err + } + } else { + carFile, err := os.Open(c.String("file")) + if err != nil { + return err + } + store, err = carstorage.OpenReadable(carFile) + if err != nil { + return err + } + roots = store.(carstorage.ReadableCar).Roots() + } + + ls := cidlink.DefaultLinkSystem() + ls.TrustedStorage = true + ls.SetReadStorage(store) + + path, err := pathSegments(c.String("path")) + if err != nil { + return err + } + + var extractedFiles int + for _, root := range roots { + count, err := extractRoot(c, &ls, root, outputDir, path) + if err != nil { + return err + } + extractedFiles += count + } + if extractedFiles == 0 { + return cli.Exit("no files extracted", 1) + } else { + fmt.Fprintf(c.App.ErrWriter, "extracted %d file(s)\n", extractedFiles) + } + + return nil +} + +func extractRoot(c *cli.Context, ls *ipld.LinkSystem, root cid.Cid, outputDir string, path []string) (int, error) { + if root.Prefix().Codec == cid.Raw { + if c.IsSet("verbose") { + fmt.Fprintf(c.App.ErrWriter, "skipping raw root %s\n", root) + } + return 0, nil + } + + pbn, err := ls.Load(ipld.LinkContext{}, cidlink.Link{Cid: root}, dagpb.Type.PBNode) + if err != nil { + return 0, err + } + pbnode := pbn.(dagpb.PBNode) + + ufn, err := unixfsnode.Reify(ipld.LinkContext{}, pbnode, ls) + if err != nil { + return 0, err + } + + var outputResolvedDir string + if outputDir != "-" { + outputResolvedDir, err = filepath.EvalSymlinks(outputDir) + if err != nil { + return 0, err + } + if _, err := os.Stat(outputResolvedDir); os.IsNotExist(err) { + if err := os.Mkdir(outputResolvedDir, 0755); err != nil { + return 0, err + } + } + } + + count, err := extractDir(c, ls, ufn, outputResolvedDir, "/", path) + if err != nil { + if !errors.Is(err, ErrNotDir) { + return 0, fmt.Errorf("%s: %w", root, err) + } + + // if it's not a directory, it's a file. + ufsData, err := pbnode.LookupByString("Data") + if err != nil { + return 0, err + } + ufsBytes, err := ufsData.AsBytes() + if err != nil { + return 0, err + } + ufsNode, err := data.DecodeUnixFSData(ufsBytes) + if err != nil { + return 0, err + } + var outputName string + if outputDir != "-" { + outputName = filepath.Join(outputResolvedDir, "unknown") + } + if ufsNode.DataType.Int() == data.Data_File || ufsNode.DataType.Int() == data.Data_Raw { + if err := extractFile(c, ls, pbnode, outputName); err != nil { + return 0, err + } + } + return 1, nil + } + + return count, nil +} + +func resolvePath(root, pth string) (string, error) { + rp, err := filepath.Rel("/", pth) + if err != nil { + return "", fmt.Errorf("couldn't check relative-ness of %s: %w", pth, err) + } + joined := path.Join(root, rp) + + basename := path.Dir(joined) + final, err := filepath.EvalSymlinks(basename) + if err != nil { + return "", fmt.Errorf("couldn't eval symlinks in %s: %w", basename, err) + } + if final != path.Clean(basename) { + return "", fmt.Errorf("path attempts to redirect through symlinks") + } + return joined, nil +} + +func extractDir(c *cli.Context, ls *ipld.LinkSystem, n ipld.Node, outputRoot, outputPath string, matchPath []string) (int, error) { + if outputRoot != "" { + dirPath, err := resolvePath(outputRoot, outputPath) + if err != nil { + return 0, err + } + // make the directory. 
+		if err := os.MkdirAll(dirPath, 0755); err != nil {
+			return 0, err
+		}
+	}
+
+	if n.Kind() != ipld.Kind_Map {
+		return 0, ErrNotDir
+	}
+
+	subPath := matchPath
+	if len(matchPath) > 0 {
+		subPath = matchPath[1:]
+	}
+
+	extractElement := func(name string, n ipld.Node) (int, error) {
+		var nextRes string
+		if outputRoot != "" {
+			var err error
+			nextRes, err = resolvePath(outputRoot, path.Join(outputPath, name))
+			if err != nil {
+				return 0, err
+			}
+			if c.IsSet("verbose") {
+				fmt.Fprintf(c.App.Writer, "%s\n", nextRes)
+			}
+		}
+
+		if n.Kind() != ipld.Kind_Link {
+			return 0, fmt.Errorf("unexpected map value for %s at %s", name, outputPath)
+		}
+		// a directory may be represented as a map of name:<link> if unixADL is applied
+		vl, err := n.AsLink()
+		if err != nil {
+			return 0, err
+		}
+		dest, err := ls.Load(ipld.LinkContext{}, vl, basicnode.Prototype.Any)
+		if err != nil {
+			if nf, ok := err.(interface{ NotFound() bool }); ok && nf.NotFound() {
+				fmt.Fprintf(c.App.ErrWriter, "data for entry not found: %s (skipping...)\n", path.Join(outputPath, name))
+				return 0, nil
+			}
+			return 0, err
+		}
+		// degenerate files are handled here.
+		if dest.Kind() == ipld.Kind_Bytes {
+			if err := extractFile(c, ls, dest, nextRes); err != nil {
+				return 0, err
+			}
+			return 1, nil
+		}
+
+		// dir / pbnode
+		pbb := dagpb.Type.PBNode.NewBuilder()
+		if err := pbb.AssignNode(dest); err != nil {
+			return 0, err
+		}
+		pbnode := pbb.Build().(dagpb.PBNode)
+
+		// interpret dagpb 'data' as unixfs data and look at type.
+		ufsData, err := pbnode.LookupByString("Data")
+		if err != nil {
+			return 0, err
+		}
+		ufsBytes, err := ufsData.AsBytes()
+		if err != nil {
+			return 0, err
+		}
+		ufsNode, err := data.DecodeUnixFSData(ufsBytes)
+		if err != nil {
+			return 0, err
+		}
+
+		switch ufsNode.DataType.Int() {
+		case data.Data_Directory, data.Data_HAMTShard:
+			ufn, err := unixfsnode.Reify(ipld.LinkContext{}, pbnode, ls)
+			if err != nil {
+				return 0, err
+			}
+			return extractDir(c, ls, ufn, outputRoot, path.Join(outputPath, name), subPath)
+		case data.Data_File, data.Data_Raw:
+			if err := extractFile(c, ls, pbnode, nextRes); err != nil {
+				return 0, err
+			}
+			return 1, nil
+		case data.Data_Symlink:
+			if nextRes == "" {
+				return 0, fmt.Errorf("cannot extract a symlink to stdout")
+			}
+			data := ufsNode.Data.Must().Bytes()
+			if err := os.Symlink(string(data), nextRes); err != nil {
+				return 0, err
+			}
+			return 1, nil
+		default:
+			return 0, fmt.Errorf("unknown unixfs type: %d", ufsNode.DataType.Int())
+		}
+	}
+
+	// specific path segment
+	if len(matchPath) > 0 {
+		val, err := n.LookupByString(matchPath[0])
+		if err != nil {
+			return 0, err
+		}
+		return extractElement(matchPath[0], val)
+	}
+
+	if outputPath == "-" && len(matchPath) == 0 {
+		return 0, fmt.Errorf("cannot extract a directory to stdout, use a path to extract a specific file")
+	}
+
+	// everything
+	var count int
+	var shardSkip int
+	mi := n.MapIterator()
+	for !mi.Done() {
+		key, val, err := mi.Next()
+		if err != nil {
+			if nf, ok := err.(interface{ NotFound() bool }); ok && nf.NotFound() {
+				shardSkip++
+				continue
+			}
+			return 0, err
+		}
+		ks, err := key.AsString()
+		if err != nil {
+			return 0, err
+		}
+		ecount, err := extractElement(ks, val)
+		if err != nil {
+			return 0, err
+		}
+		count += ecount
+	}
+	if shardSkip > 0 {
+		fmt.Fprintf(c.App.ErrWriter, "data for entry not found for %d unknown sharded entries (skipped...)\n", shardSkip)
+	}
+	return count, nil
+}
+
+func extractFile(c *cli.Context, ls *ipld.LinkSystem, n ipld.Node, outputName string) error {
+	node, err := 
file.NewUnixFSFile(c.Context, n, ls) + if err != nil { + return err + } + nlr, err := node.AsLargeBytes() + if err != nil { + return err + } + var f *os.File + if outputName == "" { + f = os.Stdout + } else { + f, err = os.Create(outputName) + if err != nil { + return err + } + defer f.Close() + } + _, err = io.Copy(f, nlr) + return err +} + +// TODO: dedupe this with lassie, probably into go-unixfsnode +func pathSegments(path string) ([]string, error) { + segments := strings.Split(path, "/") + filtered := make([]string, 0, len(segments)) + for i := 0; i < len(segments); i++ { + if segments[i] == "" { + // Allow one leading and one trailing '/' at most + if i == 0 || i == len(segments)-1 { + continue + } + return nil, fmt.Errorf("invalid empty path segment at position %d", i) + } + if segments[i] == "." || segments[i] == ".." { + return nil, fmt.Errorf("'%s' is unsupported in paths", segments[i]) + } + filtered = append(filtered, segments[i]) + } + return filtered, nil +} + +var _ storage.ReadableStorage = (*stdinReadStorage)(nil) + +type stdinReadStorage struct { + blocks map[string][]byte + done bool + lk *sync.RWMutex + cond *sync.Cond +} + +func NewStdinReadStorage(reader io.Reader) (*stdinReadStorage, []cid.Cid, error) { + var lk sync.RWMutex + srs := &stdinReadStorage{ + blocks: make(map[string][]byte), + lk: &lk, + cond: sync.NewCond(&lk), + } + rdr, err := car.NewBlockReader(reader) + if err != nil { + return nil, nil, err + } + go func() { + for { + blk, err := rdr.Next() + if err == io.EOF { + srs.lk.Lock() + srs.done = true + srs.lk.Unlock() + return + } + if err != nil { + panic(err) + } + srs.lk.Lock() + srs.blocks[string(blk.Cid().Hash())] = blk.RawData() + srs.cond.Broadcast() + srs.lk.Unlock() + } + }() + return srs, rdr.Roots, nil +} + +func (srs *stdinReadStorage) Has(ctx context.Context, key string) (bool, error) { + _, err := srs.Get(ctx, key) + if err != nil { + return false, err + } + return true, nil +} + +func (srs *stdinReadStorage) Get(ctx context.Context, key string) ([]byte, error) { + c, err := cid.Cast([]byte(key)) + if err != nil { + return nil, err + } + srs.lk.Lock() + defer srs.lk.Unlock() + for { + if data, ok := srs.blocks[string(c.Hash())]; ok { + return data, nil + } + if srs.done { + return nil, carstorage.ErrNotFound{Cid: c} + } + srs.cond.Wait() + } +} diff --git a/cmd/car/filter.go b/cmd/car/filter.go new file mode 100644 index 0000000000..6d74f9fe40 --- /dev/null +++ b/cmd/car/filter.go @@ -0,0 +1,128 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" +) + +// FilterCar is a command to select a subset of a car by CID. +func FilterCar(c *cli.Context) error { + if c.Args().Len() < 2 { + return fmt.Errorf("an output filename must be provided") + } + + fd, err := os.Open(c.Args().First()) + if err != nil { + return err + } + defer fd.Close() + rd, err := carv2.NewBlockReader(fd) + if err != nil { + return err + } + + // Get the set of CIDs from stdin. 
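+	// parseCIDS expects one CID per line; blank lines are skipped and
+	// duplicates only trigger a warning on stderr.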
+	inStream := os.Stdin
+	if c.IsSet("cid-file") {
+		inStream, err = os.Open(c.String("cid-file"))
+		if err != nil {
+			return err
+		}
+		defer inStream.Close()
+	}
+	cidMap, err := parseCIDS(inStream)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("filtering to %d cids\n", len(cidMap))
+
+	outRoots := make([]cid.Cid, 0)
+	for _, r := range rd.Roots {
+		if _, ok := cidMap[r]; ok {
+			outRoots = append(outRoots, r)
+		}
+	}
+
+	outPath := c.Args().Get(1)
+	if !c.Bool("append") {
+		if _, err := os.Stat(outPath); err == nil || !os.IsNotExist(err) {
+			// output to an existing file.
+			if err := os.Truncate(outPath, 0); err != nil {
+				return err
+			}
+		}
+	} else {
+		// roots will need to be whatever is in the output already.
+		cv2r, err := carv2.OpenReader(outPath)
+		if err != nil {
+			return err
+		}
+		if cv2r.Version != 2 {
+			return fmt.Errorf("can only append to version 2 car files")
+		}
+		outRoots, err = cv2r.Roots()
+		if err != nil {
+			return err
+		}
+		_ = cv2r.Close()
+	}
+
+	if len(outRoots) == 0 {
+		fmt.Fprintf(os.Stderr, "warning: no roots defined after filtering\n")
+	}
+
+	bs, err := blockstore.OpenReadWrite(outPath, outRoots)
+	if err != nil {
+		return err
+	}
+
+	for {
+		blk, err := rd.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		if _, ok := cidMap[blk.Cid()]; ok {
+			if err := bs.Put(c.Context, blk); err != nil {
+				return err
+			}
+		}
+	}
+	return bs.Finalize()
+}
+
+func parseCIDS(r io.Reader) (map[cid.Cid]struct{}, error) {
+	cids := make(map[cid.Cid]struct{})
+	br := bufio.NewReader(r)
+	for {
+		line, _, err := br.ReadLine()
+		if err != nil {
+			if err == io.EOF {
+				return cids, nil
+			}
+			return nil, err
+		}
+		trimLine := strings.TrimSpace(string(line))
+		if len(trimLine) == 0 {
+			continue
+		}
+		c, err := cid.Parse(trimLine)
+		if err != nil {
+			return nil, err
+		}
+		if _, ok := cids[c]; ok {
+			fmt.Fprintf(os.Stderr, "duplicate cid: %s\n", c)
+		}
+		cids[c] = struct{}{}
+	}
+}
diff --git a/cmd/car/get.go b/cmd/car/get.go
new file mode 100644
index 0000000000..0d249e6a5d
--- /dev/null
+++ b/cmd/car/get.go
@@ -0,0 +1,215 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"io"
+	"os"
+
+	dagpb "github.com/ipld/go-codec-dagpb"
+	"github.com/ipld/go-ipld-prime"
+	_ "github.com/ipld/go-ipld-prime/codec/cbor"
+	_ "github.com/ipld/go-ipld-prime/codec/dagcbor"
+	_ "github.com/ipld/go-ipld-prime/codec/dagjson"
+	_ "github.com/ipld/go-ipld-prime/codec/json"
+	_ "github.com/ipld/go-ipld-prime/codec/raw"
+
+	"github.com/ipfs/boxo/ipld/car"
+	"github.com/ipfs/boxo/ipld/car/v2/blockstore"
+	"github.com/ipfs/go-cid"
+	ipldfmt "github.com/ipfs/go-ipld-format"
+	"github.com/ipfs/go-unixfsnode"
+	"github.com/ipld/go-ipld-prime/datamodel"
+	"github.com/ipld/go-ipld-prime/linking"
+	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+	"github.com/ipld/go-ipld-prime/node/basicnode"
+	"github.com/ipld/go-ipld-prime/traversal"
+	"github.com/ipld/go-ipld-prime/traversal/selector"
+	selectorParser "github.com/ipld/go-ipld-prime/traversal/selector/parse"
+	"github.com/urfave/cli/v2"
+)
+
+// GetCarBlock is a command to get a block out of a car
+func GetCarBlock(c *cli.Context) error {
+	if c.Args().Len() < 2 {
+		return fmt.Errorf("usage: car get-block <file.car> <block cid> [output file]")
+	}
+
+	bs, err := blockstore.OpenReadOnly(c.Args().Get(0))
+	if err != nil {
+		return err
+	}
+
+	// string to CID
+	blkCid, err := cid.Parse(c.Args().Get(1))
+	if err != nil {
+		return err
+	}
+
+	blk, err := bs.Get(c.Context, blkCid)
+	if err != nil {
+		return err
+	}
+
+	outStream := os.Stdout
+	if c.Args().Len() >= 3 {
+		outStream, err = os.Create(c.Args().Get(2))
+		if err != nil {
+			return err
+		}
+		defer outStream.Close()
+	}
+
+	_, err = outStream.Write(blk.RawData())
+	return err
+}
+
+// GetCarDag is a command to get a dag out of a car
+func GetCarDag(c *cli.Context) error {
+	if c.Args().Len() < 2 {
+		return fmt.Errorf("usage: car get-dag [-s selector] <file.car> [root cid] <output file>")
+	}
+
+	// if the root cid is omitted we'll read it from the root of the car file.
+	output := c.Args().Get(1)
+	var rootCid cid.Cid
+
+	bs, err := blockstore.OpenReadOnly(c.Args().Get(0))
+	if err != nil {
+		return err
+	}
+
+	if c.Args().Len() == 2 {
+		roots, err := bs.Roots()
+		if err != nil {
+			return err
+		}
+		if len(roots) != 1 {
+			return fmt.Errorf("car file does not have exactly one root, dag root must be specified explicitly")
+		}
+		rootCid = roots[0]
+	} else {
+		rootCid, err = cid.Parse(output)
+		if err != nil {
+			return err
+		}
+		output = c.Args().Get(2)
+	}
+
+	strict := c.Bool("strict")
+
+	// selector traversal, default to MatchAllRecursively, which walks (and
+	// therefore loads) every block of the DAG reachable from the root
+	sel := selectorParser.CommonSelector_MatchAllRecursively
+	if c.IsSet("selector") {
+		sel, err = selectorParser.ParseJSONSelector(c.String("selector"))
+		if err != nil {
+			return err
+		}
+	}
+	linkVisitOnlyOnce := !c.IsSet("selector") // if using a custom selector, this isn't as safe
+
+	switch c.Int("version") {
+	case 2:
+		return writeCarV2(c.Context, rootCid, output, bs, strict, sel, linkVisitOnlyOnce)
+	case 1:
+		return writeCarV1(rootCid, output, bs, strict, sel, linkVisitOnlyOnce)
+	default:
+		return fmt.Errorf("invalid CAR version %d", c.Int("version"))
+	}
+}
+
+func writeCarV2(ctx context.Context, rootCid cid.Cid, output string, bs *blockstore.ReadOnly, strict bool, sel datamodel.Node, linkVisitOnlyOnce bool) error {
+	_ = os.Remove(output)
+
+	outStore, err := blockstore.OpenReadWrite(output, []cid.Cid{rootCid}, blockstore.AllowDuplicatePuts(false))
+	if err != nil {
+		return err
+	}
+
+	ls := cidlink.DefaultLinkSystem()
+	ls.KnownReifiers = map[string]linking.NodeReifier{"unixfs": unixfsnode.Reify}
+	ls.TrustedStorage = true
+	ls.StorageReadOpener = func(_ linking.LinkContext, l datamodel.Link) (io.Reader, error) {
+		if cl, ok := l.(cidlink.Link); ok {
+			blk, err := bs.Get(ctx, cl.Cid)
+			if err != nil {
+				if ipldfmt.IsNotFound(err) {
+					if strict {
+						return nil, err
+					}
+					return nil, traversal.SkipMe{}
+				}
+				return nil, err
+			}
+			if err := outStore.Put(ctx, blk); err != nil {
+				return nil, err
+			}
+			return bytes.NewBuffer(blk.RawData()), nil
+		}
+		return nil, fmt.Errorf("unknown link type: %T", l)
+	}
+
+	nsc := func(lnk datamodel.Link, lctx ipld.LinkContext) (datamodel.NodePrototype, error) {
+		if lnk, ok := lnk.(cidlink.Link); ok && lnk.Cid.Prefix().Codec == 0x70 {
+			return dagpb.Type.PBNode, nil
+		}
+		return basicnode.Prototype.Any, nil
+	}
+
+	rootLink := cidlink.Link{Cid: rootCid}
+	ns, _ := nsc(rootLink, ipld.LinkContext{})
+	rootNode, err := ls.Load(ipld.LinkContext{}, rootLink, ns)
+	if err != nil {
+		return err
+	}
+
+	traversalProgress := traversal.Progress{
+		Cfg: &traversal.Config{
+			LinkSystem:                     ls,
+			LinkTargetNodePrototypeChooser: nsc,
+			LinkVisitOnlyOnce:              linkVisitOnlyOnce,
+		},
+	}
+
+	s, err := selector.CompileSelector(sel)
+	if err != nil {
+		return err
+	}
+
+	err = traversalProgress.WalkMatching(rootNode, s, func(p traversal.Progress, n datamodel.Node) error {
+		lb, ok := n.(datamodel.LargeBytesNode)
+		if ok {
+ rs, err := lb.AsLargeBytes() + if err == nil { + _, err := io.Copy(io.Discard, rs) + if err != nil { + return err + } + } + } + return nil + }) + if err != nil { + return err + } + + return outStore.Finalize() +} + +func writeCarV1(rootCid cid.Cid, output string, bs *blockstore.ReadOnly, strict bool, sel datamodel.Node, linkVisitOnlyOnce bool) error { + opts := make([]car.Option, 0) + if linkVisitOnlyOnce { + opts = append(opts, car.TraverseLinksOnlyOnce()) + } + sc := car.NewSelectiveCar(context.Background(), bs, []car.Dag{{Root: rootCid, Selector: sel}}, opts...) + f, err := os.Create(output) + if err != nil { + return err + } + defer f.Close() + + return sc.Write(f) +} diff --git a/cmd/car/index.go b/cmd/car/index.go new file mode 100644 index 0000000000..1bf0945fc1 --- /dev/null +++ b/cmd/car/index.go @@ -0,0 +1,210 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + + carv1 "github.com/ipfs/boxo/ipld/car" + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-varint" + "github.com/urfave/cli/v2" +) + +// IndexCar is a command to add an index to a car +func IndexCar(c *cli.Context) error { + r, err := carv2.OpenReader(c.Args().Get(0)) + if err != nil { + return err + } + defer r.Close() + + if c.Int("version") == 1 { + if c.IsSet("codec") && c.String("codec") != "none" { + return fmt.Errorf("'none' is the only supported codec for a v1 car") + } + outStream := os.Stdout + if c.Args().Len() >= 2 { + outStream, err = os.Create(c.Args().Get(1)) + if err != nil { + return err + } + } + defer outStream.Close() + + dr, err := r.DataReader() + if err != nil { + return err + } + _, err = io.Copy(outStream, dr) + return err + } + + if c.Int("version") != 2 { + return fmt.Errorf("invalid CAR version %d", c.Int("version")) + } + + var idx index.Index + if c.String("codec") != "none" { + var mc multicodec.Code + if err := mc.Set(c.String("codec")); err != nil { + return err + } + idx, err = index.New(mc) + if err != nil { + return err + } + } + + outStream := os.Stdout + if c.Args().Len() >= 2 { + outStream, err = os.Create(c.Args().Get(1)) + if err != nil { + return err + } + } + defer outStream.Close() + + v1r, err := r.DataReader() + if err != nil { + return err + } + + if r.Version == 1 { + fi, err := os.Stat(c.Args().Get(0)) + if err != nil { + return err + } + r.Header.DataSize = uint64(fi.Size()) + } + v2Header := carv2.NewHeader(r.Header.DataSize) + if c.String("codec") == "none" { + v2Header.IndexOffset = 0 + if _, err := outStream.Write(carv2.Pragma); err != nil { + return err + } + if _, err := v2Header.WriteTo(outStream); err != nil { + return err + } + if _, err := io.Copy(outStream, v1r); err != nil { + return err + } + return nil + } + + if _, err := outStream.Write(carv2.Pragma); err != nil { + return err + } + if _, err := v2Header.WriteTo(outStream); err != nil { + return err + } + + // collect records as we go through the v1r + br := bufio.NewReader(v1r) + hdr, err := carv1.ReadHeader(br) + if err != nil { + return fmt.Errorf("error reading car header: %w", err) + } + if err := carv1.WriteHeader(hdr, outStream); err != nil { + return err + } + + records := make([]index.Record, 0) + var sectionOffset int64 + if sectionOffset, err = v1r.Seek(0, io.SeekCurrent); err != nil { + return err + } + sectionOffset -= int64(br.Buffered()) + + for { + // Read the section's length. 
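+		// A CARv1 section is a varint-prefixed length followed by the CID and
+		// the block data; the varint counts the CID plus block bytes, but not
+		// itself.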
+ sectionLen, err := varint.ReadUvarint(br) + if err != nil { + if err == io.EOF { + break + } + return err + } + if _, err := outStream.Write(varint.ToUvarint(sectionLen)); err != nil { + return err + } + + // Null padding; by default it's an error. + // TODO: integrate corresponding ReadOption + if sectionLen == 0 { + // TODO: pad writer to expected length. + break + } + + // Read the CID. + cidLen, c, err := cid.CidFromReader(br) + if err != nil { + return err + } + records = append(records, index.Record{Cid: c, Offset: uint64(sectionOffset)}) + if _, err := c.WriteBytes(outStream); err != nil { + return err + } + + // Seek to the next section by skipping the block. + // The section length includes the CID, so subtract it. + remainingSectionLen := int64(sectionLen) - int64(cidLen) + if _, err := io.CopyN(outStream, br, remainingSectionLen); err != nil { + return err + } + sectionOffset += int64(sectionLen) + int64(varint.UvarintSize(sectionLen)) + } + + if err := idx.Load(records); err != nil { + return err + } + + _, err = index.WriteTo(idx, outStream) + return err +} + +// CreateIndex is a command to write out an index of the CAR file +func CreateIndex(c *cli.Context) error { + r, err := carv2.OpenReader(c.Args().Get(0)) + if err != nil { + return err + } + defer r.Close() + + outStream := os.Stdout + if c.Args().Len() >= 2 { + outStream, err = os.Create(c.Args().Get(1)) + if err != nil { + return err + } + } + defer outStream.Close() + + var mc multicodec.Code + if err := mc.Set(c.String("codec")); err != nil { + return err + } + idx, err := index.New(mc) + if err != nil { + return err + } + + dr, err := r.DataReader() + if err != nil { + return err + } + + if err := carv2.LoadIndex(idx, dr); err != nil { + return err + } + + if _, err := index.WriteTo(idx, outStream); err != nil { + return err + } + + return nil +} diff --git a/cmd/car/inspect.go b/cmd/car/inspect.go new file mode 100644 index 0000000000..67c2031bf5 --- /dev/null +++ b/cmd/car/inspect.go @@ -0,0 +1,137 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strings" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/multiformats/go-multicodec" + "github.com/urfave/cli/v2" +) + +// InspectCar verifies a CAR and prints a basic report about its contents +func InspectCar(c *cli.Context) (err error) { + inStream := os.Stdin + if c.Args().Len() >= 1 { + inStream, err = os.Open(c.Args().First()) + if err != nil { + return err + } + } + + rd, err := carv2.NewReader(inStream, carv2.ZeroLengthSectionAsEOF(true)) + if err != nil { + return err + } + stats, err := rd.Inspect(c.IsSet("full")) + if err != nil { + return err + } + + if stats.Version == 1 && c.IsSet("full") { // check that we've read all the data + got, err := inStream.Read(make([]byte, 1)) // force EOF + if err != nil && err != io.EOF { + return err + } else if got > 0 { + return fmt.Errorf("unexpected data after EOF: %d", got) + } + } + + var v2s string + if stats.Version == 2 { + idx := "(none)" + if stats.IndexCodec != 0 { + idx = stats.IndexCodec.String() + } + var buf bytes.Buffer + stats.Header.Characteristics.WriteTo(&buf) + v2s = fmt.Sprintf(`Characteristics: %x +Data offset: %d +Data (payload) length: %d +Index offset: %d +Index type: %s +`, buf.Bytes(), stats.Header.DataOffset, stats.Header.DataSize, stats.Header.IndexOffset, idx) + } + + var roots strings.Builder + switch len(stats.Roots) { + case 0: + roots.WriteString(" (none)") + case 1: + roots.WriteString(" ") + roots.WriteString(stats.Roots[0].String()) + default: + for _, 
r := range stats.Roots { + roots.WriteString("\n\t") + roots.WriteString(r.String()) + } + } + + var codecs strings.Builder + { + keys := make([]int, len(stats.CodecCounts)) + i := 0 + for codec := range stats.CodecCounts { + keys[i] = int(codec) + i++ + } + sort.Ints(keys) + for _, code := range keys { + codec := multicodec.Code(code) + codecs.WriteString(fmt.Sprintf("\n\t%s: %d", codec, stats.CodecCounts[codec])) + } + } + + var hashers strings.Builder + { + keys := make([]int, len(stats.MhTypeCounts)) + i := 0 + for codec := range stats.MhTypeCounts { + keys[i] = int(codec) + i++ + } + sort.Ints(keys) + for _, code := range keys { + codec := multicodec.Code(code) + hashers.WriteString(fmt.Sprintf("\n\t%s: %d", codec, stats.MhTypeCounts[codec])) + } + } + + rp := "No" + if stats.RootsPresent { + rp = "Yes" + } + + pfmt := `Version: %d +%sRoots:%s +Root blocks present in data: %s +Block count: %d +Min / average / max block length (bytes): %d / %d / %d +Min / average / max CID length (bytes): %d / %d / %d +Block count per codec:%s +CID count per multihash:%s +` + + fmt.Printf( + pfmt, + stats.Version, + v2s, + roots.String(), + rp, + stats.BlockCount, + stats.MinBlockLength, + stats.AvgBlockLength, + stats.MaxBlockLength, + stats.MinCidLength, + stats.AvgCidLength, + stats.MaxCidLength, + codecs.String(), + hashers.String(), + ) + + return nil +} diff --git a/cmd/car/list.go b/cmd/car/list.go new file mode 100644 index 0000000000..7a6e9f1852 --- /dev/null +++ b/cmd/car/list.go @@ -0,0 +1,222 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "os" + "path" + + "github.com/dustin/go-humanize" + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/go-cid" + data "github.com/ipfs/go-unixfsnode/data" + "github.com/ipfs/go-unixfsnode/hamt" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/multiformats/go-multicodec" + "github.com/urfave/cli/v2" +) + +// ListCar is a command to output the cids in a car. +func ListCar(c *cli.Context) error { + var err error + outStream := os.Stdout + if c.Args().Len() >= 2 { + outStream, err = os.Create(c.Args().Get(1)) + if err != nil { + return err + } + } + defer outStream.Close() + + if c.Bool("unixfs") { + return listUnixfs(c, outStream) + } + + inStream := os.Stdin + if c.Args().Len() >= 1 { + inStream, err = os.Open(c.Args().First()) + if err != nil { + return err + } + defer inStream.Close() + } + + rd, err := carv2.NewBlockReader(inStream) + if err != nil { + return err + } + + for { + blk, err := rd.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + if c.Bool("verbose") { + fmt.Fprintf(outStream, "%s: %s\n", + multicodec.Code(blk.Cid().Prefix().Codec).String(), + blk.Cid()) + if blk.Cid().Prefix().Codec == uint64(multicodec.DagPb) { + // parse as dag-pb + builder := dagpb.Type.PBNode.NewBuilder() + if err := dagpb.DecodeBytes(builder, blk.RawData()); err != nil { + fmt.Fprintf(outStream, "\tnot interpretable as dag-pb: %s\n", err) + continue + } + n := builder.Build() + pbn, ok := n.(dagpb.PBNode) + if !ok { + continue + } + dl := 0 + if pbn.Data.Exists() { + dl = len(pbn.Data.Must().Bytes()) + } + fmt.Fprintf(outStream, "\t%d links. 
%d bytes\n", pbn.Links.Length(), dl) + // example link: + li := pbn.Links.ListIterator() + max := 3 + for !li.Done() { + _, l, _ := li.Next() + max-- + pbl, ok := l.(dagpb.PBLink) + if ok && max >= 0 { + hsh := "" + lnk, ok := pbl.Hash.Link().(cidlink.Link) + if ok { + hsh = lnk.Cid.String() + } + name := "" + if pbl.Name.Exists() { + name = pbl.Name.Must().String() + } + size := 0 + if pbl.Tsize.Exists() { + size = int(pbl.Tsize.Must().Int()) + } + fmt.Fprintf(outStream, "\t\t%s[%s] %s\n", name, humanize.Bytes(uint64(size)), hsh) + } + } + if max < 0 { + fmt.Fprintf(outStream, "\t\t(%d total)\n", 3-max) + } + // see if it's unixfs. + ufd, err := data.DecodeUnixFSData(pbn.Data.Must().Bytes()) + if err != nil { + fmt.Fprintf(outStream, "\tnot interpretable as unixfs: %s\n", err) + continue + } + fmt.Fprintf(outStream, "\tUnixfs %s\n", data.DataTypeNames[ufd.FieldDataType().Int()]) + } + } else { + fmt.Fprintf(outStream, "%s\n", blk.Cid()) + } + } + + return err +} + +func listUnixfs(c *cli.Context, outStream io.Writer) error { + if c.Args().Len() == 0 { + return fmt.Errorf("must provide file to read from. unixfs reading requires random access") + } + + bs, err := blockstore.OpenReadOnly(c.Args().First()) + if err != nil { + return err + } + ls := cidlink.DefaultLinkSystem() + ls.TrustedStorage = true + ls.StorageReadOpener = func(_ ipld.LinkContext, l ipld.Link) (io.Reader, error) { + cl, ok := l.(cidlink.Link) + if !ok { + return nil, fmt.Errorf("not a cidlink") + } + blk, err := bs.Get(c.Context, cl.Cid) + if err != nil { + return nil, err + } + return bytes.NewBuffer(blk.RawData()), nil + } + + roots, err := bs.Roots() + if err != nil { + return err + } + for _, r := range roots { + if err := printUnixFSNode(c, "", r, &ls, outStream); err != nil { + return err + } + } + return nil +} + +func printUnixFSNode(c *cli.Context, prefix string, node cid.Cid, ls *ipld.LinkSystem, outStream io.Writer) error { + // it might be a raw file (bytes) node. if so, not actually an error. + if node.Prefix().Codec == cid.Raw { + return nil + } + + pbn, err := ls.Load(ipld.LinkContext{}, cidlink.Link{Cid: node}, dagpb.Type.PBNode) + if err != nil { + return err + } + + pbnode := pbn.(dagpb.PBNode) + + ufd, err := data.DecodeUnixFSData(pbnode.Data.Must().Bytes()) + if err != nil { + return err + } + + if ufd.FieldDataType().Int() == data.Data_Directory { + i := pbnode.Links.Iterator() + for !i.Done() { + _, l := i.Next() + name := path.Join(prefix, l.Name.Must().String()) + fmt.Fprintf(outStream, "%s\n", name) + // recurse into the file/directory + cl, err := l.Hash.AsLink() + if err != nil { + return err + } + if cidl, ok := cl.(cidlink.Link); ok { + if err := printUnixFSNode(c, name, cidl.Cid, ls, outStream); err != nil { + return err + } + } + + } + } else if ufd.FieldDataType().Int() == data.Data_HAMTShard { + hn, err := hamt.AttemptHAMTShardFromNode(c.Context, pbn, ls) + if err != nil { + return err + } + i := hn.Iterator() + for !i.Done() { + n, l := i.Next() + fmt.Fprintf(outStream, "%s\n", path.Join(prefix, n.String())) + // recurse into the file/directory + cl, err := l.AsLink() + if err != nil { + return err + } + if cidl, ok := cl.(cidlink.Link); ok { + if err := printUnixFSNode(c, path.Join(prefix, n.String()), cidl.Cid, ls, outStream); err != nil { + return err + } + } + } + } else { + // file, file chunk, symlink, other un-named entities. 
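+		// nothing to recurse into: only directories and HAMT shards
+		// contribute names to the listing.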
+ return nil + } + + return nil +} diff --git a/cmd/car/root.go b/cmd/car/root.go new file mode 100644 index 0000000000..9bcfd4884a --- /dev/null +++ b/cmd/car/root.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/urfave/cli/v2" +) + +// CarRoot prints the root CID in a car +func CarRoot(c *cli.Context) (err error) { + inStream := os.Stdin + if c.Args().Len() >= 1 { + inStream, err = os.Open(c.Args().First()) + if err != nil { + return err + } + } + + rd, err := carv2.NewBlockReader(inStream) + if err != nil { + return err + } + for _, r := range rd.Roots { + fmt.Printf("%s\n", r.String()) + } + + return nil +} diff --git a/cmd/car/script_test.go b/cmd/car/script_test.go new file mode 100644 index 0000000000..dcf8f6b7a8 --- /dev/null +++ b/cmd/car/script_test.go @@ -0,0 +1,34 @@ +package main + +import ( + "flag" + "os" + "path/filepath" + "testing" + + "github.com/rogpeppe/go-internal/testscript" +) + +func TestMain(m *testing.M) { + os.Exit(testscript.RunMain(m, map[string]func() int{ + "car": main1, + })) +} + +var update = flag.Bool("u", false, "update testscript output files") + +func TestScript(t *testing.T) { + t.Parallel() + testscript.Run(t, testscript.Params{ + Dir: filepath.Join("testdata", "script"), + Setup: func(env *testscript.Env) error { + wd, err := os.Getwd() + if err != nil { + return err + } + env.Setenv("INPUTS", filepath.Join(wd, "testdata", "inputs")) + return nil + }, + UpdateScripts: *update, + }) +} diff --git a/cmd/car/testdata/inputs/badheaderlength.car b/cmd/car/testdata/inputs/badheaderlength.car new file mode 100644 index 0000000000..4f6aa54873 --- /dev/null +++ b/cmd/car/testdata/inputs/badheaderlength.car @@ -0,0 +1 @@ + olLʔ<#oKg#H* gversion \ No newline at end of file diff --git a/cmd/car/testdata/inputs/badsectionlength.car b/cmd/car/testdata/inputs/badsectionlength.car new file mode 100644 index 0000000000..8569fb18d0 Binary files /dev/null and b/cmd/car/testdata/inputs/badsectionlength.car differ diff --git a/cmd/car/testdata/inputs/bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw.block b/cmd/car/testdata/inputs/bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw.block new file mode 100644 index 0000000000..257d8522bd Binary files /dev/null and b/cmd/car/testdata/inputs/bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw.block differ diff --git a/cmd/car/testdata/inputs/sample-v1.car b/cmd/car/testdata/inputs/sample-v1.car new file mode 100644 index 0000000000..47a61c8c2a Binary files /dev/null and b/cmd/car/testdata/inputs/sample-v1.car differ diff --git a/cmd/car/testdata/inputs/sample-wrapped-v2.car b/cmd/car/testdata/inputs/sample-wrapped-v2.car new file mode 100644 index 0000000000..35c4d36899 Binary files /dev/null and b/cmd/car/testdata/inputs/sample-wrapped-v2.car differ diff --git a/cmd/car/testdata/inputs/simple-unixfs-missing-blocks.car b/cmd/car/testdata/inputs/simple-unixfs-missing-blocks.car new file mode 100644 index 0000000000..7e80887138 Binary files /dev/null and b/cmd/car/testdata/inputs/simple-unixfs-missing-blocks.car differ diff --git a/cmd/car/testdata/inputs/simple-unixfs.car b/cmd/car/testdata/inputs/simple-unixfs.car new file mode 100644 index 0000000000..55b8a571a9 Binary files /dev/null and b/cmd/car/testdata/inputs/simple-unixfs.car differ diff --git a/cmd/car/testdata/inputs/wikipedia-cryptographic-hash-function.car b/cmd/car/testdata/inputs/wikipedia-cryptographic-hash-function.car new file mode 100644 index 
0000000000..c914805e71 Binary files /dev/null and b/cmd/car/testdata/inputs/wikipedia-cryptographic-hash-function.car differ diff --git a/cmd/car/testdata/script/compile.txt b/cmd/car/testdata/script/compile.txt new file mode 100644 index 0000000000..4607ca6c66 --- /dev/null +++ b/cmd/car/testdata/script/compile.txt @@ -0,0 +1,28 @@ +# debug a car to patch +car debug -o out.patch ${INPUTS}/sample-v1.car +! stderr . +grep -count=1049 \+\+\+ out.patch + +# recompile to binary +car compile -o out.car out.patch +! stderr . + +# should have same blocks as it started with. +car ls out.car +stdout -count=1043 '^bafy' +stdout -count=6 '^bafk' + +# make a small car +car create --file=small.car foo.txt + +car debug -o small.patch small.car +! stderr . + +car compile -o new.car small.patch +! stderr . + +# confirm roundtrip is stable. +cmp small.car new.car + +-- foo.txt -- +hello world \ No newline at end of file diff --git a/cmd/car/testdata/script/create-extract.txt b/cmd/car/testdata/script/create-extract.txt new file mode 100644 index 0000000000..6dac510b23 --- /dev/null +++ b/cmd/car/testdata/script/create-extract.txt @@ -0,0 +1,12 @@ +car create --file=out.car foo.txt bar.txt +mkdir out +car extract -v -f out.car out +stdout -count=2 'txt$' +stderr -count=1 '^extracted 2 file\(s\)$' +car create --file=out2.car out/foo.txt out/bar.txt +cmp out.car out2.car + +-- foo.txt -- +foo content +-- bar.txt -- +bar content diff --git a/cmd/car/testdata/script/create.txt b/cmd/car/testdata/script/create.txt new file mode 100644 index 0000000000..13849e31aa --- /dev/null +++ b/cmd/car/testdata/script/create.txt @@ -0,0 +1,13 @@ +car create --file=out.car foo.txt bar.txt + +car verify out.car +car list --unixfs out.car +stdout -count=2 'txt$' +car list out.car +stdout -count=3 '^baf' +stdout -count=2 '^bafk' + +-- foo.txt -- +foo content +-- bar.txt -- +bar content diff --git a/cmd/car/testdata/script/extract.txt b/cmd/car/testdata/script/extract.txt new file mode 100644 index 0000000000..aa2713beb5 --- /dev/null +++ b/cmd/car/testdata/script/extract.txt @@ -0,0 +1,113 @@ +# full DAG export, everything in the CAR +mkdir actual-full +car extract -f ${INPUTS}/simple-unixfs.car actual-full +stderr '^extracted 9 file\(s\)$' +cmp actual-full/a/1/A.txt expected/a/1/A.txt +cmp actual-full/a/2/B.txt expected/a/2/B.txt +cmp actual-full/a/3/C.txt expected/a/3/C.txt +cmp actual-full/b/5/E.txt expected/b/5/E.txt +cmp actual-full/b/6/F.txt expected/b/6/F.txt +cmp actual-full/b/4/D.txt expected/b/4/D.txt +cmp actual-full/c/9/I.txt expected/c/9/I.txt +cmp actual-full/c/7/G.txt expected/c/7/G.txt +cmp actual-full/c/8/H.txt expected/c/8/H.txt + +# full DAG export, everything in the CAR, accepted from stdin +mkdir actual-stdin +stdin ${INPUTS}/simple-unixfs.car +car extract actual-stdin +stderr '^extracted 9 file\(s\)$' +cmp actual-stdin/a/1/A.txt expected/a/1/A.txt +cmp actual-stdin/a/2/B.txt expected/a/2/B.txt +cmp actual-stdin/a/3/C.txt expected/a/3/C.txt +cmp actual-stdin/b/5/E.txt expected/b/5/E.txt +cmp actual-stdin/b/6/F.txt expected/b/6/F.txt +cmp actual-stdin/b/4/D.txt expected/b/4/D.txt +cmp actual-stdin/c/9/I.txt expected/c/9/I.txt +cmp actual-stdin/c/7/G.txt expected/c/7/G.txt +cmp actual-stdin/c/8/H.txt expected/c/8/H.txt + +# full DAG export, everything in the CAR, but the CAR is missing blocks (incomplete DAG) +mkdir actual-missing +car extract -f ${INPUTS}/simple-unixfs-missing-blocks.car actual-missing +stderr -count=1 'data for entry not found: /b/4 \(skipping\.\.\.\)' +stderr -count=1 'data for entry not 
found: /b/5/E.txt \(skipping\.\.\.\)' +stderr -count=1 'data for entry not found: /b/6 \(skipping\.\.\.\)' +stderr -count=1 '^extracted 6 file\(s\)$' +cmp actual-missing/a/1/A.txt expected/a/1/A.txt +cmp actual-missing/a/2/B.txt expected/a/2/B.txt +cmp actual-missing/a/3/C.txt expected/a/3/C.txt +! exists actual-missing/b/5/E.txt +! exists actual-missing/b/6/F.txt +! exists actual-missing/b/4/D.txt +cmp actual-missing/c/9/I.txt expected/c/9/I.txt +cmp actual-missing/c/7/G.txt expected/c/7/G.txt +cmp actual-missing/c/8/H.txt expected/c/8/H.txt + +# path-based partial export, everything under the path specified (also without leading / in path) +mkdir actual-partial +car extract -f ${INPUTS}/simple-unixfs.car -p b actual-partial +stderr '^extracted 3 file\(s\)$' +! exists actual-partial/a/1/A.txt +! exists actual-partial/a/2/B.txt +! exists actual-partial/a/3/C.txt +cmp actual-partial/b/5/E.txt expected/b/5/E.txt +cmp actual-partial/b/6/F.txt expected/b/6/F.txt +cmp actual-partial/b/4/D.txt expected/b/4/D.txt +! exists actual-partial/c/9/I.txt +! exists actual-partial/c/7/G.txt +! exists actual-partial/c/8/H.txt + +# path-based single-file export (also with leading /) +mkdir actual-single +car extract -f ${INPUTS}/simple-unixfs.car -p /a/2/B.txt actual-single +stderr '^extracted 1 file\(s\)$' +! exists actual-single/a/1/A.txt +cmp actual-single/a/2/B.txt expected/a/2/B.txt +! exists actual-single/a/3/C.txt +! exists actual-single/b/5/E.txt +! exists actual-single/b/6/F.txt +! exists actual-single/b/4/D.txt +! exists actual-single/c/9/I.txt +! exists actual-single/c/7/G.txt +! exists actual-single/c/8/H.txt + +# extract that doesn't yield any files should error +! car extract -f ${INPUTS}/simple-unixfs-missing-blocks.car -p b +stderr '^no files extracted$' + +# car with only one file, nested inside sharded directory, output to stdout +car extract -f ${INPUTS}/wikipedia-cryptographic-hash-function.car -p wiki/Cryptographic_hash_function - +stderr '^extracted 1 file\(s\)$' +stdout -count=1 '^ Cryptographic hash function$' + +# car with only one file, full extract, lots of errors +mkdir actual-wiki +car extract -f ${INPUTS}/wikipedia-cryptographic-hash-function.car actual-wiki +stderr '^extracted 1 file\(s\)$' +stderr -count=1 '^data for entry not found for 570 unknown sharded entries \(skipped\.\.\.\)$' +# random sampling of expected skip errors +stderr -count=1 '^data for entry not found: /wiki/1969_Men''s_World_Ice_Hockey_Championships \(skipping\.\.\.\)$' +stderr -count=1 '^data for entry not found: /wiki/Wrestle_mania_30 \(skipping\.\.\.\)$' +stderr -count=1 '^data for entry not found: /zimdump_version \(skipping\.\.\.\)$' +stderr -count=1 '^data for entry not found: /favicon.ico \(skipping\.\.\.\)$' +stderr -count=1 '^data for entry not found: /index.html \(skipping\.\.\.\)$' + +-- expected/a/1/A.txt -- +a1A +-- expected/a/2/B.txt -- +a2B +-- expected/a/3/C.txt -- +a3C +-- expected/b/5/E.txt -- +b5E +-- expected/b/6/F.txt -- +b6F +-- expected/b/4/D.txt -- +b4D +-- expected/c/9/I.txt -- +c9I +-- expected/c/7/G.txt -- +c7G +-- expected/c/8/H.txt -- +c8H \ No newline at end of file diff --git a/cmd/car/testdata/script/filter.txt b/cmd/car/testdata/script/filter.txt new file mode 100644 index 0000000000..0c0b12de79 --- /dev/null +++ b/cmd/car/testdata/script/filter.txt @@ -0,0 +1,30 @@ +# basic filter +stdin filteredcids.txt +car filter ${INPUTS}/sample-wrapped-v2.car out.car +stderr 'warning: no roots defined after filtering' +car list out.car +! stderr . 
+cmp stdout filteredcids.txt
+
+# filter with root CID
+stdin filteredroot.txt
+car filter ${INPUTS}/sample-wrapped-v2.car out.car
+! stderr .
+car list out.car
+! stderr .
+cmp stdout filteredroot.txt
+
+# append other cids
+stdin filteredcids.txt
+car filter -append ${INPUTS}/sample-wrapped-v2.car out.car
+! stderr .
+car list out.car
+stdout -count=4 '^bafy'
+
+
+-- filteredcids.txt --
+bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw
+bafy2bzaceaqtiesyfqd2jibmofz22oolguzf5wscwh73rmeypglfu2xhkptri
+bafy2bzacebct3dm7izgyauijzkaf3yd7ylni725k66rq7dfp3jr5ywhpprj3k
+-- filteredroot.txt --
+bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy
diff --git a/cmd/car/testdata/script/get-block.txt b/cmd/car/testdata/script/get-block.txt
new file mode 100644
index 0000000000..e042ee645a
--- /dev/null
+++ b/cmd/car/testdata/script/get-block.txt
@@ -0,0 +1,19 @@
+env SAMPLE_CID='bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw'
+env MISSING_CID='bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75xxxxx'
+
+# "get-block" on a CARv1 with an output file.
+car get-block ${INPUTS}/sample-v1.car ${SAMPLE_CID} out.block
+cmp out.block ${INPUTS}/${SAMPLE_CID}.block
+rm out.block
+
+# "get-block" on a CARv1 with stdout.
+car get-block ${INPUTS}/sample-v1.car ${SAMPLE_CID}
+cmp stdout ${INPUTS}/${SAMPLE_CID}.block
+
+# Short "gb" alias.
+car gb ${INPUTS}/sample-v1.car ${SAMPLE_CID}
+cmp stdout ${INPUTS}/${SAMPLE_CID}.block
+
+# "get-block" on a missing CID.
+! car get-block ${INPUTS}/sample-v1.car ${MISSING_CID}
+stderr 'ipld: could not find bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75xxxxw'
diff --git a/cmd/car/testdata/script/get-dag.txt b/cmd/car/testdata/script/get-dag.txt
new file mode 100644
index 0000000000..85751a08f3
--- /dev/null
+++ b/cmd/car/testdata/script/get-dag.txt
@@ -0,0 +1,6 @@
+env SAMPLE_CID='bafy2bzaceaycv7jhaegckatnncu5yugzkrnzeqsppzegufr35lroxxnsnpspu'
+car get-dag ${INPUTS}/sample-v1.car ${SAMPLE_CID} out.car
+! stderr .
+car list out.car
+! stderr .
+stdout -count=1 '^bafy2bzaceaycv7jhaegckatnncu5yugzkrnzeqsppzegufr35lroxxnsnpspu'
\ No newline at end of file
diff --git a/cmd/car/testdata/script/index-create.txt b/cmd/car/testdata/script/index-create.txt
new file mode 100644
index 0000000000..bfdfe65c1d
--- /dev/null
+++ b/cmd/car/testdata/script/index-create.txt
@@ -0,0 +1,3 @@
+car index create ${INPUTS}/sample-v1.car sample-v1.car.idx
+car detach-index ${INPUTS}/sample-wrapped-v2.car sample-wrapped-v2.car.idx
+cmp sample-v1.car.idx sample-wrapped-v2.car.idx
diff --git a/cmd/car/testdata/script/inspect.txt b/cmd/car/testdata/script/inspect.txt
new file mode 100644
index 0000000000..efa20731ad
--- /dev/null
+++ b/cmd/car/testdata/script/inspect.txt
@@ -0,0 +1,43 @@
+car inspect ${INPUTS}/sample-v1.car
+cmp stdout v1inspect.txt
+
+car inspect ${INPUTS}/sample-wrapped-v2.car
+cmp stdout v2inspect.txt
+
+! car inspect ${INPUTS}/badheaderlength.car
+stderr 'invalid header data'
+
+!
car inspect ${INPUTS}/badsectionlength.car +stderr 'invalid section data' + +-- v1inspect.txt -- +Version: 1 +Roots: bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy +Root blocks present in data: Yes +Block count: 1049 +Min / average / max block length (bytes): 1 / 417 / 1342 +Min / average / max CID length (bytes): 14 / 37 / 38 +Block count per codec: + raw: 6 + dag-cbor: 1043 +CID count per multihash: + identity: 6 + blake2b-256: 1043 +-- v2inspect.txt -- +Version: 2 +Characteristics: 00000000000000000000000000000000 +Data offset: 51 +Data (payload) length: 479907 +Index offset: 479958 +Index type: car-multihash-index-sorted +Roots: bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy +Root blocks present in data: Yes +Block count: 1049 +Min / average / max block length (bytes): 1 / 417 / 1342 +Min / average / max CID length (bytes): 14 / 37 / 38 +Block count per codec: + raw: 6 + dag-cbor: 1043 +CID count per multihash: + identity: 6 + blake2b-256: 1043 diff --git a/cmd/car/testdata/script/list.txt b/cmd/car/testdata/script/list.txt new file mode 100644 index 0000000000..29bb9021b7 --- /dev/null +++ b/cmd/car/testdata/script/list.txt @@ -0,0 +1,16 @@ +env SAMPLE_CID='bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw' +# "list" on a CARv1. +car list ${INPUTS}/sample-v1.car +stdout -count=1043 '^bafy' +stdout -count=6 '^bafk' +stdout -count=1 'bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw' + +# "list" on a CARv2. +car list ${INPUTS}/sample-wrapped-v2.car +stdout -count=1043 '^bafy' +stdout -count=6 '^bafk' +stdout -count=1 'bafy2bzacebohz654namrgmwjjx4qmtwgxixsd7pn4tlanyrc3g3hwj75hlxrw' + +# Short "l" alias. +car l ${INPUTS}/sample-v1.car +stdout -count=1043 '^bafy' diff --git a/cmd/car/testdata/script/root.txt b/cmd/car/testdata/script/root.txt new file mode 100644 index 0000000000..834f3a69d4 --- /dev/null +++ b/cmd/car/testdata/script/root.txt @@ -0,0 +1,15 @@ +car root ${INPUTS}/sample-v1.car +cmp stdout v1root.txt + +car root ${INPUTS}/sample-wrapped-v2.car +cmp stdout v2root.txt + +stop stdin_test_needs_car_fix +stdin ${INPUTS}/sample-wrapped-v2.car +car root +cmp stdout v2root.txt + +-- v1root.txt -- +bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy +-- v2root.txt -- +bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy \ No newline at end of file diff --git a/cmd/car/testdata/script/verify.txt b/cmd/car/testdata/script/verify.txt new file mode 100644 index 0000000000..3ef3c49e4c --- /dev/null +++ b/cmd/car/testdata/script/verify.txt @@ -0,0 +1,3 @@ +# "verify" should exit with code 0 on reasonable cars. 
+car verify ${INPUTS}/sample-v1.car
+car verify ${INPUTS}/sample-wrapped-v2.car
diff --git a/cmd/car/verify.go b/cmd/car/verify.go
new file mode 100644
index 0000000000..4c1f107e77
--- /dev/null
+++ b/cmd/car/verify.go
@@ -0,0 +1,114 @@
+package main
+
+import (
+    "fmt"
+    "io"
+    "os"
+
+    carv2 "github.com/ipfs/boxo/ipld/car/v2"
+    "github.com/ipfs/boxo/ipld/car/v2/index"
+    "github.com/ipfs/go-cid"
+    "github.com/multiformats/go-multihash"
+    "github.com/urfave/cli/v2"
+)
+
+// VerifyCar is a command to check a car file's validity
+func VerifyCar(c *cli.Context) error {
+    if c.Args().Len() == 0 {
+        return fmt.Errorf("usage: car verify <file.car>")
+    }
+
+    // header
+    rx, err := carv2.OpenReader(c.Args().First())
+    if err != nil {
+        return err
+    }
+    defer rx.Close()
+    roots, err := rx.Roots()
+    if err != nil {
+        return err
+    }
+    if len(roots) == 0 {
+        return fmt.Errorf("no roots listed in car header")
+    }
+    rootMap := make(map[cid.Cid]struct{})
+    for _, r := range roots {
+        rootMap[r] = struct{}{}
+    }
+
+    if rx.Version == 2 {
+        if rx.Header.DataSize == 0 {
+            return fmt.Errorf("size of wrapped v1 car listed as '0'")
+        }
+
+        flen, err := os.Stat(c.Args().First())
+        if err != nil {
+            return err
+        }
+        lengthToIndex := carv2.PragmaSize + carv2.HeaderSize + rx.Header.DataSize
+        if uint64(flen.Size()) > lengthToIndex && rx.Header.IndexOffset == 0 {
+            return fmt.Errorf("header claims no index, but extra bytes in file beyond data size")
+        }
+        if rx.Header.DataOffset < carv2.PragmaSize+carv2.HeaderSize {
+            return fmt.Errorf("data offset places data within carv2 header")
+        }
+        if rx.Header.IndexOffset < lengthToIndex {
+            return fmt.Errorf("index offset overlaps with data. data ends at %d. index offset of %d", lengthToIndex, rx.Header.IndexOffset)
+        }
+    }
+
+    // blocks
+    fd, err := os.Open(c.Args().First())
+    if err != nil {
+        return err
+    }
+    rd, err := carv2.NewBlockReader(fd)
+    if err != nil {
+        return err
+    }
+
+    cidList := make([]cid.Cid, 0)
+    for {
+        blk, err := rd.Next()
+        if err == io.EOF {
+            break
+        }
+        if err != nil {
+            return err
+        }
+        delete(rootMap, blk.Cid())
+        cidList = append(cidList, blk.Cid())
+    }
+
+    if len(rootMap) > 0 {
+        return fmt.Errorf("header lists root(s) not present as a block: %v", rootMap)
+    }
+
+    // index
+    if rx.Version == 2 && rx.Header.HasIndex() {
+        ir, err := rx.IndexReader()
+        if err != nil {
+            return err
+        }
+        idx, err := index.ReadFrom(ir)
+        if err != nil {
+            return err
+        }
+        for _, c := range cidList {
+            cidHash, err := multihash.Decode(c.Hash())
+            if err != nil {
+                return err
+            }
+            if cidHash.Code == multihash.IDENTITY {
+                continue
+            }
+            if err := idx.GetAll(c, func(_ uint64) bool {
+                return true
+            }); err != nil {
+                return fmt.Errorf("could not look up known cid %s in index: %w", c, err)
+            }
+        }
+    }
+
+    return nil
+}
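For orientation, here is a minimal sketch of how subcommands such as CarRoot and VerifyCar are typically registered with urfave/cli. This entrypoint is hypothetical: the PR's actual cmd/car main.go is not shown in this excerpt, and the command names, usage strings, and structure below are assumptions.

package main

import (
    "log"
    "os"

    "github.com/urfave/cli/v2"
)

// Hypothetical wiring; the real main.go in this PR may register more
// commands, aliases, and flags.
func main() {
    app := &cli.App{
        Name:  "car",
        Usage: "Utility for working with car files",
        Commands: []*cli.Command{
            {Name: "root", Usage: "Print the root CID(s) of a car", Action: CarRoot},
            {Name: "verify", Usage: "Verify a car file is well-formed", Action: VerifyCar},
        },
    }
    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}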
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000000..8e342b16d9
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,2 @@
+comment:
+  layout: "reach, diff, files"
diff --git a/coreiface/block.go b/coreiface/block.go
new file mode 100644
index 0000000000..dbe31e9f81
--- /dev/null
+++ b/coreiface/block.go
@@ -0,0 +1,38 @@
+package iface
+
+import (
+    "context"
+    "io"
+
+    path "github.com/ipfs/boxo/coreiface/path"
+
+    "github.com/ipfs/boxo/coreiface/options"
+)
+
+// BlockStat contains information about a block
+type BlockStat interface {
+    // Size is the size of a block
+    Size() int
+
+    // Path returns the path to the block
+    Path() path.Resolved
+}
+
+// BlockAPI specifies the interface to the block layer
+type BlockAPI interface {
+    // Put imports raw block data, hashing it using specified settings.
+    Put(context.Context, io.Reader, ...options.BlockPutOption) (BlockStat, error)
+
+    // Get attempts to resolve the path and return a reader for data in the block
+    Get(context.Context, path.Path) (io.Reader, error)
+
+    // Rm removes the block specified by the path from local blockstore.
+    // By default an error will be returned if the block can't be found locally.
+    //
+    // NOTE: If the specified block is pinned it won't be removed and no error
+    // will be returned
+    Rm(context.Context, path.Path, ...options.BlockRmOption) error
+
+    // Stat returns information on the block specified by the path
+    Stat(context.Context, path.Path) (BlockStat, error)
+}
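To make the BlockAPI contract concrete, a short usage sketch follows. The `api` value is assumed to be some CoreAPI implementation; the option constructors come from coreiface/options/block.go, added later in this diff.

package main

import (
    "context"
    "fmt"
    "strings"

    iface "github.com/ipfs/boxo/coreiface"
    "github.com/ipfs/boxo/coreiface/options"
)

// putAndStat stores a raw block, then reads back its metadata.
func putAndStat(ctx context.Context, api iface.CoreAPI) error {
    // Put hashes the payload (sha2-256 by default) and returns its stat.
    st, err := api.Block().Put(ctx, strings.NewReader("hello block"),
        options.Block.CidCodec("raw"),
        options.Block.Pin(true))
    if err != nil {
        return err
    }
    fmt.Printf("stored %d bytes at %s\n", st.Size(), st.Path())

    // Stat resolves the same path and returns information about the block.
    st2, err := api.Block().Stat(ctx, st.Path())
    if err != nil {
        return err
    }
    fmt.Println("size:", st2.Size())
    return nil
}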
diff --git a/coreiface/coreapi.go b/coreiface/coreapi.go
new file mode 100644
index 0000000000..7276a3f606
--- /dev/null
+++ b/coreiface/coreapi.go
@@ -0,0 +1,60 @@
+// Package iface defines IPFS Core API which is a set of interfaces used to
+// interact with IPFS nodes.
+package iface
+
+import (
+    "context"
+
+    path "github.com/ipfs/boxo/coreiface/path"
+
+    "github.com/ipfs/boxo/coreiface/options"
+
+    ipld "github.com/ipfs/go-ipld-format"
+)
+
+// CoreAPI defines a unified interface to IPFS for Go programs
type CoreAPI interface {
+    // Unixfs returns an implementation of Unixfs API
+    Unixfs() UnixfsAPI
+
+    // Block returns an implementation of Block API
+    Block() BlockAPI
+
+    // Dag returns an implementation of Dag API
+    Dag() APIDagService
+
+    // Name returns an implementation of Name API
+    Name() NameAPI
+
+    // Key returns an implementation of Key API
+    Key() KeyAPI
+
+    // Pin returns an implementation of Pin API
+    Pin() PinAPI
+
+    // Object returns an implementation of Object API
+    Object() ObjectAPI
+
+    // Dht returns an implementation of Dht API
+    Dht() DhtAPI
+
+    // Swarm returns an implementation of Swarm API
+    Swarm() SwarmAPI
+
+    // PubSub returns an implementation of PubSub API
+    PubSub() PubSubAPI
+
+    // Routing returns an implementation of Routing API
+    Routing() RoutingAPI
+
+    // ResolvePath resolves the path using the Unixfs resolver
+    ResolvePath(context.Context, path.Path) (path.Resolved, error)
+
+    // ResolveNode resolves the path (if not resolved already) using the Unixfs
+    // resolver, gets and returns the resolved Node
+    ResolveNode(context.Context, path.Path) (ipld.Node, error)
+
+    // WithOptions creates a new instance of CoreAPI based on this instance with
+    // a set of options applied
+    WithOptions(...options.ApiOption) (CoreAPI, error)
+}
diff --git a/coreiface/dag.go b/coreiface/dag.go
new file mode 100644
index 0000000000..3cc3aeb4de
--- /dev/null
+++ b/coreiface/dag.go
@@ -0,0 +1,13 @@
+package iface
+
+import (
+    ipld "github.com/ipfs/go-ipld-format"
+)
+
+// APIDagService extends ipld.DAGService
+type APIDagService interface {
+    ipld.DAGService
+
+    // Pinning returns a special NodeAdder which recursively pins added nodes
+    Pinning() ipld.NodeAdder
+}
diff --git a/coreiface/dht.go b/coreiface/dht.go
new file mode 100644
index 0000000000..93027a4067
--- /dev/null
+++ b/coreiface/dht.go
@@ -0,0 +1,27 @@
+package iface
+
+import (
+    "context"
+
+    "github.com/ipfs/boxo/coreiface/path"
+
+    "github.com/ipfs/boxo/coreiface/options"
+
+    "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// DhtAPI specifies the interface to the DHT.
+// Note: This API will likely get deprecated in the near future, see
+// https://github.com/ipfs/interface-ipfs-core/issues/249 for more context.
+type DhtAPI interface {
+    // FindPeer queries the DHT for all of the multiaddresses associated with a
+    // Peer ID
+    FindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
+
+    // FindProviders finds peers in the DHT who can provide a specific value
+    // given a key.
+    FindProviders(context.Context, path.Path, ...options.DhtFindProvidersOption) (<-chan peer.AddrInfo, error)
+
+    // Provide announces to the network that you are providing given values
+    Provide(context.Context, path.Path, ...options.DhtProvideOption) error
+}
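A hedged sketch of DhtAPI in use; `api` is again an assumed CoreAPI implementation and `p` any resolvable path. NumProviders caps the number of results, per the options file later in this diff.

package main

import (
    "context"
    "fmt"

    iface "github.com/ipfs/boxo/coreiface"
    "github.com/ipfs/boxo/coreiface/options"
    "github.com/ipfs/boxo/coreiface/path"
)

// findProviders lists up to five peers advertising the given path.
func findProviders(ctx context.Context, api iface.CoreAPI, p path.Path) error {
    ch, err := api.Dht().FindProviders(ctx, p, options.Dht.NumProviders(5))
    if err != nil {
        return err
    }
    // The channel is closed when the query finishes or ctx is cancelled.
    for info := range ch {
        fmt.Println("provider:", info.ID)
    }
    return nil
}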
diff --git a/coreiface/errors.go b/coreiface/errors.go
new file mode 100644
index 0000000000..e0bd7805d8
--- /dev/null
+++ b/coreiface/errors.go
@@ -0,0 +1,10 @@
+package iface
+
+import "errors"
+
+var (
+    ErrIsDir        = errors.New("this dag node is a directory")
+    ErrNotFile      = errors.New("this dag node is not a regular file")
+    ErrOffline      = errors.New("this action must be run in online mode, try running 'ipfs daemon' first")
+    ErrNotSupported = errors.New("operation not supported")
+)
diff --git a/coreiface/idfmt.go b/coreiface/idfmt.go
new file mode 100644
index 0000000000..80fd0f822f
--- /dev/null
+++ b/coreiface/idfmt.go
@@ -0,0 +1,19 @@
+package iface
+
+import (
+    "github.com/libp2p/go-libp2p/core/peer"
+    mbase "github.com/multiformats/go-multibase"
+)
+
+// FormatKeyID formats the given peer ID as a base36 CID string.
+func FormatKeyID(id peer.ID) string {
+    if s, err := peer.ToCid(id).StringOfBase(mbase.Base36); err != nil {
+        panic(err)
+    } else {
+        return s
+    }
+}
+
+// FormatKey formats the given IPNS key in a canonical way.
+func FormatKey(key Key) string {
+    return FormatKeyID(key.ID())
+}
diff --git a/coreiface/key.go b/coreiface/key.go
new file mode 100644
index 0000000000..118fe2e4fb
--- /dev/null
+++ b/coreiface/key.go
@@ -0,0 +1,43 @@
+package iface
+
+import (
+    "context"
+
+    "github.com/ipfs/boxo/coreiface/path"
+
+    "github.com/ipfs/boxo/coreiface/options"
+
+    "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Key specifies the interface to Keys in KeyAPI Keystore
+type Key interface {
+    // Name returns the key name
+    Name() string
+
+    // Path returns the key path
+    Path() path.Path
+
+    // ID returns the key PeerID
+    ID() peer.ID
+}
+
+// KeyAPI specifies the interface to the Keystore
+type KeyAPI interface {
+    // Generate generates a new key, stores it in the keystore under the
+    // specified name and returns a base58 encoded multihash of its public key
+    Generate(ctx context.Context, name string, opts ...options.KeyGenerateOption) (Key, error)
+
+    // Rename renames oldName key to newName. Returns the key and whether another
+    // key was overwritten, or an error
+    Rename(ctx context.Context, oldName string, newName string, opts ...options.KeyRenameOption) (Key, bool, error)
+
+    // List lists keys stored in the keystore
+    List(ctx context.Context) ([]Key, error)
+
+    // Self returns the 'main' node key
+    Self(ctx context.Context) (Key, error)
+
+    // Remove removes a key from the keystore. Returns the IPNS path of the
+    // removed key
+    Remove(ctx context.Context, name string) (Key, error)
+}
diff --git a/coreiface/name.go b/coreiface/name.go
new file mode 100644
index 0000000000..0c06183e69
--- /dev/null
+++ b/coreiface/name.go
@@ -0,0 +1,48 @@
+package iface
+
+import (
+    "context"
+    "errors"
+
+    path "github.com/ipfs/boxo/coreiface/path"
+
+    "github.com/ipfs/boxo/coreiface/options"
+)
+
+var ErrResolveFailed = errors.New("could not resolve name")
+
+// IpnsEntry specifies the interface to IpnsEntries
+type IpnsEntry interface {
+    // Name returns the IpnsEntry name
+    Name() string
+    // Value returns the IpnsEntry value
+    Value() path.Path
+}
+
+type IpnsResult struct {
+    path.Path
+    Err error
+}
+
+// NameAPI specifies the interface to IPNS.
+//
+// IPNS is a PKI namespace, where names are the hashes of public keys, and the
+// private key enables publishing new (signed) values. In both publish and
+// resolve, the default name used is the node's own PeerID, which is the hash of
+// its public key.
+//
+// You can use the Key API to list and generate more names and their respective keys.
+type NameAPI interface {
+    // Publish announces a new IPNS name
+    Publish(ctx context.Context, path path.Path, opts ...options.NamePublishOption) (IpnsEntry, error)
+
+    // Resolve attempts to resolve the newest version of the specified name
+    Resolve(ctx context.Context, name string, opts ...options.NameResolveOption) (path.Path, error)
+
+    // Search is a version of Resolve which outputs paths as they are discovered,
+    // reducing the time to first entry
+    //
+    // Note: by default, all paths read from the channel are considered unsafe,
+    // except the latest (last path in channel read buffer).
+    Search(ctx context.Context, name string, opts ...options.NameResolveOption) (<-chan IpnsResult, error)
+}
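A sketch of the publish/resolve round trip described above, assuming a CoreAPI implementation `api` with a usable 'self' key; the option constructors come from coreiface/options/name.go in this diff.

package main

import (
    "context"
    "fmt"
    "time"

    iface "github.com/ipfs/boxo/coreiface"
    "github.com/ipfs/boxo/coreiface/options"
    "github.com/ipfs/boxo/coreiface/path"
)

// publishAndResolve publishes p under the node's own key, then resolves it back.
func publishAndResolve(ctx context.Context, api iface.CoreAPI, p path.Path) error {
    entry, err := api.Name().Publish(ctx, p,
        options.Name.ValidTime(48*time.Hour),
        options.Name.AllowOffline(true)) // keep the sketch independent of network state
    if err != nil {
        return err
    }
    resolved, err := api.Name().Resolve(ctx, "/ipns/"+entry.Name())
    if err != nil {
        return err
    }
    fmt.Printf("/ipns/%s -> %s\n", entry.Name(), resolved)
    return nil
}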
diff --git a/coreiface/object.go b/coreiface/object.go
new file mode 100644
index 0000000000..d983fa49b6
--- /dev/null
+++ b/coreiface/object.go
@@ -0,0 +1,108 @@
+package iface
+
+import (
+    "context"
+    "io"
+
+    path "github.com/ipfs/boxo/coreiface/path"
+
+    "github.com/ipfs/boxo/coreiface/options"
+
+    "github.com/ipfs/go-cid"
+    ipld "github.com/ipfs/go-ipld-format"
+)
+
+// ObjectStat provides information about dag nodes
+type ObjectStat struct {
+    // Cid is the CID of the node
+    Cid cid.Cid
+
+    // NumLinks is the number of links the node contains
+    NumLinks int
+
+    // BlockSize is the size of the raw serialized node
+    BlockSize int
+
+    // LinksSize is the size of the links block section
+    LinksSize int
+
+    // DataSize is the size of the data block section
+    DataSize int
+
+    // CumulativeSize is the size of the tree (BlockSize + link sizes)
+    CumulativeSize int
+}
+
+// ChangeType denotes the type of change in ObjectChange
+type ChangeType int
+
+const (
+    // DiffAdd is set when a link was added to the graph
+    DiffAdd ChangeType = iota
+
+    // DiffRemove is set when a link was removed from the graph
+    DiffRemove
+
+    // DiffMod is set when a link was changed in the graph
+    DiffMod
+)
+
+// ObjectChange represents a change in a graph
+type ObjectChange struct {
+    // Type of the change, either:
+    // * DiffAdd - Added a link
+    // * DiffRemove - Removed a link
+    // * DiffMod - Modified a link
+    Type ChangeType
+
+    // Path to the changed link
+    Path string
+
+    // Before holds the link path before the change. Note that when a link is
+    // added, this will be nil.
+    Before path.Resolved
+
+    // After holds the link path after the change. Note that when a link is
+    // removed, this will be nil.
+    After path.Resolved
+}
+
+// ObjectAPI specifies the interface to MerkleDAG and contains useful utilities
+// for manipulating MerkleDAG data structures.
+type ObjectAPI interface {
+    // New creates a new, empty (by default) dag-node.
+    New(context.Context, ...options.ObjectNewOption) (ipld.Node, error)
+
+    // Put imports the data into the merkledag
+    Put(context.Context, io.Reader, ...options.ObjectPutOption) (path.Resolved, error)
+
+    // Get returns the node for the path
+    Get(context.Context, path.Path) (ipld.Node, error)
+
+    // Data returns a reader for the data of the node
+    Data(context.Context, path.Path) (io.Reader, error)
+
+    // Links returns the list of links the node contains
+    Links(context.Context, path.Path) ([]*ipld.Link, error)
+
+    // Stat returns information about the node
+    Stat(context.Context, path.Path) (*ObjectStat, error)
+
+    // AddLink adds a link under the specified path. The child path can point to a
+    // subdirectory within the parent, which must be present (can be overridden
+    // with the WithCreate option).
+    AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...options.ObjectAddLinkOption) (path.Resolved, error)
+
+    // RmLink removes a link from the node
+    RmLink(ctx context.Context, base path.Path, link string) (path.Resolved, error)
+
+    // AppendData appends data to the node
+    AppendData(context.Context, path.Path, io.Reader) (path.Resolved, error)
+
+    // SetData sets the data contained in the node
+    SetData(context.Context, path.Path, io.Reader) (path.Resolved, error)
+
+    // Diff returns a set of changes needed to transform the first object into the
+    // second.
+    Diff(context.Context, path.Path, path.Path) ([]ObjectChange, error)
+}
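A sketch of consuming ObjectAPI.Diff output, assuming a CoreAPI implementation `api`. Note that Before is nil for additions and After is nil for removals, as documented on ObjectChange above.

package main

import (
    "context"
    "fmt"

    iface "github.com/ipfs/boxo/coreiface"
    "github.com/ipfs/boxo/coreiface/path"
)

// printDiff walks the changes needed to turn object a into object b.
func printDiff(ctx context.Context, api iface.CoreAPI, a, b path.Path) error {
    changes, err := api.Object().Diff(ctx, a, b)
    if err != nil {
        return err
    }
    for _, ch := range changes {
        switch ch.Type {
        case iface.DiffAdd:
            fmt.Printf("+ %s (%s)\n", ch.Path, ch.After)
        case iface.DiffRemove:
            fmt.Printf("- %s (%s)\n", ch.Path, ch.Before)
        case iface.DiffMod:
            fmt.Printf("~ %s (%s -> %s)\n", ch.Path, ch.Before, ch.After)
        }
    }
    return nil
}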
diff --git a/coreiface/options/block.go b/coreiface/options/block.go
new file mode 100644
index 0000000000..130648682f
--- /dev/null
+++ b/coreiface/options/block.go
@@ -0,0 +1,164 @@
+package options
+
+import (
+    "fmt"
+
+    cid "github.com/ipfs/go-cid"
+    mc "github.com/multiformats/go-multicodec"
+    mh "github.com/multiformats/go-multihash"
+)
+
+type BlockPutSettings struct {
+    CidPrefix cid.Prefix
+    Pin       bool
+}
+
+type BlockRmSettings struct {
+    Force bool
+}
+
+type BlockPutOption func(*BlockPutSettings) error
+type BlockRmOption func(*BlockRmSettings) error
+
+func BlockPutOptions(opts ...BlockPutOption) (*BlockPutSettings, error) {
+    var cidPrefix cid.Prefix
+
+    // Baseline is CIDv1 raw sha2-256-32 (can be tweaked later via opts)
+    cidPrefix.Version = 1
+    cidPrefix.Codec = uint64(mc.Raw)
+    cidPrefix.MhType = mh.SHA2_256
+    cidPrefix.MhLength = -1 // -1 means len is to be calculated during mh.Sum()
+
+    options := &BlockPutSettings{
+        CidPrefix: cidPrefix,
+        Pin:       false,
+    }
+
+    // Apply any overrides
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+func BlockRmOptions(opts ...BlockRmOption) (*BlockRmSettings, error) {
+    options := &BlockRmSettings{
+        Force: false,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+type blockOpts struct{}
+
+var Block blockOpts
+
+// CidCodec is the modern option for Block.Put which specifies the multicodec to use
+// in the CID returned by the Block.Put operation.
+// It uses the correct codes from go-multicodec and, unlike the legacy Format
+// option, defaults to CIDv1.
+func (blockOpts) CidCodec(codecName string) BlockPutOption {
+    return func(settings *BlockPutSettings) error {
+        if codecName == "" {
+            return nil
+        }
+        code, err := codeFromName(codecName)
+        if err != nil {
+            return err
+        }
+        settings.CidPrefix.Codec = uint64(code)
+        return nil
+    }
+}
+
+// codeFromName maps a string to a code from go-multicodec
+func codeFromName(codecName string) (mc.Code, error) {
+    var cidCodec mc.Code
+    err := cidCodec.Set(codecName)
+    return cidCodec, err
+}
+
+// Format is a legacy option for Block.Put which specifies the multicodec to
+// use to serialize the object.
+// Provided for backward-compatibility only. Use CidCodec instead.
+func (blockOpts) Format(format string) BlockPutOption {
+    return func(settings *BlockPutSettings) error {
+        if format == "" {
+            return nil
+        }
+        // Opt-in CIDv0 support for backward-compatibility
+        if format == "v0" {
+            settings.CidPrefix.Version = 0
+        }
+
+        // Fix up legacy (invalid) names for dag-pb (0x70)
+        if format == "v0" || format == "protobuf" {
+            format = "dag-pb"
+        }
+
+        // Fix up the invalid name for dag-cbor (0x71)
+        if format == "cbor" {
+            format = "dag-cbor"
+        }
+
+        // Set code based on the name passed as "format"
+        code, err := codeFromName(format)
+        if err != nil {
+            return err
+        }
+        settings.CidPrefix.Codec = uint64(code)
+
+        // If CIDv0, ensure all parameters are compatible
+        // (in theory go-cid would validate this anyway, but we want to provide better errors)
+        pref := settings.CidPrefix
+        if pref.Version == 0 {
+            if pref.Codec != uint64(mc.DagPb) {
+                return fmt.Errorf("only dag-pb is allowed with CIDv0")
+            }
+            if pref.MhType != mh.SHA2_256 || (pref.MhLength != -1 && pref.MhLength != 32) {
+                return fmt.Errorf("only sha2-256-32 is allowed with CIDv0")
+            }
+        }
+
+        return nil
+    }
+}
+
+// Hash is an option for Block.Put which specifies the multihash settings to use
+// when hashing the object. Default is mh.SHA2_256 (0x12).
+// If mhLen is set to -1, default length for the hash will be used +func (blockOpts) Hash(mhType uint64, mhLen int) BlockPutOption { + return func(settings *BlockPutSettings) error { + settings.CidPrefix.MhType = mhType + settings.CidPrefix.MhLength = mhLen + return nil + } +} + +// Pin is an option for Block.Put which specifies whether to (recursively) pin +// added blocks +func (blockOpts) Pin(pin bool) BlockPutOption { + return func(settings *BlockPutSettings) error { + settings.Pin = pin + return nil + } +} + +// Force is an option for Block.Rm which, when set to true, will ignore +// non-existing blocks +func (blockOpts) Force(force bool) BlockRmOption { + return func(settings *BlockRmSettings) error { + settings.Force = force + return nil + } +} diff --git a/coreiface/options/dht.go b/coreiface/options/dht.go new file mode 100644 index 0000000000..e13e160200 --- /dev/null +++ b/coreiface/options/dht.go @@ -0,0 +1,62 @@ +package options + +type DhtProvideSettings struct { + Recursive bool +} + +type DhtFindProvidersSettings struct { + NumProviders int +} + +type DhtProvideOption func(*DhtProvideSettings) error +type DhtFindProvidersOption func(*DhtFindProvidersSettings) error + +func DhtProvideOptions(opts ...DhtProvideOption) (*DhtProvideSettings, error) { + options := &DhtProvideSettings{ + Recursive: false, + } + + for _, opt := range opts { + err := opt(options) + if err != nil { + return nil, err + } + } + return options, nil +} + +func DhtFindProvidersOptions(opts ...DhtFindProvidersOption) (*DhtFindProvidersSettings, error) { + options := &DhtFindProvidersSettings{ + NumProviders: 20, + } + + for _, opt := range opts { + err := opt(options) + if err != nil { + return nil, err + } + } + return options, nil +} + +type dhtOpts struct{} + +var Dht dhtOpts + +// Recursive is an option for Dht.Provide which specifies whether to provide +// the given path recursively +func (dhtOpts) Recursive(recursive bool) DhtProvideOption { + return func(settings *DhtProvideSettings) error { + settings.Recursive = recursive + return nil + } +} + +// NumProviders is an option for Dht.FindProviders which specifies the +// number of peers to look for. Default is 20 +func (dhtOpts) NumProviders(numProviders int) DhtFindProvidersOption { + return func(settings *DhtFindProvidersSettings) error { + settings.NumProviders = numProviders + return nil + } +} diff --git a/coreiface/options/global.go b/coreiface/options/global.go new file mode 100644 index 0000000000..90e2586f10 --- /dev/null +++ b/coreiface/options/global.go @@ -0,0 +1,47 @@ +package options + +type ApiSettings struct { + Offline bool + FetchBlocks bool +} + +type ApiOption func(*ApiSettings) error + +func ApiOptions(opts ...ApiOption) (*ApiSettings, error) { + options := &ApiSettings{ + Offline: false, + FetchBlocks: true, + } + + return ApiOptionsTo(options, opts...) 
+}
+
+func ApiOptionsTo(options *ApiSettings, opts ...ApiOption) (*ApiSettings, error) {
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+type apiOpts struct{}
+
+var Api apiOpts
+
+func (apiOpts) Offline(offline bool) ApiOption {
+    return func(settings *ApiSettings) error {
+        settings.Offline = offline
+        return nil
+    }
+}
+
+// FetchBlocks when set to false prevents the api from fetching blocks from the
+// network while allowing other services such as IPNS to still be online
+func (apiOpts) FetchBlocks(fetch bool) ApiOption {
+    return func(settings *ApiSettings) error {
+        settings.FetchBlocks = fetch
+        return nil
+    }
+}
diff --git a/coreiface/options/key.go b/coreiface/options/key.go
new file mode 100644
index 0000000000..4bc53a65fe
--- /dev/null
+++ b/coreiface/options/key.go
@@ -0,0 +1,87 @@
+package options
+
+const (
+    RSAKey     = "rsa"
+    Ed25519Key = "ed25519"
+
+    DefaultRSALen = 2048
+)
+
+type KeyGenerateSettings struct {
+    Algorithm string
+    Size      int
+}
+
+type KeyRenameSettings struct {
+    Force bool
+}
+
+type KeyGenerateOption func(*KeyGenerateSettings) error
+type KeyRenameOption func(*KeyRenameSettings) error
+
+func KeyGenerateOptions(opts ...KeyGenerateOption) (*KeyGenerateSettings, error) {
+    options := &KeyGenerateSettings{
+        Algorithm: RSAKey,
+        Size:      -1,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+func KeyRenameOptions(opts ...KeyRenameOption) (*KeyRenameSettings, error) {
+    options := &KeyRenameSettings{
+        Force: false,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+type keyOpts struct{}
+
+var Key keyOpts
+
+// Type is an option for Key.Generate which specifies which algorithm
+// should be used for the key. Default is options.RSAKey
+//
+// Supported key types:
+// * options.RSAKey
+// * options.Ed25519Key
+func (keyOpts) Type(algorithm string) KeyGenerateOption {
+    return func(settings *KeyGenerateSettings) error {
+        settings.Algorithm = algorithm
+        return nil
+    }
+}
+
+// Size is an option for Key.Generate which specifies the size of the key to
+// be generated. Default is -1
+//
+// A value of -1 means 'use the default size for the key type':
+//   - 2048 for RSA
+func (keyOpts) Size(size int) KeyGenerateOption {
+    return func(settings *KeyGenerateSettings) error {
+        settings.Size = size
+        return nil
+    }
+}
+
+// Force is an option for Key.Rename which specifies whether to allow
+// replacing existing keys.
+func (keyOpts) Force(force bool) KeyRenameOption { + return func(settings *KeyRenameSettings) error { + settings.Force = force + return nil + } +} diff --git a/coreiface/options/name.go b/coreiface/options/name.go new file mode 100644 index 0000000000..ae8be9ae9e --- /dev/null +++ b/coreiface/options/name.go @@ -0,0 +1,121 @@ +package options + +import ( + "time" + + ropts "github.com/ipfs/boxo/coreiface/options/namesys" +) + +const ( + DefaultNameValidTime = 24 * time.Hour +) + +type NamePublishSettings struct { + ValidTime time.Duration + Key string + + TTL *time.Duration + + AllowOffline bool +} + +type NameResolveSettings struct { + Cache bool + + ResolveOpts []ropts.ResolveOpt +} + +type NamePublishOption func(*NamePublishSettings) error +type NameResolveOption func(*NameResolveSettings) error + +func NamePublishOptions(opts ...NamePublishOption) (*NamePublishSettings, error) { + options := &NamePublishSettings{ + ValidTime: DefaultNameValidTime, + Key: "self", + + AllowOffline: false, + } + + for _, opt := range opts { + err := opt(options) + if err != nil { + return nil, err + } + } + + return options, nil +} + +func NameResolveOptions(opts ...NameResolveOption) (*NameResolveSettings, error) { + options := &NameResolveSettings{ + Cache: true, + } + + for _, opt := range opts { + err := opt(options) + if err != nil { + return nil, err + } + } + + return options, nil +} + +type nameOpts struct{} + +var Name nameOpts + +// ValidTime is an option for Name.Publish which specifies for how long the +// entry will remain valid. Default value is 24h +func (nameOpts) ValidTime(validTime time.Duration) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.ValidTime = validTime + return nil + } +} + +// Key is an option for Name.Publish which specifies the key to use for +// publishing. Default value is "self" which is the node's own PeerID. +// The key parameter must be either PeerID or keystore key alias. +// +// You can use KeyAPI to list and generate more names and their respective keys. +func (nameOpts) Key(key string) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.Key = key + return nil + } +} + +// AllowOffline is an option for Name.Publish which specifies whether to allow +// publishing when the node is offline. Default value is false +func (nameOpts) AllowOffline(allow bool) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.AllowOffline = allow + return nil + } +} + +// TTL is an option for Name.Publish which specifies the time duration the +// published record should be cached for (caution: experimental). +func (nameOpts) TTL(ttl time.Duration) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.TTL = &ttl + return nil + } +} + +// Cache is an option for Name.Resolve which specifies if cache should be used. 
+// Default value is true.
+func (nameOpts) Cache(cache bool) NameResolveOption {
+    return func(settings *NameResolveSettings) error {
+        settings.Cache = cache
+        return nil
+    }
+}
+
+func (nameOpts) ResolveOption(opt ropts.ResolveOpt) NameResolveOption {
+    return func(settings *NameResolveSettings) error {
+        settings.ResolveOpts = append(settings.ResolveOpts, opt)
+        return nil
+    }
+}
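A small sketch of how these constructors compose, threading a lower-level namesys resolve option through NameResolveOptions. nsopts is the coreiface/options/namesys package introduced next in this diff; the timeout value is illustrative.

package main

import (
    "time"

    "github.com/ipfs/boxo/coreiface/options"
    nsopts "github.com/ipfs/boxo/coreiface/options/namesys"
)

// resolveSettings disables the cache and sets a DHT timeout for resolution.
func resolveSettings() (*options.NameResolveSettings, error) {
    return options.NameResolveOptions(
        options.Name.Cache(false),
        options.Name.ResolveOption(nsopts.DhtTimeout(10*time.Second)),
    )
}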
diff --git a/coreiface/options/namesys/opts.go b/coreiface/options/namesys/opts.go
new file mode 100644
index 0000000000..0cd1ba7781
--- /dev/null
+++ b/coreiface/options/namesys/opts.go
@@ -0,0 +1,123 @@
+package nsopts
+
+import (
+    "time"
+)
+
+const (
+    // DefaultDepthLimit is the default depth limit used by Resolve.
+    DefaultDepthLimit = 32
+
+    // UnlimitedDepth allows infinite recursion in Resolve. You
+    // probably don't want to use this, but it's here if you absolutely
+    // trust resolution to eventually complete and can't put an upper
+    // limit on how many steps it will take.
+    UnlimitedDepth = 0
+
+    // DefaultIPNSRecordTTL specifies the time that the record can be cached
+    // before checking its validity again.
+    DefaultIPNSRecordTTL = time.Minute
+
+    // DefaultIPNSRecordEOL specifies the time that the network will cache IPNS
+    // records after being published. Records should be re-published before this
+    // interval expires. We use the same default expiration as the DHT.
+    DefaultIPNSRecordEOL = 48 * time.Hour
+)
+
+// ResolveOpts specifies options for resolving an IPNS path
+type ResolveOpts struct {
+    // Recursion depth limit
+    Depth uint
+    // The number of IPNS records to retrieve from the DHT
+    // (the best record is selected from this set)
+    DhtRecordCount uint
+    // The amount of time to wait for DHT records to be fetched
+    // and verified. A zero value indicates that there is no explicit
+    // timeout (although there is an implicit timeout due to dial
+    // timeouts within the DHT)
+    DhtTimeout time.Duration
+}
+
+// DefaultResolveOpts returns the default options for resolving
+// an IPNS path
+func DefaultResolveOpts() ResolveOpts {
+    return ResolveOpts{
+        Depth:          DefaultDepthLimit,
+        DhtRecordCount: 16,
+        DhtTimeout:     time.Minute,
+    }
+}
+
+// ResolveOpt is used to set an option
+type ResolveOpt func(*ResolveOpts)
+
+// Depth is the recursion depth limit
+func Depth(depth uint) ResolveOpt {
+    return func(o *ResolveOpts) {
+        o.Depth = depth
+    }
+}
+
+// DhtRecordCount is the number of IPNS records to retrieve from the DHT
+func DhtRecordCount(count uint) ResolveOpt {
+    return func(o *ResolveOpts) {
+        o.DhtRecordCount = count
+    }
+}
+
+// DhtTimeout is the amount of time to wait for DHT records to be fetched
+// and verified. A zero value indicates that there is no explicit timeout
+func DhtTimeout(timeout time.Duration) ResolveOpt {
+    return func(o *ResolveOpts) {
+        o.DhtTimeout = timeout
+    }
+}
+
+// ProcessOpts converts an array of ResolveOpt into a ResolveOpts object
+func ProcessOpts(opts []ResolveOpt) ResolveOpts {
+    rsopts := DefaultResolveOpts()
+    for _, option := range opts {
+        option(&rsopts)
+    }
+    return rsopts
+}
+
+// PublishOptions specifies options for publishing an IPNS record.
+type PublishOptions struct {
+    EOL time.Time
+    TTL time.Duration
+}
+
+// DefaultPublishOptions returns the default options for publishing an IPNS record.
+func DefaultPublishOptions() PublishOptions {
+    return PublishOptions{
+        EOL: time.Now().Add(DefaultIPNSRecordEOL),
+        TTL: DefaultIPNSRecordTTL,
+    }
+}
+
+// PublishOption is used to set an option for PublishOpts.
+type PublishOption func(*PublishOptions)
+
+// PublishWithEOL sets an EOL.
+func PublishWithEOL(eol time.Time) PublishOption {
+    return func(o *PublishOptions) {
+        o.EOL = eol
+    }
+}
+
+// PublishWithTTL sets a TTL.
+func PublishWithTTL(ttl time.Duration) PublishOption {
+    return func(o *PublishOptions) {
+        o.TTL = ttl
+    }
+}
+
+// ProcessPublishOptions converts an array of PublishOption into a PublishOptions object.
+func ProcessPublishOptions(opts []PublishOption) PublishOptions {
+    rsopts := DefaultPublishOptions()
+    for _, option := range opts {
+        option(&rsopts)
+    }
+    return rsopts
+}
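A sketch of ProcessPublishOptions folding caller options over the defaults declared above (48h EOL, 1m TTL); the specific durations are illustrative.

package main

import (
    "time"

    nsopts "github.com/ipfs/boxo/coreiface/options/namesys"
)

// publishOptions overrides both defaults: a 72h expiry and a 5m cache TTL.
func publishOptions() nsopts.PublishOptions {
    return nsopts.ProcessPublishOptions([]nsopts.PublishOption{
        nsopts.PublishWithEOL(time.Now().Add(72 * time.Hour)),
        nsopts.PublishWithTTL(5 * time.Minute),
    })
}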
diff --git a/coreiface/options/object.go b/coreiface/options/object.go
new file mode 100644
index 0000000000..e484a9f363
--- /dev/null
+++ b/coreiface/options/object.go
@@ -0,0 +1,124 @@
+package options
+
+type ObjectNewSettings struct {
+    Type string
+}
+
+type ObjectPutSettings struct {
+    InputEnc string
+    DataType string
+    Pin      bool
+}
+
+type ObjectAddLinkSettings struct {
+    Create bool
+}
+
+type ObjectNewOption func(*ObjectNewSettings) error
+type ObjectPutOption func(*ObjectPutSettings) error
+type ObjectAddLinkOption func(*ObjectAddLinkSettings) error
+
+func ObjectNewOptions(opts ...ObjectNewOption) (*ObjectNewSettings, error) {
+    options := &ObjectNewSettings{
+        Type: "empty",
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+func ObjectPutOptions(opts ...ObjectPutOption) (*ObjectPutSettings, error) {
+    options := &ObjectPutSettings{
+        InputEnc: "json",
+        DataType: "text",
+        Pin:      false,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+func ObjectAddLinkOptions(opts ...ObjectAddLinkOption) (*ObjectAddLinkSettings, error) {
+    options := &ObjectAddLinkSettings{
+        Create: false,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+type objectOpts struct{}
+
+var Object objectOpts
+
+// Type is an option for Object.New which allows changing the type of the
+// created dag node.
+//
+// Supported types:
+// * 'empty' - Empty node
+// * 'unixfs-dir' - Empty UnixFS directory
+func (objectOpts) Type(t string) ObjectNewOption {
+    return func(settings *ObjectNewSettings) error {
+        settings.Type = t
+        return nil
+    }
+}
+
+// InputEnc is an option for Object.Put which specifies the input encoding of the
+// data. Default is "json".
+//
+// Supported encodings:
+// * "protobuf"
+// * "json"
+func (objectOpts) InputEnc(e string) ObjectPutOption {
+    return func(settings *ObjectPutSettings) error {
+        settings.InputEnc = e
+        return nil
+    }
+}
+
+// DataType is an option for Object.Put which specifies the encoding of the data
+// field when using JSON or XML input encoding.
+//
+// Supported types:
+// * "text" (default)
+// * "base64"
+func (objectOpts) DataType(t string) ObjectPutOption {
+    return func(settings *ObjectPutSettings) error {
+        settings.DataType = t
+        return nil
+    }
+}
+
+// Pin is an option for Object.Put which specifies whether to pin the added
+// objects. Default is false
+func (objectOpts) Pin(pin bool) ObjectPutOption {
+    return func(settings *ObjectPutSettings) error {
+        settings.Pin = pin
+        return nil
+    }
+}
+
+// Create is an option for Object.AddLink which specifies whether to create
+// required directories for the child
+func (objectOpts) Create(create bool) ObjectAddLinkOption {
+    return func(settings *ObjectAddLinkSettings) error {
+        settings.Create = create
+        return nil
+    }
+}
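A sketch of composing Object.Put options; the chosen values are illustrative only.

package main

import (
    "github.com/ipfs/boxo/coreiface/options"
)

// objectPutSettings selects protobuf input instead of the default JSON,
// and requests pinning of the added object.
func objectPutSettings() (*options.ObjectPutSettings, error) {
    return options.ObjectPutOptions(
        options.Object.InputEnc("protobuf"),
        options.Object.Pin(true),
    )
}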
diff --git a/coreiface/options/pin.go b/coreiface/options/pin.go
new file mode 100644
index 0000000000..75c2b8a263
--- /dev/null
+++ b/coreiface/options/pin.go
@@ -0,0 +1,283 @@
+package options
+
+import "fmt"
+
+// PinAddSettings represents the settings for PinAPI.Add
+type PinAddSettings struct {
+    Recursive bool
+}
+
+// PinLsSettings represents the settings for PinAPI.Ls
+type PinLsSettings struct {
+    Type string
+}
+
+// PinIsPinnedSettings represents the settings for PinAPI.IsPinned
+type PinIsPinnedSettings struct {
+    WithType string
+}
+
+// PinRmSettings represents the settings for PinAPI.Rm
+type PinRmSettings struct {
+    Recursive bool
+}
+
+// PinUpdateSettings represents the settings for PinAPI.Update
+type PinUpdateSettings struct {
+    Unpin bool
+}
+
+// PinAddOption is the signature of an option for PinAPI.Add
+type PinAddOption func(*PinAddSettings) error
+
+// PinLsOption is the signature of an option for PinAPI.Ls
+type PinLsOption func(*PinLsSettings) error
+
+// PinIsPinnedOption is the signature of an option for PinAPI.IsPinned
+type PinIsPinnedOption func(*PinIsPinnedSettings) error
+
+// PinRmOption is the signature of an option for PinAPI.Rm
+type PinRmOption func(*PinRmSettings) error
+
+// PinUpdateOption is the signature of an option for PinAPI.Update
+type PinUpdateOption func(*PinUpdateSettings) error
+
+// PinAddOptions compiles a series of PinAddOption into a ready-to-use
+// PinAddSettings and sets the default values.
+func PinAddOptions(opts ...PinAddOption) (*PinAddSettings, error) {
+    options := &PinAddSettings{
+        Recursive: true,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+// PinLsOptions compiles a series of PinLsOption into a ready-to-use
+// PinLsSettings and sets the default values.
+func PinLsOptions(opts ...PinLsOption) (*PinLsSettings, error) {
+    options := &PinLsSettings{
+        Type: "all",
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+// PinIsPinnedOptions compiles a series of PinIsPinnedOption into a ready-to-use
+// PinIsPinnedSettings and sets the default values.
+func PinIsPinnedOptions(opts ...PinIsPinnedOption) (*PinIsPinnedSettings, error) {
+    options := &PinIsPinnedSettings{
+        WithType: "all",
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+// PinRmOptions compiles a series of PinRmOption into a ready-to-use
+// PinRmSettings and sets the default values.
+func PinRmOptions(opts ...PinRmOption) (*PinRmSettings, error) {
+    options := &PinRmSettings{
+        Recursive: true,
+    }
+
+    for _, opt := range opts {
+        if err := opt(options); err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+// PinUpdateOptions compiles a series of PinUpdateOption into a ready-to-use
+// PinUpdateSettings and sets the default values.
+func PinUpdateOptions(opts ...PinUpdateOption) (*PinUpdateSettings, error) {
+    options := &PinUpdateSettings{
+        Unpin: true,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+type pinOpts struct {
+    Ls       pinLsOpts
+    IsPinned pinIsPinnedOpts
+}
+
+// Pin provides access to all the options for the Pin API.
+var Pin pinOpts
+
+type pinLsOpts struct{}
+
+// All is an option for Pin.Ls which will make it return all pins. It is
+// the default
+func (pinLsOpts) All() PinLsOption {
+    return Pin.Ls.pinType("all")
+}
+
+// Recursive is an option for Pin.Ls which will make it only return recursive
+// pins
+func (pinLsOpts) Recursive() PinLsOption {
+    return Pin.Ls.pinType("recursive")
+}
+
+// Direct is an option for Pin.Ls which will make it only return direct (non
+// recursive) pins
+func (pinLsOpts) Direct() PinLsOption {
+    return Pin.Ls.pinType("direct")
+}
+
+// Indirect is an option for Pin.Ls which will make it only return indirect pins
+// (objects referenced by other recursively pinned objects)
+func (pinLsOpts) Indirect() PinLsOption {
+    return Pin.Ls.pinType("indirect")
+}
+
+// Type is an option for Pin.Ls which will make it only return pins of the given
+// type.
+//
+// Supported values:
+//   - "direct" - directly pinned objects
+//   - "recursive" - roots of recursive pins
+//   - "indirect" - indirectly pinned objects (referenced by recursively pinned
+//     objects)
+//   - "all" - all pinned objects (default)
+func (pinLsOpts) Type(typeStr string) (PinLsOption, error) {
+    switch typeStr {
+    case "all", "direct", "indirect", "recursive":
+        return Pin.Ls.pinType(typeStr), nil
+    default:
+        return nil, fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
+    }
+}
+
+// pinType is an option for Pin.Ls which allows specifying which pin types should
+// be returned
+//
+// Supported values:
+//   - "direct" - directly pinned objects
+//   - "recursive" - roots of recursive pins
+//   - "indirect" - indirectly pinned objects (referenced by recursively pinned
+//     objects)
+//   - "all" - all pinned objects (default)
+func (pinLsOpts) pinType(t string) PinLsOption {
+    return func(settings *PinLsSettings) error {
+        settings.Type = t
+        return nil
+    }
+}
+
+type pinIsPinnedOpts struct{}
+
+// All is an option for Pin.IsPinned which will make it search in all types of
+// pins. It is the default
+func (pinIsPinnedOpts) All() PinIsPinnedOption {
+    return Pin.IsPinned.pinType("all")
+}
+
+// Recursive is an option for Pin.IsPinned which will make it only search in
+// recursive pins
+func (pinIsPinnedOpts) Recursive() PinIsPinnedOption {
+    return Pin.IsPinned.pinType("recursive")
+}
+
+// Direct is an option for Pin.IsPinned which will make it only search in direct
+// (non recursive) pins
+func (pinIsPinnedOpts) Direct() PinIsPinnedOption {
+    return Pin.IsPinned.pinType("direct")
+}
+
+// Indirect is an option for Pin.IsPinned which will make it only search indirect
+// pins (objects referenced by other recursively pinned objects)
+func (pinIsPinnedOpts) Indirect() PinIsPinnedOption {
+    return Pin.IsPinned.pinType("indirect")
+}
+
+// Type is an option for Pin.IsPinned which will make it only search pins of the given
+// type.
+//
+// Supported values:
+//   - "direct" - directly pinned objects
+//   - "recursive" - roots of recursive pins
+//   - "indirect" - indirectly pinned objects (referenced by recursively pinned
+//     objects)
+//   - "all" - all pinned objects (default)
+func (pinIsPinnedOpts) Type(typeStr string) (PinIsPinnedOption, error) {
+    switch typeStr {
+    case "all", "direct", "indirect", "recursive":
+        return Pin.IsPinned.pinType(typeStr), nil
+    default:
+        return nil, fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
+    }
+}
+
+// pinType is an option for Pin.IsPinned which allows specifying which pin type the
+// given pin is expected to be, speeding up the lookup.
+//
+// Supported values:
+//   - "direct" - directly pinned objects
+//   - "recursive" - roots of recursive pins
+//   - "indirect" - indirectly pinned objects (referenced by recursively pinned
+//     objects)
+//   - "all" - all pinned objects (default)
+func (pinIsPinnedOpts) pinType(t string) PinIsPinnedOption {
+    return func(settings *PinIsPinnedSettings) error {
+        settings.WithType = t
+        return nil
+    }
+}
+
+// Recursive is an option for Pin.Add which specifies whether to pin an entire
+// object tree or just one object. Default: true
+func (pinOpts) Recursive(recursive bool) PinAddOption {
+    return func(settings *PinAddSettings) error {
+        settings.Recursive = recursive
+        return nil
+    }
+}
+
+// RmRecursive is an option for Pin.Rm which specifies whether to recursively
+// unpin the object linked to by the specified object(s). This does not remove
+// indirect pins referenced by other recursive pins.
+func (pinOpts) RmRecursive(recursive bool) PinRmOption {
+    return func(settings *PinRmSettings) error {
+        settings.Recursive = recursive
+        return nil
+    }
+}
+
+// Unpin is an option for Pin.Update which specifies whether to remove the old pin.
+// Default is true.
+func (pinOpts) Unpin(unpin bool) PinUpdateOption {
+    return func(settings *PinUpdateSettings) error {
+        settings.Unpin = unpin
+        return nil
+    }
+}
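A sketch showing the validated Type constructor feeding PinLsOptions; the "recursive" filter is one of the four supported values documented above.

package main

import (
    "github.com/ipfs/boxo/coreiface/options"
)

// recursivePinFilter builds Pin.Ls settings that select only recursive pins.
// Type validates its argument, so a typo fails fast instead of silently
// matching nothing.
func recursivePinFilter() (*options.PinLsSettings, error) {
    onlyRecursive, err := options.Pin.Ls.Type("recursive")
    if err != nil {
        return nil, err
    }
    return options.PinLsOptions(onlyRecursive)
}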
diff --git a/coreiface/options/pubsub.go b/coreiface/options/pubsub.go
new file mode 100644
index 0000000000..c387d613db
--- /dev/null
+++ b/coreiface/options/pubsub.go
@@ -0,0 +1,58 @@
+package options
+
+type PubSubPeersSettings struct {
+    Topic string
+}
+
+type PubSubSubscribeSettings struct {
+    Discover bool
+}
+
+type PubSubPeersOption func(*PubSubPeersSettings) error
+type PubSubSubscribeOption func(*PubSubSubscribeSettings) error
+
+func PubSubPeersOptions(opts ...PubSubPeersOption) (*PubSubPeersSettings, error) {
+    options := &PubSubPeersSettings{
+        Topic: "",
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+func PubSubSubscribeOptions(opts ...PubSubSubscribeOption) (*PubSubSubscribeSettings, error) {
+    options := &PubSubSubscribeSettings{
+        Discover: false,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return options, nil
+}
+
+type pubsubOpts struct{}
+
+var PubSub pubsubOpts
+
+// Topic is an option for PubSub.Peers which sets the topic to list peers for.
+func (pubsubOpts) Topic(topic string) PubSubPeersOption {
+    return func(settings *PubSubPeersSettings) error {
+        settings.Topic = topic
+        return nil
+    }
+}
+
+// Discover is an option for PubSub.Subscribe which sets whether to try to
+// discover other peers subscribed to the same topic.
+func (pubsubOpts) Discover(discover bool) PubSubSubscribeOption {
+    return func(settings *PubSubSubscribeSettings) error {
+        settings.Discover = discover
+        return nil
+    }
+}
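A one-line sketch of building subscribe settings with discovery enabled.

package main

import (
    "github.com/ipfs/boxo/coreiface/options"
)

// subscribeSettings enables discovery of other peers subscribed to the topic.
func subscribeSettings() (*options.PubSubSubscribeSettings, error) {
    return options.PubSubSubscribeOptions(options.PubSub.Discover(true))
}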
diff --git a/coreiface/options/unixfs.go b/coreiface/options/unixfs.go
new file mode 100644
index 0000000000..22ec8d4136
--- /dev/null
+++ b/coreiface/options/unixfs.go
@@ -0,0 +1,293 @@
+package options
+
+import (
+    "errors"
+    "fmt"
+
+    dag "github.com/ipfs/boxo/ipld/merkledag"
+    cid "github.com/ipfs/go-cid"
+    mh "github.com/multiformats/go-multihash"
+)
+
+type Layout int
+
+const (
+    BalancedLayout Layout = iota
+    TrickleLayout
+)
+
+type UnixfsAddSettings struct {
+    CidVersion int
+    MhType     uint64
+
+    Inline       bool
+    InlineLimit  int
+    RawLeaves    bool
+    RawLeavesSet bool
+
+    Chunker string
+    Layout  Layout
+
+    Pin      bool
+    OnlyHash bool
+    FsCache  bool
+    NoCopy   bool
+
+    Events   chan<- interface{}
+    Silent   bool
+    Progress bool
+}
+
+type UnixfsLsSettings struct {
+    ResolveChildren   bool
+    UseCumulativeSize bool
+}
+
+type UnixfsAddOption func(*UnixfsAddSettings) error
+type UnixfsLsOption func(*UnixfsLsSettings) error
+
+func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix, error) {
+    options := &UnixfsAddSettings{
+        CidVersion: -1,
+        MhType:     mh.SHA2_256,
+
+        Inline:       false,
+        InlineLimit:  32,
+        RawLeaves:    false,
+        RawLeavesSet: false,
+
+        Chunker: "size-262144",
+        Layout:  BalancedLayout,
+
+        Pin:      false,
+        OnlyHash: false,
+        FsCache:  false,
+        NoCopy:   false,
+
+        Events:   nil,
+        Silent:   false,
+        Progress: false,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, cid.Prefix{}, err
+        }
+    }
+
+    // nocopy -> rawblocks
+    if options.NoCopy && !options.RawLeaves {
+        // Was raw-leaves explicitly disabled by the user?
+        if options.RawLeavesSet {
+            return nil, cid.Prefix{}, fmt.Errorf("nocopy option requires '--raw-leaves' to be enabled as well")
+        }
+
+        // No; enable raw leaves to satisfy the mandatory constraint.
+        options.RawLeaves = true
+    }
+
+    // (hash != "sha2-256") -> CIDv1
+    if options.MhType != mh.SHA2_256 {
+        switch options.CidVersion {
+        case 0:
+            return nil, cid.Prefix{}, errors.New("CIDv0 only supports sha2-256")
+        case 1, -1:
+            options.CidVersion = 1
+        default:
+            return nil, cid.Prefix{}, fmt.Errorf("unknown CID version: %d", options.CidVersion)
+        }
+    } else {
+        if options.CidVersion < 0 {
+            // Default to CIDv0
+            options.CidVersion = 0
+        }
+    }
+
+    // cidV1 -> raw blocks (by default)
+    if options.CidVersion > 0 && !options.RawLeavesSet {
+        options.RawLeaves = true
+    }
+
+    prefix, err := dag.PrefixForCidVersion(options.CidVersion)
+    if err != nil {
+        return nil, cid.Prefix{}, err
+    }
+
+    prefix.MhType = options.MhType
+    prefix.MhLength = -1
+
+    return options, prefix, nil
+}
+
+func UnixfsLsOptions(opts ...UnixfsLsOption) (*UnixfsLsSettings, error) {
+    options := &UnixfsLsSettings{
+        ResolveChildren: true,
+    }
+
+    for _, opt := range opts {
+        err := opt(options)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return options, nil
+}
+
+type unixfsOpts struct{}
+
+var Unixfs unixfsOpts
+
+// CidVersion specifies which CID version to use. Defaults to 0 unless an option
+// that depends on CIDv1 is passed.
+func (unixfsOpts) CidVersion(version int) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.CidVersion = version
+        return nil
+    }
+}
+
+// Hash function to use. Implies CIDv1 if not set to sha2-256 (default).
+//
+// The table of functions is declared in https://github.com/multiformats/go-multihash/blob/master/multihash.go
+func (unixfsOpts) Hash(mhtype uint64) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.MhType = mhtype
+        return nil
+    }
+}
+
+// RawLeaves specifies whether to use raw blocks for leaves (data nodes with no
+// links) instead of wrapping them with unixfs structures.
+func (unixfsOpts) RawLeaves(enable bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.RawLeaves = enable
+        settings.RawLeavesSet = true
+        return nil
+    }
+}
+
+// Inline tells the adder to inline small blocks into CIDs
+func (unixfsOpts) Inline(enable bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Inline = enable
+        return nil
+    }
+}
+
+// InlineLimit sets the amount of bytes below which blocks will be encoded
+// directly into the CID instead of being stored and addressed by their hash.
+// Specifying this option won't enable block inlining. For that, use the
+// `Inline` option. Default: 32 bytes
+//
+// Note that while there is no hard limit on the number of bytes, it should be
+// kept at a reasonably low value, such as 64; implementations may choose to
+// reject anything larger.
+func (unixfsOpts) InlineLimit(limit int) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.InlineLimit = limit
+        return nil
+    }
+}
+
+// Chunker specifies settings for the chunking algorithm to use.
+//
+// Default: size-262144, formats:
+// size-[bytes] - Simple chunker splitting data into blocks of n bytes
+// rabin-[min]-[avg]-[max] - Rabin chunker
+func (unixfsOpts) Chunker(chunker string) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Chunker = chunker
+        return nil
+    }
+}
+
+// Layout tells the adder how to balance data between leaves.
+// options.BalancedLayout is the default; it's optimized for static seekable
+// files.
+// options.TrickleLayout is optimized for streaming data.
+func (unixfsOpts) Layout(layout Layout) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Layout = layout
+        return nil
+    }
+}
+
+// Pin tells the adder to pin the file root recursively after adding
+func (unixfsOpts) Pin(pin bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Pin = pin
+        return nil
+    }
+}
+
+// HashOnly will make the adder calculate the data hash without storing it in the
+// blockstore or announcing it to the network
+func (unixfsOpts) HashOnly(hashOnly bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.OnlyHash = hashOnly
+        return nil
+    }
+}
+
+// Events specifies a channel which will be used to report events about the
+// ongoing Add operation.
+//
+// Note that if this channel blocks it may slow down the adder
+func (unixfsOpts) Events(sink chan<- interface{}) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Events = sink
+        return nil
+    }
+}
+
+// Silent reduces event output
+func (unixfsOpts) Silent(silent bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Silent = silent
+        return nil
+    }
+}
+
+// Progress tells the adder whether to enable progress events
+func (unixfsOpts) Progress(enable bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.Progress = enable
+        return nil
+    }
+}
+
+// FsCache tells the adder to check the filestore for pre-existing blocks
+//
+// Experimental
+func (unixfsOpts) FsCache(enable bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.FsCache = enable
+        return nil
+    }
+}
+
+// NoCopy tells the adder to add the files using the filestore. Implies RawLeaves.
+//
+// Experimental
+func (unixfsOpts) Nocopy(enable bool) UnixfsAddOption {
+    return func(settings *UnixfsAddSettings) error {
+        settings.NoCopy = enable
+        return nil
+    }
+}
+
+// ResolveChildren tells Unixfs.Ls whether to resolve the children of
+// directory entries.
+func (unixfsOpts) ResolveChildren(resolve bool) UnixfsLsOption {
+    return func(settings *UnixfsLsSettings) error {
+        settings.ResolveChildren = resolve
+        return nil
+    }
+}
+
+// UseCumulativeSize tells Unixfs.Ls to report cumulative sizes for entries.
+func (unixfsOpts) UseCumulativeSize(use bool) UnixfsLsOption {
+    return func(settings *UnixfsLsSettings) error {
+        settings.UseCumulativeSize = use
+        return nil
+    }
+}
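A sketch of the implication chain in UnixfsAddOptions: requesting CIDv1 with the default hash flips RawLeaves on, and the returned prefix carries the chosen hash function. The chunk size shown is illustrative.

package main

import (
    "fmt"

    "github.com/ipfs/boxo/coreiface/options"
)

// addSettings builds add settings for CIDv1 content with 1 MiB chunks.
func addSettings() error {
    settings, prefix, err := options.UnixfsAddOptions(
        options.Unixfs.CidVersion(1),
        options.Unixfs.Chunker("size-1048576"),
        options.Unixfs.Pin(true),
    )
    if err != nil {
        return err
    }
    fmt.Println("raw leaves:", settings.RawLeaves) // true, implied by CIDv1
    fmt.Println("mh type:", prefix.MhType)         // sha2-256, the default
    return nil
}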
+// options.TrickleLayout is optimized for streaming data.
+func (unixfsOpts) Layout(layout Layout) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.Layout = layout
+		return nil
+	}
+}
+
+// Pin tells the adder to pin the file root recursively after adding
+func (unixfsOpts) Pin(pin bool) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.Pin = pin
+		return nil
+	}
+}
+
+// HashOnly will make the adder calculate the data hash without storing it in
+// the blockstore or announcing it to the network
+func (unixfsOpts) HashOnly(hashOnly bool) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.OnlyHash = hashOnly
+		return nil
+	}
+}
+
+// Events specifies the channel used to report events about the ongoing Add
+// operation.
+//
+// Note that if this channel blocks, it may slow down the adder
+func (unixfsOpts) Events(sink chan<- interface{}) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.Events = sink
+		return nil
+	}
+}
+
+// Silent reduces event output
+func (unixfsOpts) Silent(silent bool) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.Silent = silent
+		return nil
+	}
+}
+
+// Progress tells the adder whether to enable progress events
+func (unixfsOpts) Progress(enable bool) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.Progress = enable
+		return nil
+	}
+}
+
+// FsCache tells the adder to check the filestore for pre-existing blocks
+//
+// Experimental
+func (unixfsOpts) FsCache(enable bool) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.FsCache = enable
+		return nil
+	}
+}
+
+// NoCopy tells the adder to add the files using the filestore. Implies RawLeaves.
+//
+// Experimental
+func (unixfsOpts) Nocopy(enable bool) UnixfsAddOption {
+	return func(settings *UnixfsAddSettings) error {
+		settings.NoCopy = enable
+		return nil
+	}
+}
+
+func (unixfsOpts) ResolveChildren(resolve bool) UnixfsLsOption {
+	return func(settings *UnixfsLsSettings) error {
+		settings.ResolveChildren = resolve
+		return nil
+	}
+}
+
+func (unixfsOpts) UseCumulativeSize(use bool) UnixfsLsOption {
+	return func(settings *UnixfsLsSettings) error {
+		settings.UseCumulativeSize = use
+		return nil
+	}
+}
diff --git a/coreiface/path/path.go b/coreiface/path/path.go
new file mode 100644
index 0000000000..c26b8692b0
--- /dev/null
+++ b/coreiface/path/path.go
@@ -0,0 +1,199 @@
+package path
+
+import (
+	"strings"
+
+	ipfspath "github.com/ipfs/boxo/path"
+	cid "github.com/ipfs/go-cid"
+)
+
+// Path is a generic wrapper for paths used in the API. A path can be resolved
+// to a CID using one of the Resolve functions in the API.
+//
+// Paths must be prefixed with a valid prefix:
+//
+// * /ipfs - Immutable unixfs path (files)
+// * /ipld - Immutable ipld path (data)
+// * /ipns - Mutable names. Usually resolves to one of the immutable paths
+// TODO: /local (MFS)
+type Path interface {
+	// String returns the path as a string.
+	String() string
+
+	// Namespace returns the first component of the path.
+	//
+	// For example, for the path "/ipfs/QmHash", Namespace() returns "ipfs".
+	//
+	// Calling this method on invalid paths (IsValid() != nil) results in an
+	// empty string
+	Namespace() string
+
+	// Mutable returns false if the data pointed to by this path is guaranteed
+	// not to change.
+	//
+	// Note that a resolved mutable path can be immutable.
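+	//
+	// For example, an "/ipns/..." path is mutable, but once resolved it
+	// typically points at an immutable "/ipfs/..." path, for which
+	// Mutable() reports false.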
+ Mutable() bool + + // IsValid checks if this path is a valid ipfs Path, returning nil iff it is + // valid + IsValid() error +} + +// Resolved is a path which was resolved to the last resolvable node. +// ResolvedPaths are guaranteed to return nil from `IsValid` +type Resolved interface { + // Cid returns the CID of the node referenced by the path. Remainder of the + // path is guaranteed to be within the node. + // + // Examples: + // If you have 3 linked objects: QmRoot -> A -> B: + // + // cidB := {"foo": {"bar": 42 }} + // cidA := {"B": {"/": cidB }} + // cidRoot := {"A": {"/": cidA }} + // + // And resolve paths: + // + // * "/ipfs/${cidRoot}" + // * Calling Cid() will return `cidRoot` + // * Calling Root() will return `cidRoot` + // * Calling Remainder() will return `` + // + // * "/ipfs/${cidRoot}/A" + // * Calling Cid() will return `cidA` + // * Calling Root() will return `cidRoot` + // * Calling Remainder() will return `` + // + // * "/ipfs/${cidRoot}/A/B/foo" + // * Calling Cid() will return `cidB` + // * Calling Root() will return `cidRoot` + // * Calling Remainder() will return `foo` + // + // * "/ipfs/${cidRoot}/A/B/foo/bar" + // * Calling Cid() will return `cidB` + // * Calling Root() will return `cidRoot` + // * Calling Remainder() will return `foo/bar` + Cid() cid.Cid + + // Root returns the CID of the root object of the path + // + // Example: + // If you have 3 linked objects: QmRoot -> A -> B, and resolve path + // "/ipfs/QmRoot/A/B", the Root method will return the CID of object QmRoot + // + // For more examples see the documentation of Cid() method + Root() cid.Cid + + // Remainder returns unresolved part of the path + // + // Example: + // If you have 2 linked objects: QmRoot -> A, where A is a CBOR node + // containing the following data: + // + // {"foo": {"bar": 42 }} + // + // When resolving "/ipld/QmRoot/A/foo/bar", Remainder will return "foo/bar" + // + // For more examples see the documentation of Cid() method + Remainder() string + + Path +} + +// path implements coreiface.Path +type path struct { + path string +} + +// resolvedPath implements coreiface.resolvedPath +type resolvedPath struct { + path + cid cid.Cid + root cid.Cid + remainder string +} + +// Join appends provided segments to the base path +func Join(base Path, a ...string) Path { + s := strings.Join(append([]string{base.String()}, a...), "/") + return &path{path: s} +} + +// IpfsPath creates new /ipfs path from the provided CID +func IpfsPath(c cid.Cid) Resolved { + return &resolvedPath{ + path: path{"/ipfs/" + c.String()}, + cid: c, + root: c, + remainder: "", + } +} + +// IpldPath creates new /ipld path from the provided CID +func IpldPath(c cid.Cid) Resolved { + return &resolvedPath{ + path: path{"/ipld/" + c.String()}, + cid: c, + root: c, + remainder: "", + } +} + +// New parses string path to a Path +func New(p string) Path { + if pp, err := ipfspath.ParsePath(p); err == nil { + p = pp.String() + } + + return &path{path: p} +} + +// NewResolvedPath creates new Resolved path. This function performs no checks +// and is intended to be used by resolver implementations. Incorrect inputs may +// cause panics. Handle with care. 
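+//
+// As a purely hypothetical illustration, a resolver that resolved
+// "/ipfs/{root}/A" down to cidA with nothing left over might construct
+// the result as:
+//
+//   NewResolvedPath(ipath, cidA, rootCid, "")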
+func NewResolvedPath(ipath ipfspath.Path, c cid.Cid, root cid.Cid, remainder string) Resolved {
+	return &resolvedPath{
+		path:      path{ipath.String()},
+		cid:       c,
+		root:      root,
+		remainder: remainder,
+	}
+}
+
+func (p *path) String() string {
+	return p.path
+}
+
+func (p *path) Namespace() string {
+	ip, err := ipfspath.ParsePath(p.path)
+	if err != nil {
+		return ""
+	}
+
+	if len(ip.Segments()) < 1 {
+		panic("path without namespace") // this shouldn't happen under any scenario
+	}
+	return ip.Segments()[0]
+}
+
+func (p *path) Mutable() bool {
+	// TODO: MFS: check for /local
+	return p.Namespace() == "ipns"
+}
+
+func (p *path) IsValid() error {
+	_, err := ipfspath.ParsePath(p.path)
+	return err
+}
+
+func (p *resolvedPath) Cid() cid.Cid {
+	return p.cid
+}
+
+func (p *resolvedPath) Root() cid.Cid {
+	return p.root
+}
+
+func (p *resolvedPath) Remainder() string {
+	return p.remainder
+}
diff --git a/coreiface/pin.go b/coreiface/pin.go
new file mode 100644
index 0000000000..ba5df5354d
--- /dev/null
+++ b/coreiface/pin.go
@@ -0,0 +1,63 @@
+package iface
+
+import (
+	"context"
+
+	path "github.com/ipfs/boxo/coreiface/path"
+
+	"github.com/ipfs/boxo/coreiface/options"
+)
+
+// Pin holds information about a pinned resource
+type Pin interface {
+	// Path to the pinned object
+	Path() path.Resolved
+
+	// Type of the pin
+	Type() string
+
+	// if not nil, an error happened. Everything else should be ignored.
+	Err() error
+}
+
+// PinStatus holds information about pin health
+type PinStatus interface {
+	// Ok indicates whether the pin has been verified to be correct
+	Ok() bool
+
+	// BadNodes returns any bad (usually missing) nodes from the pin
+	BadNodes() []BadPinNode
+}
+
+// BadPinNode is a node that has been marked as bad by Pin.Verify
+type BadPinNode interface {
+	// Path is the path of the node
+	Path() path.Resolved
+
+	// Err is the reason why the node has been marked as bad
+	Err() error
+}
+
+// PinAPI specifies the interface to pinning
+type PinAPI interface {
+	// Add creates a new pin, by default recursive - pinning the whole
+	// referenced tree
+	Add(context.Context, path.Path, ...options.PinAddOption) error
+
+	// Ls returns the list of pinned objects on this node
+	Ls(context.Context, ...options.PinLsOption) (<-chan Pin, error)
+
+	// IsPinned returns whether or not the given cid is pinned
+	// and an explanation of why it's pinned
+	IsPinned(context.Context, path.Path, ...options.PinIsPinnedOption) (string, bool, error)
+
+	// Rm removes the pin for the object specified by the path
+	Rm(context.Context, path.Path, ...options.PinRmOption) error
+
+	// Update changes one pin to another, skipping checks for matching paths in
+	// the old tree
+	Update(ctx context.Context, from path.Path, to path.Path, opts ...options.PinUpdateOption) error
+
+	// Verify verifies the integrity of pinned objects
+	Verify(context.Context) (<-chan PinStatus, error)
+}
diff --git a/coreiface/pubsub.go b/coreiface/pubsub.go
new file mode 100644
index 0000000000..bbd1da4ec1
--- /dev/null
+++ b/coreiface/pubsub.go
@@ -0,0 +1,48 @@
+package iface
+
+import (
+	"context"
+	"io"
+
+	"github.com/ipfs/boxo/coreiface/options"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// PubSubSubscription is an active PubSub subscription
+type PubSubSubscription interface {
+	io.Closer
+
+	// Next returns the next incoming message
+	Next(context.Context) (PubSubMessage, error)
+}
+
+// PubSubMessage is a single PubSub message
+type PubSubMessage interface {
+	// From returns the ID of the peer the message arrived from
+	
From() peer.ID + + // Data returns the message body + Data() []byte + + // Seq returns message identifier + Seq() []byte + + // Topics returns list of topics this message was set to + Topics() []string +} + +// PubSubAPI specifies the interface to PubSub +type PubSubAPI interface { + // Ls lists subscribed topics by name + Ls(context.Context) ([]string, error) + + // Peers list peers we are currently pubsubbing with + Peers(context.Context, ...options.PubSubPeersOption) ([]peer.ID, error) + + // Publish a message to a given pubsub topic + Publish(context.Context, string, []byte) error + + // Subscribe to messages on a given topic + Subscribe(context.Context, string, ...options.PubSubSubscribeOption) (PubSubSubscription, error) +} diff --git a/coreiface/routing.go b/coreiface/routing.go new file mode 100644 index 0000000000..a28ceb9e77 --- /dev/null +++ b/coreiface/routing.go @@ -0,0 +1,14 @@ +package iface + +import ( + "context" +) + +// RoutingAPI specifies the interface to the routing layer. +type RoutingAPI interface { + // Get retrieves the best value for a given key + Get(context.Context, string) ([]byte, error) + + // Put sets a value for a given key + Put(ctx context.Context, key string, value []byte) error +} diff --git a/coreiface/swarm.go b/coreiface/swarm.go new file mode 100644 index 0000000000..9aa5466ba4 --- /dev/null +++ b/coreiface/swarm.go @@ -0,0 +1,57 @@ +package iface + +import ( + "context" + "errors" + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + ma "github.com/multiformats/go-multiaddr" +) + +var ( + ErrNotConnected = errors.New("not connected") + ErrConnNotFound = errors.New("conn not found") +) + +// ConnectionInfo contains information about a peer +type ConnectionInfo interface { + // ID returns PeerID + ID() peer.ID + + // Address returns the multiaddress via which we are connected with the peer + Address() ma.Multiaddr + + // Direction returns which way the connection was established + Direction() network.Direction + + // Latency returns last known round trip time to the peer + Latency() (time.Duration, error) + + // Streams returns list of streams established with the peer + Streams() ([]protocol.ID, error) +} + +// SwarmAPI specifies the interface to libp2p swarm +type SwarmAPI interface { + // Connect to a given peer + Connect(context.Context, peer.AddrInfo) error + + // Disconnect from a given address + Disconnect(context.Context, ma.Multiaddr) error + + // Peers returns the list of peers we are connected to + Peers(context.Context) ([]ConnectionInfo, error) + + // KnownAddrs returns the list of all addresses this node is aware of + KnownAddrs(context.Context) (map[peer.ID][]ma.Multiaddr, error) + + // LocalAddrs returns the list of announced listening addresses + LocalAddrs(context.Context) ([]ma.Multiaddr, error) + + // ListenAddrs returns the list of all listening addresses + ListenAddrs(context.Context) ([]ma.Multiaddr, error) +} diff --git a/coreiface/tests/api.go b/coreiface/tests/api.go new file mode 100644 index 0000000000..497ef9d276 --- /dev/null +++ b/coreiface/tests/api.go @@ -0,0 +1,97 @@ +package tests + +import ( + "context" + "errors" + "testing" + "time" + + coreiface "github.com/ipfs/boxo/coreiface" +) + +var errAPINotImplemented = errors.New("api not implemented") + +func (tp *TestSuite) makeAPI(ctx context.Context) (coreiface.CoreAPI, error) { + api, err := tp.MakeAPISwarm(ctx, false, 1) + if err != nil { + return nil, err + } + + return 
api[0], nil
+}
+
+type Provider interface {
+	// MakeAPISwarm creates n nodes. Implementations may ignore fullIdentity when it is false.
+	MakeAPISwarm(ctx context.Context, fullIdentity bool, n int) ([]coreiface.CoreAPI, error)
+}
+
+func (tp *TestSuite) MakeAPISwarm(ctx context.Context, fullIdentity bool, n int) ([]coreiface.CoreAPI, error) {
+	if tp.apis != nil {
+		tp.apis <- 1
+		go func() {
+			<-ctx.Done()
+			tp.apis <- -1
+		}()
+	}
+
+	return tp.Provider.MakeAPISwarm(ctx, fullIdentity, n)
+}
+
+type TestSuite struct {
+	Provider
+
+	apis chan int
+}
+
+func TestApi(p Provider) func(t *testing.T) {
+	running := 1
+	apis := make(chan int)
+	zeroRunning := make(chan struct{})
+	go func() {
+		for i := range apis {
+			running += i
+			if running < 1 {
+				close(zeroRunning)
+				return
+			}
+		}
+	}()
+
+	tp := &TestSuite{Provider: p, apis: apis}
+
+	return func(t *testing.T) {
+		t.Run("Block", tp.TestBlock)
+		t.Run("Dag", tp.TestDag)
+		t.Run("Dht", tp.TestDht)
+		t.Run("Key", tp.TestKey)
+		t.Run("Name", tp.TestName)
+		t.Run("Object", tp.TestObject)
+		t.Run("Path", tp.TestPath)
+		t.Run("Pin", tp.TestPin)
+		t.Run("PubSub", tp.TestPubSub)
+		t.Run("Routing", tp.TestRouting)
+		t.Run("Unixfs", tp.TestUnixfs)
+
+		apis <- -1
+		t.Run("TestsCancelCtx", func(t *testing.T) {
+			select {
+			case <-zeroRunning:
+			case <-time.After(time.Second):
+				t.Errorf("%d test swarm(s) not closed", running)
+			}
+		})
+	}
+}
+
+func (tp *TestSuite) hasApi(t *testing.T, tf func(coreiface.CoreAPI) error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := tf(api); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/coreiface/tests/block.go b/coreiface/tests/block.go
new file mode 100644
index 0000000000..c884f3e823
--- /dev/null
+++ b/coreiface/tests/block.go
@@ -0,0 +1,354 @@
+package tests
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"strings"
+	"testing"
+
+	coreiface "github.com/ipfs/boxo/coreiface"
+	opt "github.com/ipfs/boxo/coreiface/options"
+	"github.com/ipfs/boxo/coreiface/path"
+	ipld "github.com/ipfs/go-ipld-format"
+
+	mh "github.com/multiformats/go-multihash"
+)
+
+var (
+	pbCidV0  = "QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN" // dag-pb
+	pbCid    = "bafybeiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4" // dag-pb
+	rawCid   = "bafkreiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4" // raw bytes
+	cborCid  = "bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm" // dag-cbor
+	cborKCid = "bafyr2qgsohbwdlk7ajmmbb4lhoytmest4wdbe5xnexfvtxeatuyqqmwv3fgxp3pmhpc27gwey2cct56gloqefoqwcf3yqiqzsaqb7p4jefhcw" // dag-cbor keccak-512
+)
+
+// dag-pb
+func pbBlock() io.Reader {
+	return bytes.NewReader([]byte{10, 12, 8, 2, 18, 6, 104, 101, 108, 108, 111, 10, 24, 6})
+}
+
+// dag-cbor
+func cborBlock() io.Reader {
+	return bytes.NewReader([]byte{101, 72, 101, 108, 108, 111})
+}
+
+func (tp *TestSuite) TestBlock(t *testing.T) {
+	tp.hasApi(t, func(api coreiface.CoreAPI) error {
+		if api.Block() == nil {
+			return errAPINotImplemented
+		}
+		return nil
+	})
+
+	t.Run("TestBlockPut (get raw CIDv1)", tp.TestBlockPut)
+	t.Run("TestBlockPutCidCodec: dag-pb", tp.TestBlockPutCidCodecDagPb)
+	t.Run("TestBlockPutCidCodec: dag-cbor", tp.TestBlockPutCidCodecDagCbor)
+	t.Run("TestBlockPutFormat (legacy): cbor → dag-cbor", tp.TestBlockPutFormatDagCbor)
+	t.Run("TestBlockPutFormat (legacy): protobuf → dag-pb", tp.TestBlockPutFormatDagPb)
+	t.Run("TestBlockPutFormat (legacy): v0 → CIDv0", tp.TestBlockPutFormatV0)
+	t.Run("TestBlockPutHash", tp.TestBlockPutHash)
+	
t.Run("TestBlockGet", tp.TestBlockGet) + t.Run("TestBlockRm", tp.TestBlockRm) + t.Run("TestBlockStat", tp.TestBlockStat) + t.Run("TestBlockPin", tp.TestBlockPin) +} + +// when no opts are passed, produced CID has 'raw' codec +func (tp *TestSuite) TestBlockPut(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, pbBlock()) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != rawCid { + t.Errorf("got wrong cid: %s", res.Path().Cid().String()) + } +} + +// Format is deprecated, it used invalid codec names. +// Confirm 'cbor' gets fixed to 'dag-cbor' +func (tp *TestSuite) TestBlockPutFormatDagCbor(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, cborBlock(), opt.Block.Format("cbor")) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != cborCid { + t.Errorf("got wrong cid: %s", res.Path().Cid().String()) + } +} + +// Format is deprecated, it used invalid codec names. +// Confirm 'protobuf' got fixed to 'dag-pb' +func (tp *TestSuite) TestBlockPutFormatDagPb(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, pbBlock(), opt.Block.Format("protobuf")) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != pbCid { + t.Errorf("got wrong cid: %s", res.Path().Cid().String()) + } +} + +// Format is deprecated, it used invalid codec names. +// Confirm fake codec 'v0' got fixed to CIDv0 (with implicit dag-pb codec) +func (tp *TestSuite) TestBlockPutFormatV0(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, pbBlock(), opt.Block.Format("v0")) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != pbCidV0 { + t.Errorf("got wrong cid: %s", res.Path().Cid().String()) + } +} + +func (tp *TestSuite) TestBlockPutCidCodecDagCbor(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, cborBlock(), opt.Block.CidCodec("dag-cbor")) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != cborCid { + t.Errorf("got wrong cid: %s", res.Path().Cid().String()) + } +} + +func (tp *TestSuite) TestBlockPutCidCodecDagPb(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, pbBlock(), opt.Block.CidCodec("dag-pb")) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != pbCid { + t.Errorf("got wrong cid: %s", res.Path().Cid().String()) + } +} + +func (tp *TestSuite) TestBlockPutHash(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put( + ctx, + cborBlock(), + opt.Block.Hash(mh.KECCAK_512, -1), + opt.Block.CidCodec("dag-cbor"), + ) + if err != nil { + t.Fatal(err) + } + + if res.Path().Cid().String() != cborKCid { + t.Errorf("got wrong cid: %s", 
res.Path().Cid().String()) + } +} + +func (tp *TestSuite) TestBlockGet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) + if err != nil { + t.Fatal(err) + } + + r, err := api.Block().Get(ctx, res.Path()) + if err != nil { + t.Fatal(err) + } + + d, err := io.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + if string(d) != "Hello" { + t.Error("didn't get correct data back") + } + + p := path.New("/ipfs/" + res.Path().Cid().String()) + + rp, err := api.ResolvePath(ctx, p) + if err != nil { + t.Fatal(err) + } + if rp.Cid().String() != res.Path().Cid().String() { + t.Error("paths didn't match") + } +} + +func (tp *TestSuite) TestBlockRm(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) + if err != nil { + t.Fatal(err) + } + + r, err := api.Block().Get(ctx, res.Path()) + if err != nil { + t.Fatal(err) + } + + d, err := io.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + if string(d) != "Hello" { + t.Error("didn't get correct data back") + } + + err = api.Block().Rm(ctx, res.Path()) + if err != nil { + t.Fatal(err) + } + + _, err = api.Block().Get(ctx, res.Path()) + if err == nil { + t.Fatal("expected err to exist") + } + if !ipld.IsNotFound(err) { + t.Errorf("unexpected error; %s", err.Error()) + } + + err = api.Block().Rm(ctx, res.Path()) + if err == nil { + t.Fatal("expected err to exist") + } + if !ipld.IsNotFound(err) { + t.Errorf("unexpected error; %s", err.Error()) + } + + err = api.Block().Rm(ctx, res.Path(), opt.Block.Force(true)) + if err != nil { + t.Fatal(err) + } +} + +func (tp *TestSuite) TestBlockStat(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + res, err := api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) + if err != nil { + t.Fatal(err) + } + + stat, err := api.Block().Stat(ctx, res.Path()) + if err != nil { + t.Fatal(err) + } + + if stat.Path().String() != res.Path().String() { + t.Error("paths don't match") + } + + if stat.Size() != len("Hello") { + t.Error("length doesn't match") + } +} + +func (tp *TestSuite) TestBlockPin(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) + if err != nil { + t.Fatal(err) + } + + if pins, err := api.Pin().Ls(ctx); err != nil || len(pins) != 0 { + t.Fatal("expected 0 pins") + } + + res, err := api.Block().Put( + ctx, + strings.NewReader(`Hello`), + opt.Block.Pin(true), + opt.Block.Format("raw"), + ) + if err != nil { + t.Fatal(err) + } + + pins, err := accPins(api.Pin().Ls(ctx)) + if err != nil { + t.Fatal(err) + } + if len(pins) != 1 { + t.Fatal("expected 1 pin") + } + if pins[0].Type() != "recursive" { + t.Error("expected a recursive pin") + } + if pins[0].Path().String() != res.Path().String() { + t.Error("pin path didn't match") + } +} diff --git a/coreiface/tests/dag.go b/coreiface/tests/dag.go new file mode 100644 index 0000000000..b4118e2cc9 --- /dev/null +++ b/coreiface/tests/dag.go @@ -0,0 +1,200 @@ +package 
tests + +import ( + "context" + "math" + gopath "path" + "strings" + "testing" + + path "github.com/ipfs/boxo/coreiface/path" + + coreiface "github.com/ipfs/boxo/coreiface" + + ipldcbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" +) + +func (tp *TestSuite) TestDag(t *testing.T) { + tp.hasApi(t, func(api coreiface.CoreAPI) error { + if api.Dag() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestPut", tp.TestPut) + t.Run("TestPutWithHash", tp.TestPutWithHash) + t.Run("TestPath", tp.TestDagPath) + t.Run("TestTree", tp.TestTree) + t.Run("TestBatch", tp.TestBatch) +} + +var ( + treeExpected = map[string]struct{}{ + "a": {}, + "b": {}, + "c": {}, + "c/d": {}, + "c/e": {}, + } +) + +func (tp *TestSuite) TestPut(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`"Hello"`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + err = api.Dag().Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + if nd.Cid().String() != "bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm" { + t.Errorf("got wrong cid: %s", nd.Cid().String()) + } +} + +func (tp *TestSuite) TestPutWithHash(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`"Hello"`), mh.SHA3_256, -1) + if err != nil { + t.Fatal(err) + } + + err = api.Dag().Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + if nd.Cid().String() != "bafyrmifu7haikttpqqgc5ewvmp76z3z4ebp7h2ph4memw7dq4nt6btmxny" { + t.Errorf("got wrong cid: %s", nd.Cid().String()) + } +} + +func (tp *TestSuite) TestDagPath(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + snd, err := ipldcbor.FromJSON(strings.NewReader(`"foo"`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + err = api.Dag().Add(ctx, snd) + if err != nil { + t.Fatal(err) + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+snd.Cid().String()+`"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + err = api.Dag().Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + p := path.New(gopath.Join(nd.Cid().String(), "lnk")) + + rp, err := api.ResolvePath(ctx, p) + if err != nil { + t.Fatal(err) + } + + ndd, err := api.Dag().Get(ctx, rp.Cid()) + if err != nil { + t.Fatal(err) + } + + if ndd.Cid().String() != snd.Cid().String() { + t.Errorf("got unexpected cid %s, expected %s", ndd.Cid().String(), snd.Cid().String()) + } +} + +func (tp *TestSuite) TestTree(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`{"a": 123, "b": "foo", "c": {"d": 321, "e": 111}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + err = api.Dag().Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + res, err := api.Dag().Get(ctx, nd.Cid()) + if err != nil { + t.Fatal(err) + } + + lst := res.Tree("", -1) + if len(lst) != len(treeExpected) { + t.Errorf("tree length of %d doesn't match expected %d", len(lst), len(treeExpected)) + } + + for _, ent := range lst { + if _, ok := treeExpected[ent]; !ok { + 
t.Errorf("unexpected tree entry %s", ent) + } + } +} + +func (tp *TestSuite) TestBatch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`"Hello"`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + if nd.Cid().String() != "bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm" { + t.Errorf("got wrong cid: %s", nd.Cid().String()) + } + + _, err = api.Dag().Get(ctx, nd.Cid()) + if err == nil || !strings.Contains(err.Error(), "not found") { + t.Fatal(err) + } + + if err := api.Dag().AddMany(ctx, []ipld.Node{nd}); err != nil { + t.Fatal(err) + } + + _, err = api.Dag().Get(ctx, nd.Cid()) + if err != nil { + t.Fatal(err) + } +} diff --git a/coreiface/tests/dht.go b/coreiface/tests/dht.go new file mode 100644 index 0000000000..fb3f6d1a00 --- /dev/null +++ b/coreiface/tests/dht.go @@ -0,0 +1,166 @@ +package tests + +import ( + "context" + "io" + "testing" + "time" + + iface "github.com/ipfs/boxo/coreiface" + "github.com/ipfs/boxo/coreiface/options" +) + +func (tp *TestSuite) TestDht(t *testing.T) { + tp.hasApi(t, func(api iface.CoreAPI) error { + if api.Dht() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestDhtFindPeer", tp.TestDhtFindPeer) + t.Run("TestDhtFindProviders", tp.TestDhtFindProviders) + t.Run("TestDhtProvide", tp.TestDhtProvide) +} + +func (tp *TestSuite) TestDhtFindPeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + apis, err := tp.MakeAPISwarm(ctx, true, 5) + if err != nil { + t.Fatal(err) + } + + self0, err := apis[0].Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + laddrs0, err := apis[0].Swarm().LocalAddrs(ctx) + if err != nil { + t.Fatal(err) + } + if len(laddrs0) != 1 { + t.Fatal("unexpected number of local addrs") + } + + time.Sleep(3 * time.Second) + + pi, err := apis[2].Dht().FindPeer(ctx, self0.ID()) + if err != nil { + t.Fatal(err) + } + + if pi.Addrs[0].String() != laddrs0[0].String() { + t.Errorf("got unexpected address from FindPeer: %s", pi.Addrs[0].String()) + } + + self2, err := apis[2].Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + pi, err = apis[1].Dht().FindPeer(ctx, self2.ID()) + if err != nil { + t.Fatal(err) + } + + laddrs2, err := apis[2].Swarm().LocalAddrs(ctx) + if err != nil { + t.Fatal(err) + } + if len(laddrs2) != 1 { + t.Fatal("unexpected number of local addrs") + } + + if pi.Addrs[0].String() != laddrs2[0].String() { + t.Errorf("got unexpected address from FindPeer: %s", pi.Addrs[0].String()) + } +} + +func (tp *TestSuite) TestDhtFindProviders(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + apis, err := tp.MakeAPISwarm(ctx, true, 5) + if err != nil { + t.Fatal(err) + } + + p, err := addTestObject(ctx, apis[0]) + if err != nil { + t.Fatal(err) + } + + time.Sleep(3 * time.Second) + + out, err := apis[2].Dht().FindProviders(ctx, p, options.Dht.NumProviders(1)) + if err != nil { + t.Fatal(err) + } + + provider := <-out + + self0, err := apis[0].Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if provider.ID.String() != self0.ID().String() { + t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String()) + } +} + +func (tp *TestSuite) TestDhtProvide(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + apis, err := tp.MakeAPISwarm(ctx, true, 5) + if err != nil { + t.Fatal(err) + } + + 
off0, err := apis[0].WithOptions(options.Api.Offline(true)) + if err != nil { + t.Fatal(err) + } + + s, err := off0.Block().Put(ctx, &io.LimitedReader{R: rnd, N: 4092}) + if err != nil { + t.Fatal(err) + } + + p := s.Path() + + time.Sleep(3 * time.Second) + + out, err := apis[2].Dht().FindProviders(ctx, p, options.Dht.NumProviders(1)) + if err != nil { + t.Fatal(err) + } + + _, ok := <-out + + if ok { + t.Fatal("did not expect to find any providers") + } + + self0, err := apis[0].Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + err = apis[0].Dht().Provide(ctx, p) + if err != nil { + t.Fatal(err) + } + + out, err = apis[2].Dht().FindProviders(ctx, p, options.Dht.NumProviders(1)) + if err != nil { + t.Fatal(err) + } + + provider := <-out + + if provider.ID.String() != self0.ID().String() { + t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String()) + } +} diff --git a/coreiface/tests/key.go b/coreiface/tests/key.go new file mode 100644 index 0000000000..3a38c07ae5 --- /dev/null +++ b/coreiface/tests/key.go @@ -0,0 +1,538 @@ +package tests + +import ( + "context" + "strings" + "testing" + + iface "github.com/ipfs/boxo/coreiface" + opt "github.com/ipfs/boxo/coreiface/options" + "github.com/ipfs/go-cid" + mbase "github.com/multiformats/go-multibase" +) + +func (tp *TestSuite) TestKey(t *testing.T) { + tp.hasApi(t, func(api iface.CoreAPI) error { + if api.Key() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestListSelf", tp.TestListSelf) + t.Run("TestRenameSelf", tp.TestRenameSelf) + t.Run("TestRemoveSelf", tp.TestRemoveSelf) + t.Run("TestGenerate", tp.TestGenerate) + t.Run("TestGenerateSize", tp.TestGenerateSize) + t.Run("TestGenerateType", tp.TestGenerateType) + t.Run("TestGenerateExisting", tp.TestGenerateExisting) + t.Run("TestList", tp.TestList) + t.Run("TestRename", tp.TestRename) + t.Run("TestRenameToSelf", tp.TestRenameToSelf) + t.Run("TestRenameToSelfForce", tp.TestRenameToSelfForce) + t.Run("TestRenameOverwriteNoForce", tp.TestRenameOverwriteNoForce) + t.Run("TestRenameOverwrite", tp.TestRenameOverwrite) + t.Run("TestRenameSameNameNoForce", tp.TestRenameSameNameNoForce) + t.Run("TestRenameSameName", tp.TestRenameSameName) + t.Run("TestRemove", tp.TestRemove) +} + +func (tp *TestSuite) TestListSelf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + return + } + + self, err := api.Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + keys, err := api.Key().List(ctx) + if err != nil { + t.Fatalf("failed to list keys: %s", err) + return + } + + if len(keys) != 1 { + t.Fatalf("there should be 1 key (self), got %d", len(keys)) + return + } + + if keys[0].Name() != "self" { + t.Errorf("expected the key to be called 'self', got '%s'", keys[0].Name()) + } + + if keys[0].Path().String() != "/ipns/"+iface.FormatKeyID(self.ID()) { + t.Errorf("expected the key to have path '/ipns/%s', got '%s'", iface.FormatKeyID(self.ID()), keys[0].Path().String()) + } +} + +func (tp *TestSuite) TestRenameSelf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + return + } + + _, _, err = api.Key().Rename(ctx, "self", "foo") + if err == nil { + t.Error("expected error to not be nil") + } else { + if !strings.Contains(err.Error(), "cannot rename key with name 'self'") { + t.Fatalf("expected error 'cannot rename key with name 'self'', got '%s'", 
err.Error())
+		}
+	}
+
+	_, _, err = api.Key().Rename(ctx, "self", "foo", opt.Key.Force(true))
+	if err == nil {
+		t.Error("expected error to not be nil")
+	} else {
+		if !strings.Contains(err.Error(), "cannot rename key with name 'self'") {
+			t.Fatalf("expected error 'cannot rename key with name 'self'', got '%s'", err.Error())
+		}
+	}
+}
+
+func (tp *TestSuite) TestRemoveSelf(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+
+	_, err = api.Key().Remove(ctx, "self")
+	if err == nil {
+		t.Error("expected error to not be nil")
+	} else {
+		if !strings.Contains(err.Error(), "cannot remove key with name 'self'") {
+			t.Fatalf("expected error 'cannot remove key with name 'self'', got '%s'", err.Error())
+		}
+	}
+}
+
+func (tp *TestSuite) TestGenerate(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k, err := api.Key().Generate(ctx, "foo")
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+
+	if k.Name() != "foo" {
+		t.Errorf("expected the key to be called 'foo', got '%s'", k.Name())
+	}
+
+	verifyIPNSPath(t, k.Path().String())
+}
+
+func verifyIPNSPath(t *testing.T, p string) bool {
+	t.Helper()
+	if !strings.HasPrefix(p, "/ipns/") {
+		t.Errorf("path %q does not look like an IPNS path", p)
+		return false
+	}
+	k := p[len("/ipns/"):]
+	c, err := cid.Decode(k)
+	if err != nil {
+		t.Errorf("failed to decode IPNS key %q (%v)", k, err)
+		return false
+	}
+	b36, err := c.StringOfBase(mbase.Base36)
+	if err != nil {
+		t.Fatalf("cid cannot format itself in b36")
+		return false
+	}
+	if b36 != k {
+		t.Errorf("IPNS key is not base36")
+	}
+	return true
+}
+
+func (tp *TestSuite) TestGenerateSize(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k, err := api.Key().Generate(ctx, "foo", opt.Key.Size(2048))
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+
+	if k.Name() != "foo" {
+		t.Errorf("expected the key to be called 'foo', got '%s'", k.Name())
+	}
+
+	verifyIPNSPath(t, k.Path().String())
+}
+
+func (tp *TestSuite) TestGenerateType(t *testing.T) {
+	t.Skip("disabled until libp2p/specs#111 is fixed")
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k, err := api.Key().Generate(ctx, "bar", opt.Key.Type(opt.Ed25519Key))
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+
+	if k.Name() != "bar" {
+		t.Errorf("expected the key to be called 'bar', got '%s'", k.Name())
+	}
+
+	// Expected to be an inlined identity hash.
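+	// (Ed25519 public keys are small enough for the identity multihash,
+	// so the key ID embeds the key itself rather than a digest.)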
+ if !strings.HasPrefix(k.Path().String(), "/ipns/12") { + t.Errorf("expected the key to be prefixed with '/ipns/12', got '%s'", k.Path().String()) + } +} + +func (tp *TestSuite) TestGenerateExisting(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + _, err = api.Key().Generate(ctx, "foo") + if err == nil { + t.Error("expected error to not be nil") + } else { + if !strings.Contains(err.Error(), "key with name 'foo' already exists") { + t.Fatalf("expected error 'key with name 'foo' already exists', got '%s'", err.Error()) + } + } + + _, err = api.Key().Generate(ctx, "self") + if err == nil { + t.Error("expected error to not be nil") + } else { + if !strings.Contains(err.Error(), "cannot create key with name 'self'") { + t.Fatalf("expected error 'cannot create key with name 'self'', got '%s'", err.Error()) + } + } +} + +func (tp *TestSuite) TestList(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + l, err := api.Key().List(ctx) + if err != nil { + t.Fatal(err) + return + } + + if len(l) != 2 { + t.Fatalf("expected to get 2 keys, got %d", len(l)) + return + } + + if l[0].Name() != "self" { + t.Fatalf("expected key 0 to be called 'self', got '%s'", l[0].Name()) + return + } + + if l[1].Name() != "foo" { + t.Fatalf("expected key 1 to be called 'foo', got '%s'", l[1].Name()) + return + } + + verifyIPNSPath(t, l[0].Path().String()) + verifyIPNSPath(t, l[1].Path().String()) +} + +func (tp *TestSuite) TestRename(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + k, overwrote, err := api.Key().Rename(ctx, "foo", "bar") + if err != nil { + t.Fatal(err) + return + } + + if overwrote { + t.Error("overwrote should be false") + } + + if k.Name() != "bar" { + t.Errorf("returned key should be called 'bar', got '%s'", k.Name()) + } +} + +func (tp *TestSuite) TestRenameToSelf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + _, _, err = api.Key().Rename(ctx, "foo", "self") + if err == nil { + t.Error("expected error to not be nil") + } else { + if !strings.Contains(err.Error(), "cannot overwrite key with name 'self'") { + t.Fatalf("expected error 'cannot overwrite key with name 'self'', got '%s'", err.Error()) + } + } +} + +func (tp *TestSuite) TestRenameToSelfForce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + _, _, err = api.Key().Rename(ctx, "foo", "self", opt.Key.Force(true)) + if err == nil { + t.Error("expected error to not be nil") + } else { + if !strings.Contains(err.Error(), "cannot overwrite key with name 'self'") { + t.Fatalf("expected error 'cannot overwrite key with name 'self'', got '%s'", 
err.Error()) + } + } +} + +func (tp *TestSuite) TestRenameOverwriteNoForce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + _, err = api.Key().Generate(ctx, "bar") + if err != nil { + t.Fatal(err) + return + } + + _, _, err = api.Key().Rename(ctx, "foo", "bar") + if err == nil { + t.Error("expected error to not be nil") + } else { + if !strings.Contains(err.Error(), "key by that name already exists, refusing to overwrite") { + t.Fatalf("expected error 'key by that name already exists, refusing to overwrite', got '%s'", err.Error()) + } + } +} + +func (tp *TestSuite) TestRenameOverwrite(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + kfoo, err := api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + _, err = api.Key().Generate(ctx, "bar") + if err != nil { + t.Fatal(err) + return + } + + k, overwrote, err := api.Key().Rename(ctx, "foo", "bar", opt.Key.Force(true)) + if err != nil { + t.Fatal(err) + return + } + + if !overwrote { + t.Error("overwrote should be true") + } + + if k.Name() != "bar" { + t.Errorf("returned key should be called 'bar', got '%s'", k.Name()) + } + + if k.Path().String() != kfoo.Path().String() { + t.Errorf("k and kfoo should have equal paths, '%s'!='%s'", k.Path().String(), kfoo.Path().String()) + } +} + +func (tp *TestSuite) TestRenameSameNameNoForce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + k, overwrote, err := api.Key().Rename(ctx, "foo", "foo") + if err != nil { + t.Fatal(err) + return + } + + if overwrote { + t.Error("overwrote should be false") + } + + if k.Name() != "foo" { + t.Errorf("returned key should be called 'foo', got '%s'", k.Name()) + } +} + +func (tp *TestSuite) TestRenameSameName(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + k, overwrote, err := api.Key().Rename(ctx, "foo", "foo", opt.Key.Force(true)) + if err != nil { + t.Fatal(err) + return + } + + if overwrote { + t.Error("overwrote should be false") + } + + if k.Name() != "foo" { + t.Errorf("returned key should be called 'foo', got '%s'", k.Name()) + } +} + +func (tp *TestSuite) TestRemove(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + k, err := api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + l, err := api.Key().List(ctx) + if err != nil { + t.Fatal(err) + return + } + + if len(l) != 2 { + t.Fatalf("expected to get 2 keys, got %d", len(l)) + return + } + + p, err := api.Key().Remove(ctx, "foo") + if err != nil { + t.Fatal(err) + return + } + + if k.Path().String() != p.Path().String() { + t.Errorf("k and p should have equal paths, '%s'!='%s'", k.Path().String(), p.Path().String()) + } + + l, err = api.Key().List(ctx) + if err != nil { + t.Fatal(err) + return + } + + if len(l) != 1 { + t.Fatalf("expected 
to get 1 key, got %d", len(l)) + return + } + + if l[0].Name() != "self" { + t.Errorf("expected the key to be called 'self', got '%s'", l[0].Name()) + } +} diff --git a/coreiface/tests/name.go b/coreiface/tests/name.go new file mode 100644 index 0000000000..a67876cbaf --- /dev/null +++ b/coreiface/tests/name.go @@ -0,0 +1,274 @@ +package tests + +import ( + "context" + "io" + "math/rand" + gopath "path" + "testing" + "time" + + path "github.com/ipfs/boxo/coreiface/path" + + "github.com/ipfs/boxo/files" + + coreiface "github.com/ipfs/boxo/coreiface" + opt "github.com/ipfs/boxo/coreiface/options" +) + +func (tp *TestSuite) TestName(t *testing.T) { + tp.hasApi(t, func(api coreiface.CoreAPI) error { + if api.Name() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestPublishResolve", tp.TestPublishResolve) + t.Run("TestBasicPublishResolveKey", tp.TestBasicPublishResolveKey) + t.Run("TestBasicPublishResolveTimeout", tp.TestBasicPublishResolveTimeout) +} + +var rnd = rand.New(rand.NewSource(0x62796532303137)) + +func addTestObject(ctx context.Context, api coreiface.CoreAPI) (path.Path, error) { + return api.Unixfs().Add(ctx, files.NewReaderFile(&io.LimitedReader{R: rnd, N: 4092})) +} + +func appendPath(p path.Path, sub string) path.Path { + return path.New(gopath.Join(p.String(), sub)) +} + +func (tp *TestSuite) TestPublishResolve(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + init := func() (coreiface.CoreAPI, path.Path) { + apis, err := tp.MakeAPISwarm(ctx, true, 5) + if err != nil { + t.Fatal(err) + return nil, nil + } + api := apis[0] + + p, err := addTestObject(ctx, api) + if err != nil { + t.Fatal(err) + return nil, nil + } + return api, p + } + run := func(t *testing.T, ropts []opt.NameResolveOption) { + t.Run("basic", func(t *testing.T) { + api, p := init() + e, err := api.Name().Publish(ctx, p) + if err != nil { + t.Fatal(err) + } + + self, err := api.Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if e.Name() != coreiface.FormatKeyID(self.ID()) { + t.Errorf("expected e.Name to equal '%s', got '%s'", coreiface.FormatKeyID(self.ID()), e.Name()) + } + + if e.Value().String() != p.String() { + t.Errorf("expected paths to match, '%s'!='%s'", e.Value().String(), p.String()) + } + + resPath, err := api.Name().Resolve(ctx, e.Name(), ropts...) + if err != nil { + t.Fatal(err) + } + + if resPath.String() != p.String() { + t.Errorf("expected paths to match, '%s'!='%s'", resPath.String(), p.String()) + } + }) + + t.Run("publishPath", func(t *testing.T) { + api, p := init() + e, err := api.Name().Publish(ctx, appendPath(p, "/test")) + if err != nil { + t.Fatal(err) + } + + self, err := api.Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if e.Name() != coreiface.FormatKeyID(self.ID()) { + t.Errorf("expected e.Name to equal '%s', got '%s'", coreiface.FormatKeyID(self.ID()), e.Name()) + } + + if e.Value().String() != p.String()+"/test" { + t.Errorf("expected paths to match, '%s'!='%s'", e.Value().String(), p.String()) + } + + resPath, err := api.Name().Resolve(ctx, e.Name(), ropts...) 
+ if err != nil { + t.Fatal(err) + } + + if resPath.String() != p.String()+"/test" { + t.Errorf("expected paths to match, '%s'!='%s'", resPath.String(), p.String()+"/test") + } + }) + + t.Run("revolvePath", func(t *testing.T) { + api, p := init() + e, err := api.Name().Publish(ctx, p) + if err != nil { + t.Fatal(err) + } + + self, err := api.Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if e.Name() != coreiface.FormatKeyID(self.ID()) { + t.Errorf("expected e.Name to equal '%s', got '%s'", coreiface.FormatKeyID(self.ID()), e.Name()) + } + + if e.Value().String() != p.String() { + t.Errorf("expected paths to match, '%s'!='%s'", e.Value().String(), p.String()) + } + + resPath, err := api.Name().Resolve(ctx, e.Name()+"/test", ropts...) + if err != nil { + t.Fatal(err) + } + + if resPath.String() != p.String()+"/test" { + t.Errorf("expected paths to match, '%s'!='%s'", resPath.String(), p.String()+"/test") + } + }) + + t.Run("publishRevolvePath", func(t *testing.T) { + api, p := init() + e, err := api.Name().Publish(ctx, appendPath(p, "/a")) + if err != nil { + t.Fatal(err) + } + + self, err := api.Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if e.Name() != coreiface.FormatKeyID(self.ID()) { + t.Errorf("expected e.Name to equal '%s', got '%s'", coreiface.FormatKeyID(self.ID()), e.Name()) + } + + if e.Value().String() != p.String()+"/a" { + t.Errorf("expected paths to match, '%s'!='%s'", e.Value().String(), p.String()) + } + + resPath, err := api.Name().Resolve(ctx, e.Name()+"/b", ropts...) + if err != nil { + t.Fatal(err) + } + + if resPath.String() != p.String()+"/a/b" { + t.Errorf("expected paths to match, '%s'!='%s'", resPath.String(), p.String()+"/a/b") + } + }) + } + + t.Run("default", func(t *testing.T) { + run(t, []opt.NameResolveOption{}) + }) + + t.Run("nocache", func(t *testing.T) { + run(t, []opt.NameResolveOption{opt.Name.Cache(false)}) + }) +} + +func (tp *TestSuite) TestBasicPublishResolveKey(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + apis, err := tp.MakeAPISwarm(ctx, true, 5) + if err != nil { + t.Fatal(err) + } + api := apis[0] + + k, err := api.Key().Generate(ctx, "foo") + if err != nil { + t.Fatal(err) + } + + p, err := addTestObject(ctx, api) + if err != nil { + t.Fatal(err) + } + + e, err := api.Name().Publish(ctx, p, opt.Name.Key(k.Name())) + if err != nil { + t.Fatal(err) + } + + if e.Name() != coreiface.FormatKey(k) { + t.Errorf("expected e.Name to equal %s, got '%s'", e.Name(), coreiface.FormatKey(k)) + } + + if e.Value().String() != p.String() { + t.Errorf("expected paths to match, '%s'!='%s'", e.Value().String(), p.String()) + } + + resPath, err := api.Name().Resolve(ctx, e.Name()) + if err != nil { + t.Fatal(err) + } + + if resPath.String() != p.String() { + t.Errorf("expected paths to match, '%s'!='%s'", resPath.String(), p.String()) + } +} + +func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { + t.Skip("ValidTime doesn't appear to work at this time resolution") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + apis, err := tp.MakeAPISwarm(ctx, true, 5) + if err != nil { + t.Fatal(err) + } + api := apis[0] + p, err := addTestObject(ctx, api) + if err != nil { + t.Fatal(err) + } + + e, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Millisecond*100)) + if err != nil { + t.Fatal(err) + } + + self, err := api.Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if e.Name() != coreiface.FormatKeyID(self.ID()) { + t.Errorf("expected e.Name to 
equal '%s', got '%s'", coreiface.FormatKeyID(self.ID()), e.Name()) + } + + if e.Value().String() != p.String() { + t.Errorf("expected paths to match, '%s'!='%s'", e.Value().String(), p.String()) + } + + time.Sleep(time.Second) + + _, err = api.Name().Resolve(ctx, e.Name()) + if err == nil { + t.Fatal("Expected an error") + } +} + +//TODO: When swarm api is created, add multinode tests diff --git a/coreiface/tests/object.go b/coreiface/tests/object.go new file mode 100644 index 0000000000..8e8f52b3d4 --- /dev/null +++ b/coreiface/tests/object.go @@ -0,0 +1,467 @@ +package tests + +import ( + "bytes" + "context" + "encoding/hex" + "io" + "strings" + "testing" + + iface "github.com/ipfs/boxo/coreiface" + opt "github.com/ipfs/boxo/coreiface/options" +) + +func (tp *TestSuite) TestObject(t *testing.T) { + tp.hasApi(t, func(api iface.CoreAPI) error { + if api.Object() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestNew", tp.TestNew) + t.Run("TestObjectPut", tp.TestObjectPut) + t.Run("TestObjectGet", tp.TestObjectGet) + t.Run("TestObjectData", tp.TestObjectData) + t.Run("TestObjectLinks", tp.TestObjectLinks) + t.Run("TestObjectStat", tp.TestObjectStat) + t.Run("TestObjectAddLink", tp.TestObjectAddLink) + t.Run("TestObjectAddLinkCreate", tp.TestObjectAddLinkCreate) + t.Run("TestObjectRmLink", tp.TestObjectRmLink) + t.Run("TestObjectAddData", tp.TestObjectAddData) + t.Run("TestObjectSetData", tp.TestObjectSetData) + t.Run("TestDiffTest", tp.TestDiffTest) +} + +func (tp *TestSuite) TestNew(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + emptyNode, err := api.Object().New(ctx) + if err != nil { + t.Fatal(err) + } + + dirNode, err := api.Object().New(ctx, opt.Object.Type("unixfs-dir")) + if err != nil { + t.Fatal(err) + } + + if emptyNode.String() != "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" { + t.Errorf("Unexpected emptyNode path: %s", emptyNode.String()) + } + + if dirNode.String() != "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" { + t.Errorf("Unexpected dirNode path: %s", dirNode.String()) + } +} + +func (tp *TestSuite) TestObjectPut(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + + p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"YmFy"}`), opt.Object.DataType("base64")) //bar + if err != nil { + t.Fatal(err) + } + + pbBytes, err := hex.DecodeString("0a0362617a") + if err != nil { + t.Fatal(err) + } + + p3, err := api.Object().Put(ctx, bytes.NewReader(pbBytes), opt.Object.InputEnc("protobuf")) + if err != nil { + t.Fatal(err) + } + + if p1.String() != "/ipfs/QmQeGyS87nyijii7kFt1zbe4n2PsXTFimzsdxyE9qh9TST" { + t.Errorf("unexpected path: %s", p1.String()) + } + + if p2.String() != "/ipfs/QmNeYRbCibmaMMK6Du6ChfServcLqFvLJF76PzzF76SPrZ" { + t.Errorf("unexpected path: %s", p2.String()) + } + + if p3.String() != "/ipfs/QmZreR7M2t7bFXAdb1V5FtQhjk4t36GnrvueLJowJbQM9m" { + t.Errorf("unexpected path: %s", p3.String()) + } +} + +func (tp *TestSuite) TestObjectGet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + 
+
+	nd, err := api.Object().Get(ctx, p1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(nd.RawData()[len(nd.RawData())-3:]) != "foo" {
+		t.Fatal("got non-matching data")
+	}
+}
+
+func (tp *TestSuite) TestObjectData(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	r, err := api.Object().Data(ctx, p1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	data, err := io.ReadAll(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(data) != "foo" {
+		t.Fatal("got non-matching data")
+	}
+}
+
+func (tp *TestSuite) TestObjectLinks(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p2, err := api.Object().Put(ctx, strings.NewReader(`{"Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`"}]}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	links, err := api.Object().Links(ctx, p2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(links) != 1 {
+		t.Errorf("unexpected number of links: %d", len(links))
+	}
+
+	if links[0].Cid.String() != p1.Cid().String() {
+		t.Fatal("cids didn't match")
+	}
+
+	if links[0].Name != "bar" {
+		t.Fatal("unexpected link name")
+	}
+}
+
+func (tp *TestSuite) TestObjectStat(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stat, err := api.Object().Stat(ctx, p2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if stat.Cid.String() != p2.Cid().String() {
+		t.Error("unexpected stat.Cid")
+	}
+
+	if stat.NumLinks != 1 {
+		t.Errorf("unexpected stat.NumLinks")
+	}
+
+	if stat.BlockSize != 51 {
+		t.Error("unexpected stat.BlockSize")
+	}
+
+	if stat.LinksSize != 47 {
+		t.Errorf("unexpected stat.LinksSize: %d", stat.LinksSize)
+	}
+
+	if stat.DataSize != 4 {
+		t.Error("unexpected stat.DataSize")
+	}
+
+	if stat.CumulativeSize != 54 {
+		t.Error("unexpected stat.CumulativeSize")
+	}
+}
+
+func (tp *TestSuite) TestObjectAddLink(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	api, err := tp.makeAPI(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p3, err := api.Object().AddLink(ctx, p2, "abc", p2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	links, err := api.Object().Links(ctx, p3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(links) != 2 {
+		t.Errorf("unexpected number of links: %d", len(links))
+	}
+
+	if links[0].Name != "abc" {
+		t.Errorf("unexpected link 0 name: %s", links[0].Name)
+	}
+
+	if links[1].Name != "bar" {
+		t.Errorf("unexpected link 1 name: %s", links[1].Name)
+	}
+}
+
+func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) {
+	ctx, 
cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + + p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`)) + if err != nil { + t.Fatal(err) + } + + _, err = api.Object().AddLink(ctx, p2, "abc/d", p2) + if err == nil { + t.Fatal("expected an error") + } + if !strings.Contains(err.Error(), "no link by that name") { + t.Fatalf("unexpected error: %s", err.Error()) + } + + p3, err := api.Object().AddLink(ctx, p2, "abc/d", p2, opt.Object.Create(true)) + if err != nil { + t.Fatal(err) + } + + links, err := api.Object().Links(ctx, p3) + if err != nil { + t.Fatal(err) + } + + if len(links) != 2 { + t.Errorf("unexpected number of links: %d", len(links)) + } + + if links[0].Name != "abc" { + t.Errorf("unexpected link 0 name: %s", links[0].Name) + } + + if links[1].Name != "bar" { + t.Errorf("unexpected link 1 name: %s", links[1].Name) + } +} + +func (tp *TestSuite) TestObjectRmLink(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + + p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`)) + if err != nil { + t.Fatal(err) + } + + p3, err := api.Object().RmLink(ctx, p2, "bar") + if err != nil { + t.Fatal(err) + } + + links, err := api.Object().Links(ctx, p3) + if err != nil { + t.Fatal(err) + } + + if len(links) != 0 { + t.Errorf("unexpected number of links: %d", len(links)) + } +} + +func (tp *TestSuite) TestObjectAddData(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + + p2, err := api.Object().AppendData(ctx, p1, strings.NewReader("bar")) + if err != nil { + t.Fatal(err) + } + + r, err := api.Object().Data(ctx, p2) + if err != nil { + t.Fatal(err) + } + + data, err := io.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + if string(data) != "foobar" { + t.Error("unexpected data") + } +} + +func (tp *TestSuite) TestObjectSetData(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + + p2, err := api.Object().SetData(ctx, p1, strings.NewReader("bar")) + if err != nil { + t.Fatal(err) + } + + r, err := api.Object().Data(ctx, p2) + if err != nil { + t.Fatal(err) + } + + data, err := io.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + if string(data) != "bar" { + t.Error("unexpected data") + } +} + +func (tp *TestSuite) TestDiffTest(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) + if err != nil { + t.Fatal(err) + } + + p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bar"}`)) + if err != nil { + 
t.Fatal(err) + } + + changes, err := api.Object().Diff(ctx, p1, p2) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatal("unexpected changes len") + } + + if changes[0].Type != iface.DiffMod { + t.Fatal("unexpected change type") + } + + if changes[0].Before.String() != p1.String() { + t.Fatal("unexpected before path") + } + + if changes[0].After.String() != p2.String() { + t.Fatal("unexpected after path") + } +} diff --git a/coreiface/tests/path.go b/coreiface/tests/path.go new file mode 100644 index 0000000000..321c79d6e8 --- /dev/null +++ b/coreiface/tests/path.go @@ -0,0 +1,197 @@ +package tests + +import ( + "context" + "math" + "strings" + "testing" + + "github.com/ipfs/boxo/coreiface/path" + + "github.com/ipfs/boxo/coreiface/options" + + ipldcbor "github.com/ipfs/go-ipld-cbor" +) + +func (tp *TestSuite) TestPath(t *testing.T) { + t.Run("TestMutablePath", tp.TestMutablePath) + t.Run("TestPathRemainder", tp.TestPathRemainder) + t.Run("TestEmptyPathRemainder", tp.TestEmptyPathRemainder) + t.Run("TestInvalidPathRemainder", tp.TestInvalidPathRemainder) + t.Run("TestPathRoot", tp.TestPathRoot) + t.Run("TestPathJoin", tp.TestPathJoin) +} + +func (tp *TestSuite) TestMutablePath(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + blk, err := api.Block().Put(ctx, strings.NewReader(`foo`)) + if err != nil { + t.Fatal(err) + } + + if blk.Path().Mutable() { + t.Error("expected /ipld path to be immutable") + } + + // get self /ipns path + + if api.Key() == nil { + t.Fatal(".Key not implemented") + } + + keys, err := api.Key().List(ctx) + if err != nil { + t.Fatal(err) + } + + if !keys[0].Path().Mutable() { + t.Error("expected self /ipns path to be mutable") + } +} + +func (tp *TestSuite) TestPathRemainder(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + if api.Dag() == nil { + t.Fatal(".Dag not implemented") + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"bar": "baz"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + if err := api.Dag().Add(ctx, nd); err != nil { + t.Fatal(err) + } + + rp1, err := api.ResolvePath(ctx, path.New(nd.String()+"/foo/bar")) + if err != nil { + t.Fatal(err) + } + + if rp1.Remainder() != "foo/bar" { + t.Error("expected to get path remainder") + } +} + +func (tp *TestSuite) TestEmptyPathRemainder(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + if api.Dag() == nil { + t.Fatal(".Dag not implemented") + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"bar": "baz"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + if err := api.Dag().Add(ctx, nd); err != nil { + t.Fatal(err) + } + + rp1, err := api.ResolvePath(ctx, path.New(nd.Cid().String())) + if err != nil { + t.Fatal(err) + } + + if rp1.Remainder() != "" { + t.Error("expected the resolved path to not have a remainder") + } +} + +func (tp *TestSuite) TestInvalidPathRemainder(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + if api.Dag() == nil { + t.Fatal(".Dag not implemented") + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"bar": "baz"}}`), math.MaxUint64, -1) + if err != nil { +
t.Fatal(err) + } + + if err := api.Dag().Add(ctx, nd); err != nil { + t.Fatal(err) + } + + _, err = api.ResolvePath(ctx, path.New("/ipld/"+nd.Cid().String()+"/bar/baz")) + if err == nil || !strings.Contains(err.Error(), `no link named "bar"`) { + t.Fatalf("unexpected error: %s", err) + } +} + +func (tp *TestSuite) TestPathRoot(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + if api.Block() == nil { + t.Fatal(".Block not implemented") + } + + blk, err := api.Block().Put(ctx, strings.NewReader(`foo`), options.Block.Format("raw")) + if err != nil { + t.Fatal(err) + } + + if api.Dag() == nil { + t.Fatal(".Dag not implemented") + } + + nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"/": "`+blk.Path().Cid().String()+`"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + if err := api.Dag().Add(ctx, nd); err != nil { + t.Fatal(err) + } + + rp, err := api.ResolvePath(ctx, path.New("/ipld/"+nd.Cid().String()+"/foo")) + if err != nil { + t.Fatal(err) + } + + if rp.Root().String() != nd.Cid().String() { + t.Error("unexpected path root") + } + + if rp.Cid().String() != blk.Path().Cid().String() { + t.Error("unexpected path cid") + } +} + +func (tp *TestSuite) TestPathJoin(t *testing.T) { + p1 := path.New("/ipfs/QmYNmQKp6SuaVrpgWRsPTgCQCnpxUYGq76YEKBXuj2N4H6/bar/baz") + + if path.Join(p1, "foo").String() != "/ipfs/QmYNmQKp6SuaVrpgWRsPTgCQCnpxUYGq76YEKBXuj2N4H6/bar/baz/foo" { + t.Error("unexpected path") + } +} diff --git a/coreiface/tests/pin.go b/coreiface/tests/pin.go new file mode 100644 index 0000000000..ac90d097ee --- /dev/null +++ b/coreiface/tests/pin.go @@ -0,0 +1,601 @@ +package tests + +import ( + "context" + "math" + "strings" + "testing" + + iface "github.com/ipfs/boxo/coreiface" + opt "github.com/ipfs/boxo/coreiface/options" + "github.com/ipfs/boxo/coreiface/path" + + "github.com/ipfs/go-cid" + ipldcbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" +) + +func (tp *TestSuite) TestPin(t *testing.T) { + tp.hasApi(t, func(api iface.CoreAPI) error { + if api.Pin() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestPinAdd", tp.TestPinAdd) + t.Run("TestPinSimple", tp.TestPinSimple) + t.Run("TestPinRecursive", tp.TestPinRecursive) + t.Run("TestPinLsIndirect", tp.TestPinLsIndirect) + t.Run("TestPinLsPrecedence", tp.TestPinLsPrecedence) + t.Run("TestPinIsPinned", tp.TestPinIsPinned) +} + +func (tp *TestSuite) TestPinAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p, err := api.Unixfs().Add(ctx, strFile("foo")()) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, p) + if err != nil { + t.Fatal(err) + } +} + +func (tp *TestSuite) TestPinSimple(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p, err := api.Unixfs().Add(ctx, strFile("foo")()) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, p) + if err != nil { + t.Fatal(err) + } + + list, err := accPins(api.Pin().Ls(ctx)) + if err != nil { + t.Fatal(err) + } + + if len(list) != 1 { + t.Errorf("unexpected pin list len: %d", len(list)) + } + + if list[0].Path().Cid().String() != p.Cid().String() { + t.Error("paths don't match") + } + + if list[0].Type() != "recursive" { + t.Error("unexpected pin type") + } + + 
assertIsPinned(t, ctx, api, p, "recursive") + + err = api.Pin().Rm(ctx, p) + if err != nil { + t.Fatal(err) + } + + list, err = accPins(api.Pin().Ls(ctx)) + if err != nil { + t.Fatal(err) + } + + if len(list) != 0 { + t.Errorf("unexpected pin list len: %d", len(list)) + } +} + +func (tp *TestSuite) TestPinRecursive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p0, err := api.Unixfs().Add(ctx, strFile("foo")()) + if err != nil { + t.Fatal(err) + } + + p1, err := api.Unixfs().Add(ctx, strFile("bar")()) + if err != nil { + t.Fatal(err) + } + + nd2, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+p0.Cid().String()+`"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + nd3, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+p1.Cid().String()+`"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + if err := api.Dag().AddMany(ctx, []ipld.Node{nd2, nd3}); err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(nd2.Cid())) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(nd3.Cid()), opt.Pin.Recursive(false)) + if err != nil { + t.Fatal(err) + } + + list, err := accPins(api.Pin().Ls(ctx)) + if err != nil { + t.Fatal(err) + } + + if len(list) != 3 { + t.Errorf("unexpected pin list len: %d", len(list)) + } + + list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Direct())) + if err != nil { + t.Fatal(err) + } + + if len(list) != 1 { + t.Errorf("unexpected pin list len: %d", len(list)) + } + + if list[0].Path().String() != path.IpldPath(nd3.Cid()).String() { + t.Errorf("unexpected path, %s != %s", list[0].Path().String(), path.IpldPath(nd3.Cid()).String()) + } + + list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Recursive())) + if err != nil { + t.Fatal(err) + } + + if len(list) != 1 { + t.Errorf("unexpected pin list len: %d", len(list)) + } + + if list[0].Path().String() != path.IpldPath(nd2.Cid()).String() { + t.Errorf("unexpected path, %s != %s", list[0].Path().String(), path.IpldPath(nd2.Cid()).String()) + } + + list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Indirect())) + if err != nil { + t.Fatal(err) + } + + if len(list) != 1 { + t.Errorf("unexpected pin list len: %d", len(list)) + } + + if list[0].Path().Cid().String() != p0.Cid().String() { + t.Errorf("unexpected path, %s != %s", list[0].Path().Cid().String(), p0.Cid().String()) + } + + res, err := api.Pin().Verify(ctx) + if err != nil { + t.Fatal(err) + } + n := 0 + for r := range res { + if !r.Ok() { + t.Error("expected pin to be ok") + } + n++ + } + + if n != 1 { + t.Errorf("unexpected verify result count: %d", n) + } + + //TODO: figure out a way to test verify without touching IpfsNode + /* + err = api.Block().Rm(ctx, p0, opt.Block.Force(true)) + if err != nil { + t.Fatal(err) + } + + res, err = api.Pin().Verify(ctx) + if err != nil { + t.Fatal(err) + } + n = 0 + for r := range res { + if r.Ok() { + t.Error("expected pin to not be ok") + } + + if len(r.BadNodes()) != 1 { + t.Fatalf("unexpected badNodes len") + } + + if r.BadNodes()[0].Path().Cid().String() != p0.Cid().String() { + t.Error("unexpected badNode path") + } + + if r.BadNodes()[0].Err().Error() != "merkledag: not found" { + t.Errorf("unexpected badNode error: %s", r.BadNodes()[0].Err().Error()) + } + n++ + } + + if n != 1 { + t.Errorf("unexpected verify result count: %d", n) + } + */ +} + +// TestPinLsIndirect verifies that indirect nodes are listed by pin ls even if a
parent node is directly pinned +func (tp *TestSuite) TestPinLsIndirect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "foo") + + err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(false)) + if err != nil { + t.Fatal(err) + } + + assertPinTypes(t, ctx, api, []cidContainer{grandparent}, []cidContainer{parent}, []cidContainer{leaf}) +} + +// TestPinLsPrecedence verifies the precedence of pins (recursive > direct > indirect) +func (tp *TestSuite) TestPinLsPrecedence(t *testing.T) { + // Testing precedence of recursive, direct and indirect pins + // Results should be recursive > indirect, direct > indirect, and recursive > direct + + t.Run("TestPinLsPrecedenceRecursiveIndirect", tp.TestPinLsPrecedenceRecursiveIndirect) + t.Run("TestPinLsPrecedenceDirectIndirect", tp.TestPinLsPrecedenceDirectIndirect) + t.Run("TestPinLsPrecedenceRecursiveDirect", tp.TestPinLsPrecedenceRecursiveDirect) +} + +func (tp *TestSuite) TestPinLsPrecedenceRecursiveIndirect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + // Test recursive > indirect + leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "recursive > indirect") + + err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(parent.Cid())) + if err != nil { + t.Fatal(err) + } + + assertPinTypes(t, ctx, api, []cidContainer{grandparent, parent}, []cidContainer{}, []cidContainer{leaf}) +} + +func (tp *TestSuite) TestPinLsPrecedenceDirectIndirect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + // Test direct > indirect + leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "direct > indirect") + + err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(false)) + if err != nil { + t.Fatal(err) + } + + assertPinTypes(t, ctx, api, []cidContainer{grandparent}, []cidContainer{parent}, []cidContainer{leaf}) +} + +func (tp *TestSuite) TestPinLsPrecedenceRecursiveDirect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + // Test recursive > direct + leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "recursive + direct = error") + + err = api.Pin().Add(ctx, path.IpldPath(parent.Cid())) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(false)) + if err == nil { + t.Fatal("expected error directly pinning a recursively pinned node") + } + + assertPinTypes(t, ctx, api, []cidContainer{parent}, []cidContainer{}, []cidContainer{leaf}) + + err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid()), opt.Pin.Recursive(false)) + if err != nil { + t.Fatal(err) + } + + err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) + if err != nil { + t.Fatal(err) + } + + assertPinTypes(t, ctx, api, []cidContainer{grandparent, parent}, []cidContainer{}, []cidContainer{leaf}) +} +
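+// TestPinIsPinned verifies that Pin().IsPinned reports the expected pin state
+// (not pinned, direct, recursive, or indirect) for every node in a
+// three-node chain as pins are added.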
+func (tp *TestSuite) TestPinIsPinned(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "foofoo") + + assertNotPinned(t, ctx, api, path.IpldPath(grandparent.Cid())) + assertNotPinned(t, ctx, api, path.IpldPath(parent.Cid())) + assertNotPinned(t, ctx, api, path.IpldPath(leaf.Cid())) + + err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(true)) + if err != nil { + t.Fatal(err) + } + + assertNotPinned(t, ctx, api, path.IpldPath(grandparent.Cid())) + assertIsPinned(t, ctx, api, path.IpldPath(parent.Cid()), "recursive") + assertIsPinned(t, ctx, api, path.IpldPath(leaf.Cid()), "indirect") + + err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid()), opt.Pin.Recursive(false)) + if err != nil { + t.Fatal(err) + } + + assertIsPinned(t, ctx, api, path.IpldPath(grandparent.Cid()), "direct") + assertIsPinned(t, ctx, api, path.IpldPath(parent.Cid()), "recursive") + assertIsPinned(t, ctx, api, path.IpldPath(leaf.Cid()), "indirect") +} + +type cidContainer interface { + Cid() cid.Cid +} + +func getThreeChainedNodes(t *testing.T, ctx context.Context, api iface.CoreAPI, leafData string) (cidContainer, cidContainer, cidContainer) { + leaf, err := api.Unixfs().Add(ctx, strFile(leafData)()) + if err != nil { + t.Fatal(err) + } + + parent, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+leaf.Cid().String()+`"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + grandparent, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+parent.Cid().String()+`"}}`), math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + if err := api.Dag().AddMany(ctx, []ipld.Node{parent, grandparent}); err != nil { + t.Fatal(err) + } + + return leaf, parent, grandparent +} + +func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recursive, direct, indirect []cidContainer) { + assertPinLsAllConsistency(t, ctx, api) + + list, err := accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Recursive())) + if err != nil { + t.Fatal(err) + } + + assertPinCids(t, list, recursive...) + + list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Direct())) + if err != nil { + t.Fatal(err) + } + + assertPinCids(t, list, direct...) + + list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Indirect())) + if err != nil { + t.Fatal(err) + } + + assertPinCids(t, list, indirect...)
+} + +// assertPinCids verifies that the pins match the expected cids +func assertPinCids(t *testing.T, pins []iface.Pin, cids ...cidContainer) { + t.Helper() + + if expected, actual := len(cids), len(pins); expected != actual { + t.Fatalf("expected pin list to have len %d, was %d", expected, actual) + } + + cSet := cid.NewSet() + for _, c := range cids { + cSet.Add(c.Cid()) + } + + valid := true + for _, p := range pins { + c := p.Path().Cid() + if cSet.Has(c) { + cSet.Remove(c) + } else { + valid = false + break + } + } + + valid = valid && cSet.Len() == 0 + + if !valid { + pinStrs := make([]string, len(pins)) + for i, p := range pins { + pinStrs[i] = p.Path().Cid().String() + } + pathStrs := make([]string, len(cids)) + for i, c := range cids { + pathStrs[i] = c.Cid().String() + } + t.Fatalf("expected: %s \nactual: %s", strings.Join(pathStrs, ", "), strings.Join(pinStrs, ", ")) + } +} + +// assertPinLsAllConsistency verifies that listing all pins gives the same result as listing the pin types individually +func assertPinLsAllConsistency(t *testing.T, ctx context.Context, api iface.CoreAPI) { + t.Helper() + allPins, err := accPins(api.Pin().Ls(ctx)) + if err != nil { + t.Fatal(err) + } + + type pinTypeProps struct { + *cid.Set + opt.PinLsOption + } + + all, recursive, direct, indirect := cid.NewSet(), cid.NewSet(), cid.NewSet(), cid.NewSet() + typeMap := map[string]*pinTypeProps{ + "recursive": {recursive, opt.Pin.Ls.Recursive()}, + "direct": {direct, opt.Pin.Ls.Direct()}, + "indirect": {indirect, opt.Pin.Ls.Indirect()}, + } + + for _, p := range allPins { + if !all.Visit(p.Path().Cid()) { + t.Fatalf("pin ls returned the same cid multiple times") + } + + typeStr := p.Type() + if typeSet, ok := typeMap[typeStr]; ok { + typeSet.Add(p.Path().Cid()) + } else { + t.Fatalf("unknown pin type: %s", typeStr) + } + } + + for typeStr, pinProps := range typeMap { + pins, err := accPins(api.Pin().Ls(ctx, pinProps.PinLsOption)) + if err != nil { + t.Fatal(err) + } + + if expected, actual := pinProps.Set.Len(), len(pins); expected != actual { + t.Fatalf("pin ls all has %d pins of type %s, but pin ls for the type has %d", expected, typeStr, actual) + } + + for _, p := range pins { + if pinType := p.Type(); pinType != typeStr { + t.Fatalf("returned wrong pin type: expected %s, got %s", typeStr, pinType) + } + + if c := p.Path().Cid(); !pinProps.Has(c) { + t.Fatalf("%s expected to be in pin ls all as type %s", c.String(), typeStr) + } + } + } +} + +func assertIsPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path, typeStr string) { + t.Helper() + withType, err := opt.Pin.IsPinned.Type(typeStr) + if err != nil { + t.Fatal("unhandled pin type") + } + + whyPinned, pinned, err := api.Pin().IsPinned(ctx, p, withType) + if err != nil { + t.Fatal(err) + } + + if !pinned { + t.Fatalf("%s expected to be pinned with type %s", p, typeStr) + } + + switch typeStr { + case "recursive", "direct": + if typeStr != whyPinned { + t.Fatalf("reason for pinning expected to be %s for %s, got %s", typeStr, p, whyPinned) + } + case "indirect": + if whyPinned == "" { + t.Fatalf("expected to have a pin reason for %s", p) + } + } +} + +func assertNotPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path) { + t.Helper() + + _, pinned, err := api.Pin().IsPinned(ctx, p) + if err != nil { + t.Fatal(err) + } + + if pinned { + t.Fatalf("%s expected to not be pinned", p) + } +} + +func accPins(pins <-chan iface.Pin, err error) ([]iface.Pin, error) { + if err != nil { + return nil, err + } + + var
result []iface.Pin + + for pin := range pins { + if pin.Err() != nil { + return nil, pin.Err() + } + result = append(result, pin) + } + + return result, nil +} diff --git a/coreiface/tests/pubsub.go b/coreiface/tests/pubsub.go new file mode 100644 index 0000000000..446e0771a8 --- /dev/null +++ b/coreiface/tests/pubsub.go @@ -0,0 +1,136 @@ +package tests + +import ( + "context" + "testing" + "time" + + iface "github.com/ipfs/boxo/coreiface" + "github.com/ipfs/boxo/coreiface/options" +) + +func (tp *TestSuite) TestPubSub(t *testing.T) { + tp.hasApi(t, func(api iface.CoreAPI) error { + if api.PubSub() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestBasicPubSub", tp.TestBasicPubSub) +} + +func (tp *TestSuite) TestBasicPubSub(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + apis, err := tp.MakeAPISwarm(ctx, true, 2) + if err != nil { + t.Fatal(err) + } + + sub, err := apis[0].PubSub().Subscribe(ctx, "testch") + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + go func() { + defer close(done) + + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + err := apis[1].PubSub().Publish(ctx, "testch", []byte("hello world")) + switch err { + case nil: + case context.Canceled: + return + default: + t.Error(err) + cancel() + return + } + select { + case <-ticker.C: + case <-ctx.Done(): + return + } + } + }() + + // Wait for the sender to finish before we return. + // Otherwise, we can get random errors as publish fails. + defer func() { + cancel() + <-done + }() + + m, err := sub.Next(ctx) + if err != nil { + t.Fatal(err) + } + + if string(m.Data()) != "hello world" { + t.Errorf("got invalid data: %s", string(m.Data())) + } + + self1, err := apis[1].Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if m.From() != self1.ID() { + t.Errorf("m.From didn't match") + } + + peers, err := apis[1].PubSub().Peers(ctx, options.PubSub.Topic("testch")) + if err != nil { + t.Fatal(err) + } + + if len(peers) != 1 { + t.Fatalf("got incorrect number of peers: %d", len(peers)) + } + + self0, err := apis[0].Key().Self(ctx) + if err != nil { + t.Fatal(err) + } + + if peers[0] != self0.ID() { + t.Errorf("peer didn't match") + } + + peers, err = apis[1].PubSub().Peers(ctx, options.PubSub.Topic("nottestch")) + if err != nil { + t.Fatal(err) + } + + if len(peers) != 0 { + t.Fatalf("got incorrect number of peers: %d", len(peers)) + } + + topics, err := apis[0].PubSub().Ls(ctx) + if err != nil { + t.Fatal(err) + } + + if len(topics) != 1 { + t.Fatalf("got incorrect number of topics: %d", len(topics)) + } + + if topics[0] != "testch" { + t.Errorf("topic didn't match") + } + + topics, err = apis[1].PubSub().Ls(ctx) + if err != nil { + t.Fatal(err) + } + + if len(topics) != 0 { + t.Fatalf("got incorrect number of topics: %d", len(topics)) + } +} diff --git a/coreiface/tests/routing.go b/coreiface/tests/routing.go new file mode 100644 index 0000000000..e1d8d10604 --- /dev/null +++ b/coreiface/tests/routing.go @@ -0,0 +1,92 @@ +package tests + +import ( + "context" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + iface "github.com/ipfs/boxo/coreiface" + ipns_pb "github.com/ipfs/boxo/ipns/pb" +) + +func (tp *TestSuite) TestRouting(t *testing.T) { + tp.hasApi(t, func(api iface.CoreAPI) error { + if api.Routing() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestRoutingGet", tp.TestRoutingGet) + t.Run("TestRoutingPut", tp.TestRoutingPut) +} + +func (tp *TestSuite)
testRoutingPublishKey(t *testing.T, ctx context.Context, api iface.CoreAPI) iface.IpnsEntry { + p, err := addTestObject(ctx, api) + if err != nil { + t.Fatal(err) + } + + entry, err := api.Name().Publish(ctx, p) + if err != nil { + t.Fatal(err) + } + + time.Sleep(3 * time.Second) + return entry +} + +func (tp *TestSuite) TestRoutingGet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + apis, err := tp.MakeAPISwarm(ctx, true, 2) + if err != nil { + t.Fatal(err) + } + + // Node 1: publishes an IPNS name + ipnsEntry := tp.testRoutingPublishKey(t, ctx, apis[0]) + + // Node 2: retrieves the best value for the IPNS name. + data, err := apis[1].Routing().Get(ctx, "/ipns/"+ipnsEntry.Name()) + if err != nil { + t.Fatal(err) + } + + // Checks if values match. + var entry ipns_pb.IpnsEntry + err = proto.Unmarshal(data, &entry) + if err != nil { + t.Fatal(err) + } + + if string(entry.GetValue()) != ipnsEntry.Value().String() { + t.Fatalf("routing key has wrong value, expected %s, got %s", ipnsEntry.Value().String(), string(entry.GetValue())) + } +} + +func (tp *TestSuite) TestRoutingPut(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + apis, err := tp.MakeAPISwarm(ctx, true, 2) + if err != nil { + t.Fatal(err) + } + + // Create and publish IPNS entry. + ipnsEntry := tp.testRoutingPublishKey(t, ctx, apis[0]) + + // Get valid routing value. + data, err := apis[0].Routing().Get(ctx, "/ipns/"+ipnsEntry.Name()) + if err != nil { + t.Fatal(err) + } + + // Put routing value. + err = apis[1].Routing().Put(ctx, "/ipns/"+ipnsEntry.Name(), data) + if err != nil { + t.Fatal(err) + } +} diff --git a/coreiface/tests/unixfs.go b/coreiface/tests/unixfs.go new file mode 100644 index 0000000000..b6fa6aee4e --- /dev/null +++ b/coreiface/tests/unixfs.go @@ -0,0 +1,1080 @@ +package tests + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io" + "math" + "math/rand" + "os" + "strconv" + "strings" + "sync" + "testing" + + "github.com/ipfs/boxo/coreiface/path" + + coreiface "github.com/ipfs/boxo/coreiface" + "github.com/ipfs/boxo/coreiface/options" + + "github.com/ipfs/boxo/files" + "github.com/ipfs/boxo/unixfs" + "github.com/ipfs/boxo/unixfs/importer/helpers" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" + mdag "github.com/ipfs/boxo/ipld/merkledag" + mh "github.com/multiformats/go-multihash" +) + +func (tp *TestSuite) TestUnixfs(t *testing.T) { + tp.hasApi(t, func(api coreiface.CoreAPI) error { + if api.Unixfs() == nil { + return errAPINotImplemented + } + return nil + }) + + t.Run("TestAdd", tp.TestAdd) + t.Run("TestAddPinned", tp.TestAddPinned) + t.Run("TestAddHashOnly", tp.TestAddHashOnly) + t.Run("TestGetEmptyFile", tp.TestGetEmptyFile) + t.Run("TestGetDir", tp.TestGetDir) + t.Run("TestGetNonUnixfs", tp.TestGetNonUnixfs) + t.Run("TestLs", tp.TestLs) + t.Run("TestEntriesExpired", tp.TestEntriesExpired) + t.Run("TestLsEmptyDir", tp.TestLsEmptyDir) + t.Run("TestLsNonUnixfs", tp.TestLsNonUnixfs) + t.Run("TestAddCloses", tp.TestAddCloses) + t.Run("TestGetSeek", tp.TestGetSeek) + t.Run("TestGetReadAt", tp.TestGetReadAt) +} + +// `echo -n 'hello, world!' | ipfs add` +var hello = "/ipfs/QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk" +var helloStr = "hello, world!" 
+ +// `echo -n | ipfs add` +var emptyFile = "/ipfs/QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH" + +func strFile(data string) func() files.Node { + return func() files.Node { + return files.NewBytesFile([]byte(data)) + } +} + +func twoLevelDir() func() files.Node { + return func() files.Node { + return files.NewMapDirectory(map[string]files.Node{ + "abc": files.NewMapDirectory(map[string]files.Node{ + "def": files.NewBytesFile([]byte("world")), + }), + + "bar": files.NewBytesFile([]byte("hello2")), + "foo": files.NewBytesFile([]byte("hello1")), + }) + } +} + +func flatDir() files.Node { + return files.NewMapDirectory(map[string]files.Node{ + "bar": files.NewBytesFile([]byte("hello2")), + "foo": files.NewBytesFile([]byte("hello1")), + }) +} + +func wrapped(names ...string) func(f files.Node) files.Node { + return func(f files.Node) files.Node { + for i := range names { + f = files.NewMapDirectory(map[string]files.Node{ + names[len(names)-i-1]: f, + }) + } + return f + } +} + +func (tp *TestSuite) TestAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p := func(h string) path.Resolved { + c, err := cid.Parse(h) + if err != nil { + t.Fatal(err) + } + return path.IpfsPath(c) + } + + rf, err := os.CreateTemp(os.TempDir(), "unixfs-add-real") + if err != nil { + t.Fatal(err) + } + rfp := rf.Name() + + if _, err := rf.Write([]byte(helloStr)); err != nil { + t.Fatal(err) + } + + stat, err := rf.Stat() + if err != nil { + t.Fatal(err) + } + + if err := rf.Close(); err != nil { + t.Fatal(err) + } + defer os.Remove(rfp) + + realFile := func() files.Node { + n, err := files.NewReaderPathFile(rfp, io.NopCloser(strings.NewReader(helloStr)), stat) + if err != nil { + t.Fatal(err) + } + return n + } + + cases := []struct { + name string + data func() files.Node + expect func(files.Node) files.Node + + apiOpts []options.ApiOption + + path string + err string + + wrap string + + events []coreiface.AddEvent + + opts []options.UnixfsAddOption + }{ + // Simple cases + { + name: "simpleAdd", + data: strFile(helloStr), + path: hello, + opts: []options.UnixfsAddOption{}, + }, + { + name: "addEmpty", + data: strFile(""), + path: emptyFile, + }, + // CIDv1 version / rawLeaves + { + name: "addCidV1", + data: strFile(helloStr), + path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", + opts: []options.UnixfsAddOption{options.Unixfs.CidVersion(1)}, + }, + { + name: "addCidV1NoLeaves", + data: strFile(helloStr), + path: "/ipfs/bafybeibhbcn7k7o2m6xsqkrlfiokod3nxwe47viteynhruh6uqx7hvkjfu", + opts: []options.UnixfsAddOption{options.Unixfs.CidVersion(1), options.Unixfs.RawLeaves(false)}, + }, + // Non sha256 hash vs CID + { + name: "addCidSha3", + data: strFile(helloStr), + path: "/ipfs/bafkrmichjflejeh6aren53o7pig7zk3m3vxqcoc2i5dv326k3x6obh7jry", + opts: []options.UnixfsAddOption{options.Unixfs.Hash(mh.SHA3_256)}, + }, + { + name: "addCidSha3Cid0", + data: strFile(helloStr), + err: "CIDv0 only supports sha2-256", + opts: []options.UnixfsAddOption{options.Unixfs.CidVersion(0), options.Unixfs.Hash(mh.SHA3_256)}, + }, + // Inline + { + name: "addInline", + data: strFile(helloStr), + path: "/ipfs/bafyaafikcmeaeeqnnbswy3dpfqqho33snrsccgan", + opts: []options.UnixfsAddOption{options.Unixfs.Inline(true)}, + }, + { + name: "addInlineLimit", + data: strFile(helloStr), + path: "/ipfs/bafyaafikcmeaeeqnnbswy3dpfqqho33snrsccgan", + opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(32), 
options.Unixfs.Inline(true)}, + }, + { + name: "addInlineZero", + data: strFile(""), + path: "/ipfs/bafkqaaa", + opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(0), options.Unixfs.Inline(true), options.Unixfs.RawLeaves(true)}, + }, + { //TODO: after coreapi add is used in `ipfs add`, consider making this default for inline + name: "addInlineRaw", + data: strFile(helloStr), + path: "/ipfs/bafkqadlimvwgy3zmeb3w64tmmqqq", + opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(32), options.Unixfs.Inline(true), options.Unixfs.RawLeaves(true)}, + }, + // Chunker / Layout + { + name: "addChunks", + data: strFile(strings.Repeat("aoeuidhtns", 200)), + path: "/ipfs/QmRo11d4QJrST47aaiGVJYwPhoNA4ihRpJ5WaxBWjWDwbX", + opts: []options.UnixfsAddOption{options.Unixfs.Chunker("size-4")}, + }, + { + name: "addChunksTrickle", + data: strFile(strings.Repeat("aoeuidhtns", 200)), + path: "/ipfs/QmNNhDGttafX3M1wKWixGre6PrLFGjnoPEDXjBYpTv93HP", + opts: []options.UnixfsAddOption{options.Unixfs.Chunker("size-4"), options.Unixfs.Layout(options.TrickleLayout)}, + }, + // Local + { + name: "addLocal", // better cases in sharness + data: strFile(helloStr), + path: hello, + apiOpts: []options.ApiOption{options.Api.Offline(true)}, + }, + { + name: "hashOnly", // test (non)fetchability + data: strFile(helloStr), + path: hello, + opts: []options.UnixfsAddOption{options.Unixfs.HashOnly(true)}, + }, + // multi file + { + name: "simpleDirNoWrap", + data: flatDir, + path: "/ipfs/QmRKGpFfR32FVXdvJiHfo4WJ5TDYBsM1P9raAp1p6APWSp", + }, + { + name: "simpleDir", + data: flatDir, + wrap: "t", + expect: wrapped("t"), + path: "/ipfs/Qmc3nGXm1HtUVCmnXLQHvWcNwfdZGpfg2SRm1CxLf7Q2Rm", + }, + { + name: "twoLevelDir", + data: twoLevelDir(), + wrap: "t", + expect: wrapped("t"), + path: "/ipfs/QmPwsL3T5sWhDmmAWZHAzyjKtMVDS9a11aHNRqb3xoVnmg", + }, + // wrapped + { + name: "addWrapped", + path: "/ipfs/QmVE9rNpj5doj7XHzp5zMUxD7BJgXEqx4pe3xZ3JBReWHE", + data: func() files.Node { + return files.NewBytesFile([]byte(helloStr)) + }, + wrap: "foo", + expect: wrapped("foo"), + }, + // hidden + { + name: "hiddenFilesAdded", + data: func() files.Node { + return files.NewMapDirectory(map[string]files.Node{ + ".bar": files.NewBytesFile([]byte("hello2")), + "bar": files.NewBytesFile([]byte("hello2")), + "foo": files.NewBytesFile([]byte("hello1")), + }) + }, + wrap: "t", + expect: wrapped("t"), + path: "/ipfs/QmPXLSBX382vJDLrGakcbrZDkU3grfkjMox7EgSC9KFbtQ", + }, + // NoCopy + { + name: "simpleNoCopy", + data: realFile, + path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", + opts: []options.UnixfsAddOption{options.Unixfs.Nocopy(true)}, + }, + { + name: "noCopyNoRaw", + data: realFile, + path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", + opts: []options.UnixfsAddOption{options.Unixfs.Nocopy(true), options.Unixfs.RawLeaves(false)}, + err: "nocopy option requires '--raw-leaves' to be enabled as well", + }, + { + name: "noCopyNoPath", + data: strFile(helloStr), + path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", + opts: []options.UnixfsAddOption{options.Unixfs.Nocopy(true)}, + err: helpers.ErrMissingFsRef.Error(), + }, + // Events / Progress + { + name: "simpleAddEvent", + data: strFile(helloStr), + path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", + events: []coreiface.AddEvent{ + {Name: "bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", Path: p("bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa"), Size: 
strconv.Itoa(len(helloStr))}, + }, + opts: []options.UnixfsAddOption{options.Unixfs.RawLeaves(true)}, + }, + { + name: "silentAddEvent", + data: twoLevelDir(), + path: "/ipfs/QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr", + events: []coreiface.AddEvent{ + {Name: "abc", Path: p("QmU7nuGs2djqK99UNsNgEPGh6GV4662p6WtsgccBNGTDxt"), Size: "62"}, + {Name: "", Path: p("QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr"), Size: "229"}, + }, + opts: []options.UnixfsAddOption{options.Unixfs.Silent(true)}, + }, + { + name: "dirAddEvents", + data: twoLevelDir(), + path: "/ipfs/QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr", + events: []coreiface.AddEvent{ + {Name: "abc/def", Path: p("QmNyJpQkU1cEkBwMDhDNFstr42q55mqG5GE5Mgwug4xyGk"), Size: "13"}, + {Name: "bar", Path: p("QmS21GuXiRMvJKHos4ZkEmQDmRBqRaF5tQS2CQCu2ne9sY"), Size: "14"}, + {Name: "foo", Path: p("QmfAjGiVpTN56TXi6SBQtstit5BEw3sijKj1Qkxn6EXKzJ"), Size: "14"}, + {Name: "abc", Path: p("QmU7nuGs2djqK99UNsNgEPGh6GV4662p6WtsgccBNGTDxt"), Size: "62"}, + {Name: "", Path: p("QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr"), Size: "229"}, + }, + }, + { + name: "progress1M", + data: func() files.Node { + return files.NewReaderFile(bytes.NewReader(bytes.Repeat([]byte{0}, 1000000))) + }, + path: "/ipfs/QmXXNNbwe4zzpdMg62ZXvnX1oU7MwSrQ3vAEtuwFKCm1oD", + events: []coreiface.AddEvent{ + {Name: "", Bytes: 262144}, + {Name: "", Bytes: 524288}, + {Name: "", Bytes: 786432}, + {Name: "", Bytes: 1000000}, + {Name: "QmXXNNbwe4zzpdMg62ZXvnX1oU7MwSrQ3vAEtuwFKCm1oD", Path: p("QmXXNNbwe4zzpdMg62ZXvnX1oU7MwSrQ3vAEtuwFKCm1oD"), Size: "1000256"}, + }, + wrap: "", + opts: []options.UnixfsAddOption{options.Unixfs.Progress(true)}, + }, + } + + for _, testCase := range cases { + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // recursive logic + + data := testCase.data() + if testCase.wrap != "" { + data = files.NewMapDirectory(map[string]files.Node{ + testCase.wrap: data, + }) + } + + // handle events if relevant to test case + + opts := testCase.opts + eventOut := make(chan interface{}) + var evtWg sync.WaitGroup + if len(testCase.events) > 0 { + opts = append(opts, options.Unixfs.Events(eventOut)) + evtWg.Add(1) + + go func() { + defer evtWg.Done() + expected := testCase.events + + for evt := range eventOut { + event, ok := evt.(*coreiface.AddEvent) + if !ok { + t.Error("unexpected event type") + continue + } + + if len(expected) < 1 { + t.Error("got more events than expected") + continue + } + + if expected[0].Size != event.Size { + t.Errorf("Event.Size didn't match, %s != %s", expected[0].Size, event.Size) + } + + if expected[0].Name != event.Name { + t.Errorf("Event.Name didn't match, %s != %s", expected[0].Name, event.Name) + } + + if expected[0].Path != nil && event.Path != nil { + if expected[0].Path.Cid().String() != event.Path.Cid().String() { + t.Errorf("Event.Hash didn't match, %s != %s", expected[0].Path, event.Path) + } + } else if event.Path != expected[0].Path { + t.Errorf("Event.Hash didn't match, %s != %s", expected[0].Path, event.Path) + } + if expected[0].Bytes != event.Bytes { + t.Errorf("Event.Bytes didn't match, %d != %d", expected[0].Bytes, event.Bytes) + } + + expected = expected[1:] + } + + if len(expected) > 0 { + t.Errorf("%d event(s) didn't arrive", len(expected)) + } + }() + } + + tapi, err := api.WithOptions(testCase.apiOpts...) + if err != nil { + t.Fatal(err) + } + + // Add! + + p, err := tapi.Unixfs().Add(ctx, data, opts...) 
+ close(eventOut) + evtWg.Wait() + if testCase.err != "" { + if err == nil { + t.Fatalf("expected an error: %s", testCase.err) + } + if err.Error() != testCase.err { + t.Fatalf("expected an error: '%s' != '%s'", err.Error(), testCase.err) + } + return + } + if err != nil { + t.Fatal(err) + } + + if p.String() != testCase.path { + t.Errorf("expected path %s, got: %s", testCase.path, p) + } + + // compare file structure with Unixfs().Get + + var cmpFile func(origName string, orig files.Node, gotName string, got files.Node) + cmpFile = func(origName string, orig files.Node, gotName string, got files.Node) { + _, origDir := orig.(files.Directory) + _, gotDir := got.(files.Directory) + + if origName != gotName { + t.Errorf("file name mismatch, orig='%s', got='%s'", origName, gotName) + } + + if origDir != gotDir { + t.Fatalf("file type mismatch on %s", origName) + } + + if !gotDir { + defer orig.Close() + defer got.Close() + + do, err := io.ReadAll(orig.(files.File)) + if err != nil { + t.Fatal(err) + } + + dg, err := io.ReadAll(got.(files.File)) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(do, dg) { + t.Fatal("data not equal") + } + + return + } + + origIt := orig.(files.Directory).Entries() + gotIt := got.(files.Directory).Entries() + + for { + if origIt.Next() { + if !gotIt.Next() { + t.Fatal("gotIt out of entries before origIt") + } + } else { + if gotIt.Next() { + t.Fatal("origIt out of entries before gotIt") + } + break + } + + cmpFile(origIt.Name(), origIt.Node(), gotIt.Name(), gotIt.Node()) + } + if origIt.Err() != nil { + t.Fatal(origIt.Err()) + } + if gotIt.Err() != nil { + t.Fatal(gotIt.Err()) + } + } + + f, err := tapi.Unixfs().Get(ctx, p) + if err != nil { + t.Fatal(err) + } + + orig := testCase.data() + if testCase.expect != nil { + orig = testCase.expect(orig) + } + + cmpFile("", orig, "", f) + }) + } +} + +func (tp *TestSuite) TestAddPinned(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true)) + if err != nil { + t.Fatal(err) + } + + pins, err := accPins(api.Pin().Ls(ctx)) + if err != nil { + t.Fatal(err) + } + if len(pins) != 1 { + t.Fatalf("expected 1 pin, got %d", len(pins)) + } + + if pins[0].Path().String() != "/ipld/QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk" { + t.Fatalf("got unexpected pin: %s", pins[0].Path().String()) + } +} + +func (tp *TestSuite) TestAddHashOnly(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + p, err := api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.HashOnly(true)) + if err != nil { + t.Fatal(err) + } + + if p.String() != hello { + t.Errorf("unexpected path: %s", p.String()) + } + + _, err = api.Block().Get(ctx, p) + if err == nil { + t.Fatal("expected an error") + } + if !ipld.IsNotFound(err) { + t.Errorf("unexpected error: %s", err.Error()) + } +} + +func (tp *TestSuite) TestGetEmptyFile(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Unixfs().Add(ctx, files.NewBytesFile([]byte{})) + if err != nil { + t.Fatal(err) + } + + emptyFilePath := path.New(emptyFile) + + r, err := api.Unixfs().Get(ctx, emptyFilePath) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 1) // non-zero so that Read()
actually tries to read + n, err := io.ReadFull(r.(files.File), buf) + if err != nil && err != io.EOF { + t.Error(err) + } + if !bytes.HasPrefix(buf, []byte{0x00}) { + t.Fatalf("expected empty data, got [%s] [read=%d]", buf, n) + } +} + +func (tp *TestSuite) TestGetDir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + edir := unixfs.EmptyDirNode() + err = api.Dag().Add(ctx, edir) + if err != nil { + t.Fatal(err) + } + p := path.IpfsPath(edir.Cid()) + + emptyDir, err := api.Object().New(ctx, options.Object.Type("unixfs-dir")) + if err != nil { + t.Fatal(err) + } + + if p.String() != path.IpfsPath(emptyDir.Cid()).String() { + t.Fatalf("expected path %s, got: %s", emptyDir.Cid(), p.String()) + } + + r, err := api.Unixfs().Get(ctx, path.IpfsPath(emptyDir.Cid())) + if err != nil { + t.Fatal(err) + } + + if _, ok := r.(files.Directory); !ok { + t.Fatalf("expected a directory") + } +} + +func (tp *TestSuite) TestGetNonUnixfs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + nd := new(mdag.ProtoNode) + err = api.Dag().Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + _, err = api.Unixfs().Get(ctx, path.IpfsPath(nd.Cid())) + if !strings.Contains(err.Error(), "proto: required field") { + t.Fatalf("expected protobuf error, got: %s", err) + } +} + +func (tp *TestSuite) TestLs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + r := strings.NewReader("content-of-file") + p, err := api.Unixfs().Add(ctx, files.NewMapDirectory(map[string]files.Node{ + "name-of-file": files.NewReaderFile(r), + "name-of-symlink": files.NewLinkFile("/foo/bar", nil), + })) + if err != nil { + t.Fatal(err) + } + + entries, err := api.Unixfs().Ls(ctx, p) + if err != nil { + t.Fatal(err) + } + + entry := <-entries + if entry.Err != nil { + t.Fatal(entry.Err) + } + if entry.Size != 15 { + t.Errorf("expected size = 15, got %d", entry.Size) + } + if entry.Name != "name-of-file" { + t.Errorf("expected name = name-of-file, got %s", entry.Name) + } + if entry.Type != coreiface.TFile { + t.Errorf("wrong type %s", entry.Type) + } + if entry.Cid.String() != "QmX3qQVKxDGz3URVC3861Z3CKtQKGBn6ffXRBBWGMFz9Lr" { + t.Errorf("expected cid = QmX3qQVKxDGz3URVC3861Z3CKtQKGBn6ffXRBBWGMFz9Lr, got %s", entry.Cid) + } + entry = <-entries + if entry.Err != nil { + t.Fatal(entry.Err) + } + if entry.Type != coreiface.TSymlink { + t.Errorf("wrong type %s", entry.Type) + } + if entry.Name != "name-of-symlink" { + t.Errorf("expected name = name-of-symlink, got %s", entry.Name) + } + if entry.Target != "/foo/bar" { + t.Errorf("expected symlink target to be /foo/bar, got %s", entry.Target) + } + + if l, ok := <-entries; ok { + t.Errorf("didn't expect a second link") + if l.Err != nil { + t.Error(l.Err) + } + } +} + +func (tp *TestSuite) TestEntriesExpired(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + r := strings.NewReader("content-of-file") + p, err := api.Unixfs().Add(ctx, files.NewMapDirectory(map[string]files.Node{ + "name-of-file": files.NewReaderFile(r), + })) + if err != nil { + t.Fatal(err) + } + + ctx, cancel = context.WithCancel(ctx) + + nd, err := api.Unixfs().Get(ctx, p) + if err != nil { + t.Fatal(err) + 
} + cancel() + + it := files.ToDir(nd).Entries() + if it == nil { + t.Fatal("it was nil") + } + + if it.Next() { + t.Fatal("Next succeeded") + } + + if it.Err() != context.Canceled { + t.Fatalf("unexpected error %s", it.Err()) + } + + if it.Next() { + t.Fatal("Next succeeded") + } +} + +func (tp *TestSuite) TestLsEmptyDir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = api.Unixfs().Add(ctx, files.NewSliceDirectory([]files.DirEntry{})) + if err != nil { + t.Fatal(err) + } + + emptyDir, err := api.Object().New(ctx, options.Object.Type("unixfs-dir")) + if err != nil { + t.Fatal(err) + } + + links, err := api.Unixfs().Ls(ctx, path.IpfsPath(emptyDir.Cid())) + if err != nil { + t.Fatal(err) + } + + if len(links) != 0 { + t.Fatalf("expected 0 links, got %d", len(links)) + } +} + +// TODO(lgierth) this should test properly, with len(links) > 0 +func (tp *TestSuite) TestLsNonUnixfs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + nd, err := cbor.WrapObject(map[string]interface{}{"foo": "bar"}, math.MaxUint64, -1) + if err != nil { + t.Fatal(err) + } + + err = api.Dag().Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + links, err := api.Unixfs().Ls(ctx, path.IpfsPath(nd.Cid())) + if err != nil { + t.Fatal(err) + } + + if len(links) != 0 { + t.Fatalf("expected 0 links, got %d", len(links)) + } +} + +type closeTestF struct { + files.File + closed bool + + t *testing.T +} + +type closeTestD struct { + files.Directory + closed bool + + t *testing.T +} + +func (f *closeTestD) Close() error { + f.t.Helper() + if f.closed { + f.t.Fatal("already closed") + } + f.closed = true + return nil +} + +func (f *closeTestF) Close() error { + if f.closed { + f.t.Fatal("already closed") + } + f.closed = true + return nil +} + +func (tp *TestSuite) TestAddCloses(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + n4 := &closeTestF{files.NewBytesFile([]byte("foo")), false, t} + d3 := &closeTestD{files.NewMapDirectory(map[string]files.Node{ + "sub": n4, + }), false, t} + n2 := &closeTestF{files.NewBytesFile([]byte("bar")), false, t} + n1 := &closeTestF{files.NewBytesFile([]byte("baz")), false, t} + d0 := &closeTestD{files.NewMapDirectory(map[string]files.Node{ + "a": d3, + "b": n1, + "c": n2, + }), false, t} + + _, err = api.Unixfs().Add(ctx, d0) + if err != nil { + t.Fatal(err) + } + + for i, n := range []*closeTestF{n1, n2, n4} { + if !n.closed { + t.Errorf("file %d not closed!", i) + } + } + + for i, n := range []*closeTestD{d0, d3} { + if !n.closed { + t.Errorf("dir %d not closed!", i) + } + } +} + +func (tp *TestSuite) TestGetSeek(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + dataSize := int64(100000) + tf := files.NewReaderFile(io.LimitReader(rand.New(rand.NewSource(1403768328)), dataSize)) + + p, err := api.Unixfs().Add(ctx, tf, options.Unixfs.Chunker("size-100")) + if err != nil { + t.Fatal(err) + } + + r, err := api.Unixfs().Get(ctx, p) + if err != nil { + t.Fatal(err) + } + + f := files.ToFile(r) + if f == nil { + t.Fatal("not a file") + } + + orig := make([]byte, dataSize) + if _, err := io.ReadFull(f, orig); err != nil { + t.Fatal(err) + } + f.Close() 
+ + origR := bytes.NewReader(orig) + + r, err = api.Unixfs().Get(ctx, p) + if err != nil { + t.Fatal(err) + } + + f = files.ToFile(r) + if f == nil { + t.Fatal("not a file") + } + + test := func(offset int64, whence int, read int, expect int64, shouldEof bool) { + t.Run(fmt.Sprintf("seek%d+%d-r%d-%d", whence, offset, read, expect), func(t *testing.T) { + n, err := f.Seek(offset, whence) + if err != nil { + t.Fatal(err) + } + origN, err := origR.Seek(offset, whence) + if err != nil { + t.Fatal(err) + } + + if n != origN { + t.Fatalf("offsets didn't match, expected %d, got %d", origN, n) + } + + buf := make([]byte, read) + origBuf := make([]byte, read) + origRead, err := origR.Read(origBuf) + if err != nil { + t.Fatalf("orig: %s", err) + } + r, err := io.ReadFull(f, buf) + switch { + case shouldEof && err != nil && err != io.ErrUnexpectedEOF: + fallthrough + case !shouldEof && err != nil: + t.Fatalf("f: %s", err) + case shouldEof: + _, err := f.Read([]byte{0}) + if err != io.EOF { + t.Fatal("expected EOF") + } + _, err = origR.Read([]byte{0}) + if err != io.EOF { + t.Fatal("expected EOF (orig)") + } + } + + if int64(r) != expect { + t.Fatal("read wrong amount of data") + } + if r != origRead { + t.Fatal("read different amount of data than bytes.Reader") + } + if !bytes.Equal(buf, origBuf) { + fmt.Fprintf(os.Stderr, "original:\n%s\n", hex.Dump(origBuf)) + fmt.Fprintf(os.Stderr, "got:\n%s\n", hex.Dump(buf)) + t.Fatal("data didn't match") + } + }) + } + + test(3, io.SeekCurrent, 10, 10, false) + test(3, io.SeekCurrent, 10, 10, false) + test(500, io.SeekCurrent, 10, 10, false) + test(350, io.SeekStart, 100, 100, false) + test(-123, io.SeekCurrent, 100, 100, false) + test(0, io.SeekStart, int(dataSize), dataSize, false) + test(dataSize-50, io.SeekStart, 100, 50, true) + test(-5, io.SeekEnd, 100, 5, true) +} + +func (tp *TestSuite) TestGetReadAt(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + api, err := tp.makeAPI(ctx) + if err != nil { + t.Fatal(err) + } + + dataSize := int64(100000) + tf := files.NewReaderFile(io.LimitReader(rand.New(rand.NewSource(1403768328)), dataSize)) + + p, err := api.Unixfs().Add(ctx, tf, options.Unixfs.Chunker("size-100")) + if err != nil { + t.Fatal(err) + } + + r, err := api.Unixfs().Get(ctx, p) + if err != nil { + t.Fatal(err) + } + + f, ok := r.(interface { + files.File + io.ReaderAt + }) + if !ok { + t.Skip("ReaderAt not implemented") + } + + orig := make([]byte, dataSize) + if _, err := io.ReadFull(f, orig); err != nil { + t.Fatal(err) + } + f.Close() + + origR := bytes.NewReader(orig) + + if _, err := api.Unixfs().Get(ctx, p); err != nil { + t.Fatal(err) + } + + test := func(offset int64, read int, expect int64, shouldEof bool) { + t.Run(fmt.Sprintf("readat%d-r%d-%d", offset, read, expect), func(t *testing.T) { + origBuf := make([]byte, read) + origRead, err := origR.ReadAt(origBuf, offset) + if err != nil && err != io.EOF { + t.Fatalf("orig: %s", err) + } + buf := make([]byte, read) + r, err := f.ReadAt(buf, offset) + if shouldEof { + if err != io.EOF { + t.Fatal("expected EOF, got: ", err) + } + } else if err != nil { + t.Fatal("got: ", err) + } + + if int64(r) != expect { + t.Fatal("read wrong amount of data") + } + if r != origRead { + t.Fatal("read different amount of data than bytes.Reader") + } + if !bytes.Equal(buf, origBuf) { + fmt.Fprintf(os.Stderr, "original:\n%s\n", hex.Dump(origBuf)) + fmt.Fprintf(os.Stderr, "got:\n%s\n", hex.Dump(buf)) + t.Fatal("data didn't match") + } + }) + } + + test(3, 10, 10, false) 
+ test(13, 10, 10, false) + test(513, 10, 10, false) + test(350, 100, 100, false) + test(0, int(dataSize), dataSize, false) + test(dataSize-50, 100, 50, true) +} diff --git a/coreiface/unixfs.go b/coreiface/unixfs.go new file mode 100644 index 0000000000..606bc8e781 --- /dev/null +++ b/coreiface/unixfs.go @@ -0,0 +1,80 @@ +package iface + +import ( + "context" + + "github.com/ipfs/boxo/coreiface/options" + path "github.com/ipfs/boxo/coreiface/path" + + "github.com/ipfs/boxo/files" + "github.com/ipfs/go-cid" +) + +type AddEvent struct { + Name string + Path path.Resolved `json:",omitempty"` + Bytes int64 `json:",omitempty"` + Size string `json:",omitempty"` +} + +// FileType is an enum of possible UnixFS file types. +type FileType int32 + +const ( + // TUnknown means the file type isn't known (e.g., it hasn't been + // resolved). + TUnknown FileType = iota + // TFile is a regular file. + TFile + // TDirectory is a directory. + TDirectory + // TSymlink is a symlink. + TSymlink +) + +func (t FileType) String() string { + switch t { + case TUnknown: + return "unknown" + case TFile: + return "file" + case TDirectory: + return "directory" + case TSymlink: + return "symlink" + default: + return "" + } +} + +// DirEntry is a directory entry returned by `Ls`. +type DirEntry struct { + Name string + Cid cid.Cid + + // Only filled when asked to resolve the directory entry. + Size uint64 // The size of the file in bytes (or the size of the symlink). + Type FileType // The type of the file. + Target string // The symlink target (if a symlink). + + Err error +} + +// UnixfsAPI is the basic interface to immutable files in IPFS. +// NOTE: This API is heavily WIP, things are guaranteed to break frequently. +type UnixfsAPI interface { + // Add imports the data from the reader into a merkledag file + // + // TODO: a long useful comment on how to use this for many different scenarios + Add(context.Context, files.Node, ...options.UnixfsAddOption) (path.Resolved, error) + + // Get returns a read-only handle to a file tree referenced by a path + // + // Note that some implementations of this API may apply the specified context + // to operations performed on the returned file + Get(context.Context, path.Path) (files.Node, error) + + // Ls returns the list of links in a directory. Links aren't guaranteed to be + // returned in order + Ls(context.Context, path.Path, ...options.UnixfsLsOption) (<-chan DirEntry, error) +} diff --git a/coreiface/util.go b/coreiface/util.go new file mode 100644 index 0000000000..6d58bf40d2 --- /dev/null +++ b/coreiface/util.go @@ -0,0 +1,20 @@ +package iface + +import ( + "context" + "io" +) + +type Reader interface { + ReadSeekCloser + Size() uint64 + CtxReadFull(context.Context, []byte) (int, error) +} + +// A ReadSeekCloser implements interfaces to read, copy, seek and close. +type ReadSeekCloser interface { + io.Reader + io.Seeker + io.Closer + io.WriterTo +} diff --git a/datastore/dshelp/.github/ISSUE_TEMPLATE/open_an_issue.md b/datastore/dshelp/.github/ISSUE_TEMPLATE/open_an_issue.md new file mode 100644 index 0000000000..4fcbd00aca --- /dev/null +++ b/datastore/dshelp/.github/ISSUE_TEMPLATE/open_an_issue.md @@ -0,0 +1,19 @@ +--- +name: Open an issue +about: Only for actionable issues relevant to this repository.
+title: ''
+labels: need/triage
+assignees: ''
+
+---
+
diff --git a/datastore/dshelp/key.go b/datastore/dshelp/key.go
new file mode 100644
index 0000000000..32b73a61e6
--- /dev/null
+++ b/datastore/dshelp/key.go
@@ -0,0 +1,51 @@
+// Package dshelp provides utilities for parsing and creating
+// datastore keys used by go-ipfs
+package dshelp
+
+import (
+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
+	"github.com/multiformats/go-base32"
+	mh "github.com/multiformats/go-multihash"
+)
+
+// NewKeyFromBinary creates a new key from a byte slice.
+func NewKeyFromBinary(rawKey []byte) datastore.Key {
+	buf := make([]byte, 1+base32.RawStdEncoding.EncodedLen(len(rawKey)))
+	buf[0] = '/'
+	base32.RawStdEncoding.Encode(buf[1:], rawKey)
+	return datastore.RawKey(string(buf))
+}
+
+// BinaryFromDsKey returns the byte slice corresponding to the given Key.
+func BinaryFromDsKey(k datastore.Key) ([]byte, error) {
+	return base32.RawStdEncoding.DecodeString(k.String()[1:])
+}
+
+// MultihashToDsKey creates a Key from the given Multihash.
+// If working with Cids, you can call cid.Hash() to obtain
+// the multihash. Note that different CIDs might represent
+// the same multihash.
+func MultihashToDsKey(k mh.Multihash) datastore.Key {
+	return NewKeyFromBinary(k)
+}
+
+// DsKeyToMultihash converts a dsKey to the corresponding Multihash.
+func DsKeyToMultihash(dsKey datastore.Key) (mh.Multihash, error) {
+	kb, err := BinaryFromDsKey(dsKey)
+	if err != nil {
+		return nil, err
+	}
+	return mh.Cast(kb)
+}
+
+// DsKeyToCidV1 converts the given Key (which should be a raw multihash
+// key) to a CIDv1 of the given codec type (see
+// https://godoc.org/github.com/ipfs/go-cid#pkg-constants).
+func DsKeyToCidV1(dsKey datastore.Key, codecType uint64) (cid.Cid, error) {
+	hash, err := DsKeyToMultihash(dsKey)
+	if err != nil {
+		return cid.Cid{}, err
+	}
+	return cid.NewCidV1(codecType, hash), nil
+}
diff --git a/datastore/dshelp/key_test.go b/datastore/dshelp/key_test.go
new file mode 100644
index 0000000000..ff9fcc7d6f
--- /dev/null
+++ b/datastore/dshelp/key_test.go
@@ -0,0 +1,24 @@
+package dshelp
+
+import (
+	"testing"
+
+	cid "github.com/ipfs/go-cid"
+)
+
+func TestKey(t *testing.T) {
+	c, _ := cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
+	dsKey := MultihashToDsKey(c.Hash())
+	mh, err := DsKeyToMultihash(dsKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(c.Hash()) != string(mh) {
+		t.Fatal("should have parsed the same multihash")
+	}
+
+	c2, err := DsKeyToCidV1(dsKey, cid.Raw)
+	if err != nil || c.Equals(c2) || c2.Type() != cid.Raw || c2.Version() != 1 {
+		t.Fatal("should have been converted to CIDv1-raw")
+	}
+}
diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS
new file mode 100644
index 0000000000..7781c410c9
--- /dev/null
+++ b/docs/CODEOWNERS
@@ -0,0 +1,9 @@
+# Code owners are automatically requested for review when someone opens a pull
+# request that modifies code that they own. Code owners are not automatically
+# requested to review draft pull requests.
+
+# Default
+* @ipfs/kubo-maintainers
+
+# HTTP Gateway
+gateway/ @lidel @hacdias
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000000..438766e4c8
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,11 @@
+# boxo examples and tutorials
+
+In this folder, you can find some examples to help you get started using boxo and its associated libraries in your applications.
+
+Let us know if you find any issues. If you want to contribute and add a new tutorial, feel free to submit a PR. Thank you!
+
+## Examples and Tutorials
+
+- [Fetching a UnixFS file by CID](./unixfs-file-cid)
+- [Gateway backed by a CAR file](./gateway/car)
+- [Gateway backed by a remote blockstore and IPNS resolver](./gateway/proxy)
diff --git a/examples/gateway/car/README.md b/examples/gateway/car/README.md
new file mode 100644
index 0000000000..2fea3fa66f
--- /dev/null
+++ b/examples/gateway/car/README.md
@@ -0,0 +1,41 @@
+# HTTP Gateway backed by a CAR File
+
+This is an example that shows how to build a Gateway backed by the contents of
+a CAR file. A [CAR file](https://ipld.io/specs/transport/car/) is a Content
+Addressable aRchive that contains blocks.
+
+## Build
+
+```bash
+> go build -o car-gateway
+```
+
+## Usage
+
+First, you will need some content stored as a CAR file. You can easily
+export your favorite website, or any other content, using:
+
+```
+ipfs dag export <CID> > data.car
+```
+
+Then, you can start the gateway with:
+
+
+```
+./car-gateway -c data.car -p 8040
+```
+
+### Subdomain gateway
+
+Now you can access the gateway at [localhost:8040](http://localhost:8040). It will
+behave like a regular [Subdomain IPFS Gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway),
+except that all content is served
+from the CAR file. Therefore, things such as IPNS resolution and fetching content
+from other nodes in the IPFS network won't work.
+
+### Path gateway
+
+If you don't need Origin isolation and only care about hosting flat files,
+a plain [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at [127.0.0.1:8040](http://127.0.0.1:8040)
+may suffice.
diff --git a/examples/gateway/car/main.go b/examples/gateway/car/main.go
new file mode 100644
index 0000000000..e4b36b6d81
--- /dev/null
+++ b/examples/gateway/car/main.go
@@ -0,0 +1,102 @@
+package main
+
+import (
+	"flag"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"strconv"
+
+	"github.com/ipfs/boxo/examples/gateway/common"
+	"github.com/ipfs/boxo/gateway"
+	"github.com/ipfs/go-blockservice"
+	"github.com/ipfs/go-cid"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	carblockstore "github.com/ipld/go-car/v2/blockstore"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+func main() {
+	carFilePtr := flag.String("c", "", "path to CAR file to back this gateway from")
+	port := flag.Int("p", 8040, "port to run this gateway on")
+	flag.Parse()
+
+	blockService, roots, f, err := newBlockServiceFromCAR(*carFilePtr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+
+	gwAPI, err := common.NewBlocksGateway(blockService, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	handler := common.NewBlocksHandler(gwAPI, *port)
+
+	// Initialize the public gateways that we will want to have available through
+	// Host header rewriting. This step is optional and only required if you're
+	// running multiple public gateways and want different settings and support
+	// for DNSLink and Subdomain Gateways.
+	noDNSLink := false // If you set DNSLink to point at the CID from CAR, you can load it!
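+	// Each gateway.Specification below describes how requests for one hostname
+	// are handled: Paths restricts which content path prefixes are served,
+	// UseSubdomains enables CID-in-subdomain (Origin-isolated) URLs, and
+	// NoDNSLink toggles DNSLink resolution for that host.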
+	publicGateways := map[string]*gateway.Specification{
+		// Support public requests with Host: CID.ipfs.example.net and ID.ipns.example.net
+		"example.net": {
+			Paths:         []string{"/ipfs", "/ipns"},
+			NoDNSLink:     noDNSLink,
+			UseSubdomains: true,
+		},
+		// Support local requests
+		"localhost": {
+			Paths:         []string{"/ipfs", "/ipns"},
+			NoDNSLink:     noDNSLink,
+			UseSubdomains: true,
+		},
+	}
+
+	// Creates a mux to serve the prometheus metrics alongside the gateway. This
+	// step is optional and only required if you need or want to access the metrics.
+	// You may also decide to expose the metrics on a different path or port.
+	mux := http.NewServeMux()
+	mux.Handle("/debug/metrics/prometheus", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
+	mux.Handle("/", handler)
+
+	// Then wrap the mux with the hostname handler. Please note that the metrics
+	// will not be available under the previously defined publicGateways.
+	// You will be able to access the metrics via 127.0.0.1 but not localhost
+	// or example.net. If you want to expose the metrics on such gateways,
+	// you will have to add the path "/debug" to the variable Paths.
+	handler = gateway.WithHostname(mux, gwAPI, publicGateways, noDNSLink)
+
+	log.Printf("Listening on http://localhost:%d", *port)
+	log.Printf("Metrics available at http://127.0.0.1:%d/debug/metrics/prometheus", *port)
+	for _, cid := range roots {
+		log.Printf("Hosting CAR root at http://localhost:%d/ipfs/%s", *port, cid.String())
+	}
+
+	if err := http.ListenAndServe(":"+strconv.Itoa(*port), handler); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func newBlockServiceFromCAR(filepath string) (blockservice.BlockService, []cid.Cid, io.Closer, error) {
+	r, err := os.Open(filepath)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	bs, err := carblockstore.NewReadOnly(r, nil)
+	if err != nil {
+		_ = r.Close()
+		return nil, nil, nil, err
+	}
+
+	roots, err := bs.Roots()
+	if err != nil {
+		// Close the underlying file; otherwise it leaks on this error path.
+		_ = r.Close()
+		return nil, nil, nil, err
+	}
+
+	blockService := blockservice.New(bs, offline.Exchange(bs))
+	return blockService, roots, r, nil
+}
diff --git a/examples/gateway/car/main_test.go b/examples/gateway/car/main_test.go
new file mode 100644
index 0000000000..fe71741982
--- /dev/null
+++ b/examples/gateway/car/main_test.go
@@ -0,0 +1,108 @@
+package main
+
+import (
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/ipfs/boxo/examples/gateway/common"
+	"github.com/ipld/go-ipld-prime/codec/dagjson"
+	"github.com/ipld/go-ipld-prime/node/basicnode"
+	"github.com/stretchr/testify/assert"
+)
+
+const (
+	BaseCID = "bafybeidhua2wpy27vo3t7ms22ybc7m7iqkm2opiebpjmo24lvixcnvznnu"
+)
+
+func newTestServer() (*httptest.Server, io.Closer, error) {
+	blockService, _, f, err := newBlockServiceFromCAR("./test.car")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gateway, err := common.NewBlocksGateway(blockService, nil)
+	if err != nil {
+		_ = f.Close()
+		return nil, nil, err
+	}
+
+	handler := common.NewBlocksHandler(gateway, 0)
+	ts := httptest.NewServer(handler)
+	return ts, f, nil
+}
+
+func TestDirectoryTraverse(t *testing.T) {
+	ts, f, err := newTestServer()
+	assert.Nil(t, err)
+	defer f.Close()
+
+	res, err := http.Get(ts.URL + "/ipfs/" + BaseCID + "/hello.txt")
+	assert.Nil(t, err)
+
+	body, err := io.ReadAll(res.Body)
+	res.Body.Close()
+	assert.Nil(t, err)
+	assert.EqualValues(t, string(body), "hello world\n")
+}
+
+func TestFile(t *testing.T) {
+	ts, f, err := newTestServer()
+	assert.Nil(t, err)
+	defer f.Close()
+
+	res, err := http.Get(ts.URL +
"/ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4") + assert.Nil(t, err) + + body, err := io.ReadAll(res.Body) + res.Body.Close() + assert.Nil(t, err) + assert.EqualValues(t, string(body), "hello world\n") +} + +func TestDirectoryAsDAG(t *testing.T) { + ts, f, err := newTestServer() + assert.Nil(t, err) + defer f.Close() + + res, err := http.Get(ts.URL + "/ipfs/" + BaseCID + "?format=dag-json") + assert.Nil(t, err) + defer res.Body.Close() + + contentType := res.Header.Get("Content-Type") + assert.EqualValues(t, contentType, "application/vnd.ipld.dag-json") + + // Parses the DAG-JSON response. + dag := basicnode.Prototype.Any.NewBuilder() + err = dagjson.Decode(dag, res.Body) + assert.Nil(t, err) + + // Checks for the links inside the logical model. + links, err := dag.Build().LookupByString("Links") + assert.Nil(t, err) + + // Checks if there are 2 links. + assert.EqualValues(t, links.Length(), 2) + + // Check if the first item is correct. + n, err := links.LookupByIndex(0) + assert.Nil(t, err) + assert.NotNil(t, n) + + nameNode, err := n.LookupByString("Name") + assert.Nil(t, err) + assert.NotNil(t, nameNode) + + name, err := nameNode.AsString() + assert.Nil(t, err) + assert.EqualValues(t, name, "eye.png") + + hashNode, err := n.LookupByString("Hash") + assert.Nil(t, err) + assert.NotNil(t, hashNode) + + hash, err := hashNode.AsLink() + assert.Nil(t, err) + assert.EqualValues(t, hash.String(), "bafybeigmlfksb374fdkxih4urny2yiyazyra2375y2e4a72b3jcrnthnau") +} diff --git a/examples/gateway/car/test.car b/examples/gateway/car/test.car new file mode 100644 index 0000000000..714a73547a Binary files /dev/null and b/examples/gateway/car/test.car differ diff --git a/examples/gateway/common/blocks.go b/examples/gateway/common/blocks.go new file mode 100644 index 0000000000..134c82e2d0 --- /dev/null +++ b/examples/gateway/common/blocks.go @@ -0,0 +1,251 @@ +package common + +import ( + "context" + "errors" + "fmt" + "net/http" + gopath "path" + + "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/files" + "github.com/ipfs/boxo/gateway" + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + bsfetcher "github.com/ipfs/go-fetcher/impl/blockservice" + blockstore "github.com/ipfs/go-ipfs-blockstore" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-namesys" + "github.com/ipfs/go-namesys/resolve" + ipfspath "github.com/ipfs/go-path" + "github.com/ipfs/go-path/resolver" + "github.com/ipfs/go-unixfs" + ufile "github.com/ipfs/go-unixfs/file" + uio "github.com/ipfs/go-unixfs/io" + "github.com/ipfs/go-unixfsnode" + iface "github.com/ipfs/interface-go-ipfs-core" + nsopts "github.com/ipfs/interface-go-ipfs-core/options/namesys" + ifacepath "github.com/ipfs/interface-go-ipfs-core/path" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/schema" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + mc "github.com/multiformats/go-multicodec" +) + +func NewBlocksHandler(gw *BlocksGateway, port int) http.Handler { + headers := map[string][]string{} + gateway.AddAccessControlHeaders(headers) + + conf := gateway.Config{ + Headers: headers, + } + + mux := http.NewServeMux() + gwHandler := gateway.NewHandler(conf, gw) + mux.Handle("/ipfs/", gwHandler) + mux.Handle("/ipns/", gwHandler) + return mux +} + +type BlocksGateway struct { + blockStore blockstore.Blockstore + blockService blockservice.BlockService 
+	dagService   format.DAGService
+	resolver     resolver.Resolver
+
+	// Optional routing system to handle /ipns addresses.
+	namesys namesys.NameSystem
+	routing routing.ValueStore
+}
+
+func NewBlocksGateway(blockService blockservice.BlockService, routing routing.ValueStore) (*BlocksGateway, error) {
+	// Set up the DAG service, which is backed by the given block service.
+	dagService := merkledag.NewDAGService(blockService)
+
+	// Set up the UnixFS resolver.
+	fetcherConfig := bsfetcher.NewFetcherConfig(blockService)
+	fetcherConfig.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) {
+		if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok {
+			return tlnkNd.LinkTargetNodePrototype(), nil
+		}
+		return basicnode.Prototype.Any, nil
+	})
+	fetcher := fetcherConfig.WithReifier(unixfsnode.Reify)
+	resolver := resolver.NewBasicResolver(fetcher)
+
+	// Set up a name system so that we are able to resolve /ipns links.
+	var (
+		ns  namesys.NameSystem
+		err error
+	)
+	if routing != nil {
+		ns, err = namesys.NewNameSystem(routing)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &BlocksGateway{
+		blockStore:   blockService.Blockstore(),
+		blockService: blockService,
+		dagService:   dagService,
+		resolver:     resolver,
+		routing:      routing,
+		namesys:      ns,
+	}, nil
+}
+
+func (api *BlocksGateway) GetUnixFsNode(ctx context.Context, p ifacepath.Resolved) (files.Node, error) {
+	nd, err := api.resolveNode(ctx, p)
+	if err != nil {
+		return nil, err
+	}
+
+	return ufile.NewUnixfsFile(ctx, api.dagService, nd)
+}
+
+func (api *BlocksGateway) LsUnixFsDir(ctx context.Context, p ifacepath.Resolved) (<-chan iface.DirEntry, error) {
+	node, err := api.resolveNode(ctx, p)
+	if err != nil {
+		return nil, err
+	}
+
+	dir, err := uio.NewDirectoryFromNode(api.dagService, node)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(chan iface.DirEntry, uio.DefaultShardWidth)
+
+	go func() {
+		defer close(out)
+		for l := range dir.EnumLinksAsync(ctx) {
+			select {
+			case out <- api.processLink(ctx, l):
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	return out, nil
+}
+
+func (api *BlocksGateway) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+	return api.blockService.GetBlock(ctx, c)
+}
+
+func (api *BlocksGateway) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, error) {
+	if api.routing == nil {
+		return nil, routing.ErrNotSupported
+	}
+
+	// Fail fast if the CID is not an encoded libp2p key; this avoids wasteful
+	// round trips to the remote routing provider.
+	if mc.Code(c.Type()) != mc.Libp2pKey {
+		return nil, errors.New("provided cid is not an encoded libp2p key")
+	}
+
+	// The value store expects the key itself to be encoded as a multihash.
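+	// peer.FromCid extracts exactly that multihash (the peer ID) from the
+	// libp2p-key CID.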
+ id, err := peer.FromCid(c) + if err != nil { + return nil, err + } + + return api.routing.GetValue(ctx, "/ipns/"+string(id)) +} + +func (api *BlocksGateway) GetDNSLinkRecord(ctx context.Context, hostname string) (ifacepath.Path, error) { + if api.namesys != nil { + p, err := api.namesys.Resolve(ctx, "/ipns/"+hostname, nsopts.Depth(1)) + if err == namesys.ErrResolveRecursion { + err = nil + } + return ifacepath.New(p.String()), err + } + + return nil, errors.New("not implemented") +} + +func (api *BlocksGateway) IsCached(ctx context.Context, p ifacepath.Path) bool { + rp, err := api.ResolvePath(ctx, p) + if err != nil { + return false + } + + has, _ := api.blockStore.Has(ctx, rp.Cid()) + return has +} + +func (api *BlocksGateway) ResolvePath(ctx context.Context, p ifacepath.Path) (ifacepath.Resolved, error) { + if _, ok := p.(ifacepath.Resolved); ok { + return p.(ifacepath.Resolved), nil + } + + err := p.IsValid() + if err != nil { + return nil, err + } + + ipath := ipfspath.Path(p.String()) + if ipath.Segments()[0] == "ipns" { + ipath, err = resolve.ResolveIPNS(ctx, api.namesys, ipath) + if err != nil { + return nil, err + } + } + + if ipath.Segments()[0] != "ipfs" { + return nil, fmt.Errorf("unsupported path namespace: %s", p.Namespace()) + } + + node, rest, err := api.resolver.ResolveToLastNode(ctx, ipath) + if err != nil { + return nil, err + } + + root, err := cid.Parse(ipath.Segments()[1]) + if err != nil { + return nil, err + } + + return ifacepath.NewResolvedPath(ipath, node, root, gopath.Join(rest...)), nil +} + +func (api *BlocksGateway) resolveNode(ctx context.Context, p ifacepath.Path) (format.Node, error) { + rp, err := api.ResolvePath(ctx, p) + if err != nil { + return nil, err + } + + node, err := api.dagService.Get(ctx, rp.Cid()) + if err != nil { + return nil, fmt.Errorf("get node: %w", err) + } + return node, nil +} + +func (api *BlocksGateway) processLink(ctx context.Context, result unixfs.LinkResult) iface.DirEntry { + if result.Err != nil { + return iface.DirEntry{Err: result.Err} + } + + link := iface.DirEntry{ + Name: result.Link.Name, + Cid: result.Link.Cid, + } + + switch link.Cid.Type() { + case cid.Raw: + link.Type = iface.TFile + link.Size = result.Link.Size + case cid.DagProtobuf: + link.Size = result.Link.Size + } + + return link +} diff --git a/examples/gateway/proxy/README.md b/examples/gateway/proxy/README.md new file mode 100644 index 0000000000..4164aad1e3 --- /dev/null +++ b/examples/gateway/proxy/README.md @@ -0,0 +1,58 @@ +# Gateway as a Verifying Proxy for Untrusted Remote Blockstore + +This is an example of building a Gateway that uses `application/vnd.ipld.raw` +responses from another gateway acting as a remote blockstore and IPNS resolver. + +Key benefits: +1. Verifies raw blocks and IPNS records fetched from untrusted third-party gateways. +2. The proxy provides web gateway functionalities: returns deserialized files and websites, including index.html support, while the remote gateway only needs to support block responses. + +In this example, we implement two major structures: + +- [Block Store](./blockstore.go), which forwards the block requests to the backend +gateway using `?format=raw`, and +- [Routing System](./routing.go), which forwards the IPNS requests to the backend +gateway using `?format=ipns-record`. In addition, DNSLink lookups are done locally. + - Note: `ipns-record` was introduced just recently in [IPIP-351](https://github.com/ipfs/specs/pull/351) and reference support for it will ship in Kubo 0.19. 
Until that happens, it may not be supported by public gateways yet.
+
+## Build
+
+```bash
+> go build -o verifying-proxy
+```
+
+## Usage
+
+First, you need a compliant gateway that supports both [RAW Block](https://www.iana.org/assignments/media-types/application/vnd.ipld.raw) and IPNS Record response
+types. Once you have it, run the proxy gateway with its address as the `-g` parameter:
+
+
+```
+./verifying-proxy -g https://ipfs.io -p 8040
+```
+
+### Subdomain gateway
+
+Now you can access the gateway at [localhost:8040](http://localhost:8040). It will
+behave like a regular [Subdomain IPFS Gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway),
+except that it runs no libp2p and has no local blockstore.
+All content is fetched from the remote gateway as raw blocks and IPNS records, and verified locally.
+
+### Path gateway
+
+If you don't need Origin isolation and only care about hosting flat files,
+a plain [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at [127.0.0.1:8040](http://127.0.0.1:8040)
+may suffice.
+
+### DNSLink gateway
+
+The gateway supports hosting [DNSLink](https://dnslink.dev/) websites. All you need to do is pass a `Host` header with a FQDN that has DNSLink set up:
+
+```console
+$ curl -sH 'Host: en.wikipedia-on-ipfs.org' 'http://127.0.0.1:8040/wiki/' | head -3
+<!DOCTYPE html><html class="client-js"><head>
+  <meta charset="UTF-8">
+  <title>Wikipedia, the free encyclopedia</title>
+```
+
+Put it behind a reverse proxy terminating TLS (like Nginx) and voila!
diff --git a/examples/gateway/proxy/blockstore.go b/examples/gateway/proxy/blockstore.go
new file mode 100644
index 0000000000..4743566f33
--- /dev/null
+++ b/examples/gateway/proxy/blockstore.go
@@ -0,0 +1,119 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/ipfs/boxo/blocks"
+	"github.com/ipfs/go-cid"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+)
+
+var (
+	errNotImplemented = errors.New("not implemented")
+)
+
+type proxyStore struct {
+	httpClient *http.Client
+	gatewayURL string
+	validate   bool
+}
+
+func newProxyStore(gatewayURL string, client *http.Client) blockstore.Blockstore {
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	return &proxyStore{
+		gatewayURL: gatewayURL,
+		httpClient: client,
+		// Enables block validation by default. Important since we are
+		// proxying block requests to an untrusted gateway.
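+		// Validation recomputes the multihash from the response body (see
+		// fetch below) and refuses any block that does not match the CID.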
+		validate: true,
+	}
+}
+
+func (ps *proxyStore) fetch(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+	u, err := url.Parse(fmt.Sprintf("%s/ipfs/%s?format=raw", ps.gatewayURL, c))
+	if err != nil {
+		return nil, err
+	}
+	resp, err := ps.httpClient.Do(&http.Request{
+		Method: http.MethodGet,
+		URL:    u,
+		Header: http.Header{
+			"Accept": []string{"application/vnd.ipld.raw"},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("unexpected status from remote gateway: %s", resp.Status)
+	}
+
+	rb, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if ps.validate {
+		nc, err := c.Prefix().Sum(rb)
+		if err != nil {
+			return nil, err
+		}
+		if !nc.Equals(c) {
+			fmt.Printf("got %s vs %s\n", nc, c)
+			return nil, blocks.ErrWrongHash
+		}
+	}
+	return blocks.NewBlockWithCid(rb, c)
+}
+
+func (ps *proxyStore) Has(ctx context.Context, c cid.Cid) (bool, error) {
+	blk, err := ps.fetch(ctx, c)
+	if err != nil {
+		return false, err
+	}
+	return blk != nil, nil
+}
+
+func (ps *proxyStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+	blk, err := ps.fetch(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+	return blk, nil
+}
+
+func (ps *proxyStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
+	blk, err := ps.fetch(ctx, c)
+	if err != nil {
+		return 0, err
+	}
+	return len(blk.RawData()), nil
+}
+
+func (ps *proxyStore) HashOnRead(enabled bool) {
+	ps.validate = enabled
+}
+
+func (ps *proxyStore) Put(context.Context, blocks.Block) error {
+	return errNotImplemented
+}
+
+func (ps *proxyStore) PutMany(context.Context, []blocks.Block) error {
+	return errNotImplemented
+}
+
+func (ps *proxyStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return nil, errNotImplemented
+}
+
+func (ps *proxyStore) DeleteBlock(context.Context, cid.Cid) error {
+	return errNotImplemented
+}
diff --git a/examples/gateway/proxy/main.go b/examples/gateway/proxy/main.go
new file mode 100644
index 0000000000..86ab178215
--- /dev/null
+++ b/examples/gateway/proxy/main.go
@@ -0,0 +1,77 @@
+package main
+
+import (
+	"flag"
+	"log"
+	"net/http"
+	"strconv"
+
+	"github.com/ipfs/boxo/examples/gateway/common"
+	"github.com/ipfs/boxo/gateway"
+	"github.com/ipfs/go-blockservice"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+func main() {
+	gatewayUrlPtr := flag.String("g", "", "gateway to proxy to")
+	port := flag.Int("p", 8040, "port to run this gateway on")
+	flag.Parse()
+
+	// Sets up the block store, which will proxy the block requests to the given gateway.
+	blockStore := newProxyStore(*gatewayUrlPtr, nil)
+	blockService := blockservice.New(blockStore, offline.Exchange(blockStore))
+
+	// Sets up the routing system, which will proxy the IPNS routing requests to the given gateway.
+	routing := newProxyRouting(*gatewayUrlPtr, nil)
+
+	// Creates the gateway with the block service and the routing.
+	gwAPI, err := common.NewBlocksGateway(blockService, routing)
+	if err != nil {
+		log.Fatal(err)
+	}
+	handler := common.NewBlocksHandler(gwAPI, *port)
+
+	// Initialize the public gateways that we will want to have available through
+	// Host header rewriting. This step is optional and only required if you're
+	// running multiple public gateways and want different settings and support
+	// for DNSLink and Subdomain Gateways.
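+	// Unlike the CAR example, /ipns/ lookups can succeed here: IPNS records
+	// are fetched from the remote gateway via the proxy routing above, while
+	// DNSLink names are resolved locally through the name system.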
+	noDNSLink := false
+	publicGateways := map[string]*gateway.Specification{
+		// Support public requests with Host: CID.ipfs.example.net and ID.ipns.example.net
+		"example.net": {
+			Paths:         []string{"/ipfs", "/ipns"},
+			NoDNSLink:     noDNSLink,
+			UseSubdomains: true,
+		},
+		// Support local requests
+		"localhost": {
+			Paths:         []string{"/ipfs", "/ipns"},
+			NoDNSLink:     noDNSLink,
+			UseSubdomains: true,
+		},
+	}
+
+	// Creates a mux to serve the prometheus metrics alongside the gateway. This
+	// step is optional and only required if you need or want to access the metrics.
+	// You may also decide to expose the metrics on a different path or port.
+	mux := http.NewServeMux()
+	mux.Handle("/debug/metrics/prometheus", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
+	mux.Handle("/", handler)
+
+	// Then wrap the mux with the hostname handler. Please note that the metrics
+	// will not be available under the previously defined publicGateways.
+	// You will be able to access the metrics via 127.0.0.1 but not localhost
+	// or example.net. If you want to expose the metrics on such gateways,
+	// you will have to add the path "/debug" to the variable Paths.
+	handler = gateway.WithHostname(mux, gwAPI, publicGateways, noDNSLink)
+
+	log.Printf("Listening on http://localhost:%d", *port)
+	log.Printf("Try loading an image: http://localhost:%d/ipfs/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", *port)
+	log.Printf("Try browsing Wikipedia snapshot: http://localhost:%d/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", *port)
+	log.Printf("Metrics available at http://127.0.0.1:%d/debug/metrics/prometheus", *port)
+	if err := http.ListenAndServe(":"+strconv.Itoa(*port), handler); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/examples/gateway/proxy/main_test.go b/examples/gateway/proxy/main_test.go
new file mode 100644
index 0000000000..8ab637efd5
--- /dev/null
+++ b/examples/gateway/proxy/main_test.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/ipfs/boxo/blocks"
+	"github.com/ipfs/boxo/examples/gateway/common"
+	"github.com/ipfs/go-blockservice"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	"github.com/stretchr/testify/assert"
+)
+
+const (
+	HelloWorldCID = "bafkreifzjut3te2nhyekklss27nh3k72ysco7y32koao5eei66wof36n5e"
+)
+
+func newProxyGateway(t *testing.T, rs *httptest.Server) *httptest.Server {
+	blockStore := newProxyStore(rs.URL, nil)
+	blockService := blockservice.New(blockStore, offline.Exchange(blockStore))
+	routing := newProxyRouting(rs.URL, nil)
+
+	gateway, err := common.NewBlocksGateway(blockService, routing)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	handler := common.NewBlocksHandler(gateway, 0)
+	ts := httptest.NewServer(handler)
+	t.Cleanup(ts.Close)
+
+	return ts
+}
+
+func TestErrorOnInvalidContent(t *testing.T) {
+	rs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("wrong data"))
+	}))
+	t.Cleanup(rs.Close)
+	ts := newProxyGateway(t, rs)
+
+	res, err := http.Get(ts.URL + "/ipfs/" + HelloWorldCID)
+	assert.Nil(t, err)
+
+	body, err := io.ReadAll(res.Body)
+	res.Body.Close()
+	assert.Nil(t, err)
+	assert.EqualValues(t, res.StatusCode, http.StatusInternalServerError)
+	assert.Contains(t, string(body), blocks.ErrWrongHash.Error())
+}
+
+func TestPassOnOnCorrectContent(t *testing.T) {
+	rs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("hello world"))
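+		// These are the exact bytes HelloWorldCID was derived from, so the
+		// proxy store's hash validation will pass.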
+	}))
+	t.Cleanup(rs.Close)
+	ts := newProxyGateway(t, rs)
+
+	res, err := http.Get(ts.URL + "/ipfs/" + HelloWorldCID)
+	assert.Nil(t, err)
+
+	body, err := io.ReadAll(res.Body)
+	res.Body.Close()
+	assert.Nil(t, err)
+	assert.EqualValues(t, res.StatusCode, http.StatusOK)
+	assert.EqualValues(t, string(body), "hello world")
+}
diff --git a/examples/gateway/proxy/routing.go b/examples/gateway/proxy/routing.go
new file mode 100644
index 0000000000..57d0c1492e
--- /dev/null
+++ b/examples/gateway/proxy/routing.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/ipfs/go-ipns"
+	ipns_pb "github.com/ipfs/go-ipns/pb"
+	ic "github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/routing"
+)
+
+type proxyRouting struct {
+	gatewayURL string
+	httpClient *http.Client
+}
+
+func newProxyRouting(gatewayURL string, client *http.Client) routing.ValueStore {
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	return &proxyRouting{
+		gatewayURL: gatewayURL,
+		httpClient: client,
+	}
+}
+
+func (ps *proxyRouting) PutValue(context.Context, string, []byte, ...routing.Option) error {
+	return routing.ErrNotSupported
+}
+
+func (ps *proxyRouting) GetValue(ctx context.Context, k string, opts ...routing.Option) ([]byte, error) {
+	if !strings.HasPrefix(k, "/ipns/") {
+		return nil, routing.ErrNotSupported
+	}
+
+	k = strings.TrimPrefix(k, "/ipns/")
+	id, err := peer.IDFromBytes([]byte(k))
+	if err != nil {
+		return nil, err
+	}
+
+	return ps.fetch(ctx, id)
+}
+
+func (ps *proxyRouting) SearchValue(ctx context.Context, k string, opts ...routing.Option) (<-chan []byte, error) {
+	if !strings.HasPrefix(k, "/ipns/") {
+		return nil, routing.ErrNotSupported
+	}
+
+	k = strings.TrimPrefix(k, "/ipns/")
+	id, err := peer.IDFromBytes([]byte(k))
+	if err != nil {
+		return nil, err
+	}
+
+	ch := make(chan []byte)
+
+	go func() {
+		v, err := ps.fetch(ctx, id)
+		if err != nil {
+			close(ch)
+		} else {
+			ch <- v
+			close(ch)
+		}
+	}()
+
+	return ch, nil
+}
+
+func (ps *proxyRouting) fetch(ctx context.Context, id peer.ID) ([]byte, error) {
+	u, err := url.Parse(fmt.Sprintf("%s/ipns/%s", ps.gatewayURL, peer.ToCid(id).String()))
+	if err != nil {
+		return nil, err
+	}
+	resp, err := ps.httpClient.Do(&http.Request{
+		Method: http.MethodGet,
+		URL:    u,
+		Header: http.Header{
+			"Accept": []string{"application/vnd.ipfs.ipns-record"},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("unexpected status from remote gateway: %s", resp.Status)
+	}
+
+	rb, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var entry ipns_pb.IpnsEntry
+	err = proto.Unmarshal(rb, &entry)
+	if err != nil {
+		return nil, err
+	}
+
+	pub, err := id.ExtractPublicKey()
+	if err != nil {
+		// Make sure it works with RSA keys that cannot be embedded into the
+ if len(entry.PubKey) > 0 { + pub, err = ic.UnmarshalPublicKey(entry.PubKey) + } + } + if err != nil { + return nil, err + } + + err = ipns.Validate(pub, &entry) + if err != nil { + return nil, err + } + + return rb, nil +} diff --git a/examples/go.mod b/examples/go.mod new file mode 100644 index 0000000000..fc68e1ebf2 --- /dev/null +++ b/examples/go.mod @@ -0,0 +1,167 @@ +module github.com/ipfs/boxo/examples + +go 1.19 + +require ( + github.com/gogo/protobuf v1.3.2 + github.com/ipfs/boxo v0.7.1-0.20230320215318-0a46dee1a780 + github.com/ipfs/go-blockservice v0.5.0 + github.com/ipfs/go-cid v0.4.0 + github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-fetcher v1.6.1 + github.com/ipfs/go-ipfs-blockstore v1.3.0 + github.com/ipfs/go-ipfs-chunker v0.0.5 + github.com/ipfs/go-ipfs-exchange-offline v0.3.0 + github.com/ipfs/go-ipld-format v0.4.0 + github.com/ipfs/go-ipns v0.3.0 + github.com/ipfs/go-merkledag v0.10.0 + github.com/ipfs/go-namesys v0.7.0 + github.com/ipfs/go-path v0.3.1 + github.com/ipfs/go-unixfs v0.4.5-0.20230321002036-311d68ceee08 + github.com/ipfs/go-unixfsnode v1.6.0 + github.com/ipfs/interface-go-ipfs-core v0.11.1-0.20230320221220-9a2ea127aaf8 + github.com/ipld/go-car/v2 v2.8.3-0.20230320234631-0db9700abe2e + github.com/ipld/go-codec-dagpb v1.6.0 + github.com/ipld/go-ipld-prime v0.20.0 + github.com/libp2p/go-libp2p v0.25.1 + github.com/libp2p/go-libp2p-routing-helpers v0.6.0 + github.com/multiformats/go-multiaddr v0.8.0 + github.com/multiformats/go-multicodec v0.8.1 + github.com/prometheus/client_golang v1.14.0 + github.com/stretchr/testify v1.8.2 +) + +require ( + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/cskr/pubsub v1.0.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/gabriel-vasile/mimetype v1.4.1 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/huin/goupnp v1.0.3 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-bitfield v1.1.0 // indirect + github.com/ipfs/go-block-format v0.1.2-0.20230320222416-ba43dc7de213 // indirect + 
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect + github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect + github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-redirects-file v0.1.1 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-cbor v0.0.6 // indirect + github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/ipfs/go-metrics-interface v0.0.1 // indirect + github.com/ipfs/go-peertaskqueue v0.8.1 // indirect + github.com/ipfs/go-verifcid v0.0.2 // indirect + github.com/ipld/go-car v0.6.1-0.20230320234631-0db9700abe2e // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/compress v1.15.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.3 // indirect + github.com/koron/go-ssdp v0.0.3 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.21.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect + github.com/libp2p/go-libp2p-record v0.2.0 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.0 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.5.1 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.89.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-18 v0.2.0 // indirect + github.com/quic-go/qtls-go1-19 v0.2.0 // indirect + github.com/quic-go/qtls-go1-20 v0.1.0 // indirect + github.com/quic-go/quic-go v0.32.0 
// indirect + github.com/quic-go/webtransport-go v0.5.1 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect + github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel v1.13.0 // indirect + go.opentelemetry.io/otel/trace v1.13.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/dig v1.15.0 // indirect + go.uber.org/fx v1.18.2 // indirect + go.uber.org/multierr v1.9.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/net v0.6.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/tools v0.3.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.1.7 // indirect + nhooyr.io/websocket v1.8.7 // indirect +) + +replace github.com/ipfs/boxo => ../ diff --git a/examples/go.sum b/examples/go.sum new file mode 100644 index 0000000000..82ee2243cf --- /dev/null +++ b/examples/go.sum @@ -0,0 +1,1625 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod 
h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= 
+github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
+github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
+github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
+github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
+github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
+github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg=
+github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
+github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
+github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
+github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
+github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
+github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
+github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
+github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
+github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
+github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
+github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/gabriel-vasile/mimetype v1.4.1 h1:TRWk7se+TOjCYgRth7+1/OYLNiRNIotknkFtf/dnN7Q=
+github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
+github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
+github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
+github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
+github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
+github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM=
+github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
+github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
+github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
+github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
+github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
+github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
+github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
+github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
+github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo=
+github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ=
+github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
+github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
+github.com/ipfs/go-block-format v0.1.2-0.20230320222416-ba43dc7de213 h1:GxBB4xUUZj+DwG+njsJYURAa7RS8Gu0FdwYNDLRZhqk=
+github.com/ipfs/go-block-format v0.1.2-0.20230320222416-ba43dc7de213/go.mod h1:4G99sJwXnroF0DtCHrujotIAEedtJn2olyQyBIzoWS8=
+github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8=
+github.com/ipfs/go-blockservice v0.5.0 h1:B2mwhhhVQl2ntW2EIpaWPwSCxSuqr5fFA93Ms4bYLEY=
+github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w=
+github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
+github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
+github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
+github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
+github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA=
+github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
+github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
+github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
+github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
+github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
+github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs=
+github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
+github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
+github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
+github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE=
+github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk=
+github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
+github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
+github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
+github.com/ipfs/go-fetcher v1.6.1 h1:UFuRVYX5AIllTiRhi5uK/iZkfhSpBCGX7L70nSZEmK8=
+github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo=
+github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE=
+github.com/ipfs/go-ipfs-blockstore v1.3.0 h1:m2EXaWgwTzAfsmt5UdJ7Is6l4gJcaM/A12XwJyvYvMM=
+github.com/ipfs/go-ipfs-blockstore v1.3.0/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE=
+github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
+github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
+github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8=
+github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8=
+github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
+github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
+github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
+github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs=
+github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q=
+github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU=
+github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI=
+github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y=
+github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y=
+github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s=
+github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=
+github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A=
+github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
+github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
+github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
+github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8=
+github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk=
+github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM=
+github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc=
+github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
+github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
+github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
+github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0=
+github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA=
+github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
+github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
+github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ=
+github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
+github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc=
+github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg=
+github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A=
+github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24=
+github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
+github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
+github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
+github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
+github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
+github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
+github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
+github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
+github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
+github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
+github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
+github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g=
+github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
+github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/ipfs/go-merkledag v0.10.0 h1:IUQhj/kzTZfam4e+LnaEpoiZ9vZF6ldimVlby+6OXL4=
+github.com/ipfs/go-merkledag v0.10.0/go.mod h1:zkVav8KiYlmbzUzNM6kENzkdP5+qR7+2mCwxkQ6GIj8=
+github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
+github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
+github.com/ipfs/go-namesys v0.7.0 h1:xqosk71GIVRkFDtF2UNRcXn4LdNeo7tzuy8feHD6NbU=
+github.com/ipfs/go-namesys v0.7.0/go.mod h1:KYSZBVZG3VJC34EfqqJPG7T48aWgxseoMPAPA5gLyyQ=
+github.com/ipfs/go-path v0.3.1 h1:wkeaCWE/NTuuPGlEkLTsED5UkzfKYZpxaFFPgk8ZVLE=
+github.com/ipfs/go-path v0.3.1/go.mod h1:eNLsxJEEMxn/CDzUJ6wuNl+6No6tEUhOZcPKsZsYX0E=
+github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU=
+github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg=
+github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU=
+github.com/ipfs/go-unixfs v0.4.5-0.20230321002036-311d68ceee08 h1:9LBAo8SC8lxcXT3fY+qpIqpzwCahB7v26b3Bhb9KEMA=
+github.com/ipfs/go-unixfs v0.4.5-0.20230321002036-311d68ceee08/go.mod h1:WoNBxHWDOE2KowODZfEX2+NXR5DJGE7lV1h8870DpoY=
+github.com/ipfs/go-unixfsnode v1.6.0 h1:JOSA02yaLylRNi2rlB4ldPr5VcZhcnaIVj5zNLcOjDo=
+github.com/ipfs/go-unixfsnode v1.6.0/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk=
+github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
+github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs=
+github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU=
+github.com/ipfs/interface-go-ipfs-core v0.11.1-0.20230320221220-9a2ea127aaf8 h1:hl7EAAXWoZIqvOepcYzIhVti5EI8+QiICfHfukssoVU=
+github.com/ipfs/interface-go-ipfs-core v0.11.1-0.20230320221220-9a2ea127aaf8/go.mod h1:Wg2BECxIJFN+G3XYVQXjOhIPpq9PWHzzjojTzyaEMxU=
+github.com/ipld/go-car v0.6.1-0.20230320234631-0db9700abe2e h1:cwXJD7nqDVGts41pnDjRdnTjE+jqRqMPnnl+RGSoa8U=
+github.com/ipld/go-car v0.6.1-0.20230320234631-0db9700abe2e/go.mod h1:Ug2htCfKi+ftd54ocCDcvc+Yj3O2xv7hfw+tauTr3Q8=
+github.com/ipld/go-car/v2 v2.8.3-0.20230320234631-0db9700abe2e h1:CpmVTo3YgNIMXbXYtxOVQ9miC4B06fHpvjb5cED6e0k=
+github.com/ipld/go-car/v2 v2.8.3-0.20230320234631-0db9700abe2e/go.mod h1:3Vx8oMPD4JMZ/vPNPmzRLLfVlLNb1w8oxX/RWa+eeNA=
+github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc=
+github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s=
+github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8=
+github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
+github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g=
+github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M=
+github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo=
+github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
+github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs=
+github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
+github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
+github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
+github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
+github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8=
+github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
+github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E=
+github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
+github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
+github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
+github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc=
+github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU=
+github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70=
+github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4=
+github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8=
+github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8=
+github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
+github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
+github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
+github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54=
+github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
+github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
+github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
+github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
+github.com/libp2p/go-libp2p v0.25.1 h1:YK+YDCHpYyTvitKWVxa5PfElgIpOONU01X5UcLEwJGA=
+github.com/libp2p/go-libp2p v0.25.1/go.mod h1:xnK9/1d9+jeQCVvi/f1g12KqtVi/jP/SijtKV1hML3g=
+github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw=
+github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI=
+github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE=
+github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
+github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
+github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
+github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
+github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
+github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU=
+github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ=
+github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU=
+github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
+github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA=
+github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
+github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
+github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I=
+github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI=
+github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0=
+github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
+github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
+github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII=
+github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0=
+github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0=
+github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y=
+github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y=
+github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM=
+github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
+github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
+github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
+github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
+github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
+github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
+github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
+github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
+github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
+github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg=
+github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
+github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
+github.com/libp2p/go-libp2p-kad-dht v0.21.0 h1:J0Yd22VA+sk0CJRGMgtfHvLVIkZDyJ3AJGiljywIw5U=
+github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I=
+github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA=
+github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U=
+github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90=
+github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo=
+github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE=
+github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo=
+github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek=
+github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw=
+github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g=
+github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE=
+github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw=
+github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
+github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
+github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY=
+github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY=
+github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
+github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ=
+github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA=
+github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA=
+github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
+github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
+github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
+github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA=
+github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
+github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
+github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
+github.com/libp2p/go-libp2p-routing-helpers v0.6.0 h1:Rfyd+wp/cU0PjNjCphGzLYzd7Q51fjOMs5Sjj6zWGT0=
+github.com/libp2p/go-libp2p-routing-helpers v0.6.0/go.mod h1:wwK/XSLt6njjO7sRbjhf8w7PGBOfdntMQ2mOQPZ5s/Q=
+github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
+github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g=
+github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8=
+github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY=
+github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4=
+github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU=
+github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
+github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
+github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
+github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
+github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
+github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
+github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
+github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
+github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
+github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc=
+github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g=
+github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M=
+github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
+github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
+github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
+github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
+github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8=
+github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw=
+github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA=
+github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
+github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
+github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
+github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
+github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
+github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
+github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
+github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU=
+github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
+github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
+github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
+github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
+github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
+github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
+github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
+github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
+github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo=
+github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU=
+github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg=
+github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM=
+github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
+github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
+github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A=
+github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ=
+github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
+github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
+github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
+github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
+github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
+github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
+github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA=
+github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ=
+github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
+github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
+github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
+github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM=
+github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw=
+github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
+github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
+github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
+github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
+github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc=
+github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA=
+github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
+github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
+github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
+github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
+github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM=
+github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
+github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA=
+github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
+github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
+github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
+github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
+github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
+github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
+github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
+github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
+github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
+github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
+github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
+github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
+github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
+github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
+github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
+github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
+github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
+github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
+github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
+github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE=
+github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
+github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
+github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
+github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
+github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
+github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
+github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
+github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
+github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
+github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
+github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU=
+github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
+github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
+github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y=
+github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
+github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
+github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
+github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA=
+github.com/multiformats/go-multibase v0.0.1/go.mod 
h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= +github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= +github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= +github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod 
h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= +github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 
h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U= +github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= +github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk= +github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI= +github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA= +github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= +github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow= +github.com/quic-go/webtransport-go v0.5.1/go.mod 
h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod 
h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-testmark v0.11.0 h1:J6LnV8KpceDvo7spaNU4+DauH2n1x+6RaO2rJrmpQ9U= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod 
h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o= +github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= +go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= +go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= +go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE= +go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= +go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= 
+golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w= +golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod 
h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= 
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/examples/unixfs-file-cid/README.md b/examples/unixfs-file-cid/README.md
new file mode 100644
index 0000000000..eeea0d7bd6
--- /dev/null
+++ b/examples/unixfs-file-cid/README.md
@@ -0,0 +1,58 @@
+# Downloading a UnixFS file
+
+This is an example that quickly shows how to use IPFS tooling to move a file around.
+
+This example can be started in either server mode or client mode.
+
+In server mode, it will sit and wait for data to be requested via the [Bitswap](https://docs.ipfs.tech/concepts/bitswap/#bitswap) protocol.
+
+In client mode, it will start up, connect to the server, request the data it needs via Bitswap, write it out, and shut down.
+
+## Build
+
+From the `boxo/examples` directory run the following:
+
+```
+> cd unixfs-file-cid/
+> go build
+```
+
+## Usage
+
+```
+> ./unixfs-file-cid
+2023/01/30 21:34:11 I am /ip4/127.0.0.1/tcp/53935/p2p/QmUtp8xEVgWC5dNPthF2g37eVvCdrqY1FPxLxXZoKkPbdp
+2023/01/30 21:34:11 hosting UnixFS file with CID: bafybeiecq2irw4fl5vunnxo6cegoutv4de63h7n27tekkjtak3jrvrzzhe
+2023/01/30 21:34:11 listening for inbound connections and Bitswap requests
+2023/01/30 21:34:11 Now run "./unixfs-file-cid -d /ip4/127.0.0.1/tcp/53935/p2p/QmUtp8xEVgWC5dNPthF2g37eVvCdrqY1FPxLxXZoKkPbdp" on a different terminal
+```
+
+The IPFS server hosting the data over libp2p will print out its `Multiaddress`, which indicates how it can be reached (ip4+tcp) and its randomly generated ID (`QmUtp8xEV...`).
+
+Now, launch another node that talks to the hosting node:
+
+```
+> ./unixfs-file-cid -d /ip4/127.0.0.1/tcp/53935/p2p/QmUtp8xEVgWC5dNPthF2g37eVvCdrqY1FPxLxXZoKkPbdp
+```
+
+The IPFS client will then download the file from the server peer and let you know that it's been received.
+
+## Details
+
+The `makeHost()` function creates a go-libp2p host that can make and receive connections and is usable by various protocols such as Bitswap.
+
+Both the client and the server have their own libp2p hosts, which have:
+- A [libp2p Peer ID](https://godoc.org/github.com/libp2p/go-libp2p-peer#ID) like `QmNtX1cvrm2K6mQmMEaMxAuB4rTexhd87vpYVot4sEZzxc`. The example autogenerates a key pair on every run and uses an ID extracted from the public key (the hash of the public key).
+- A [Multiaddress](https://godoc.org/github.com/multiformats/go-multiaddr), which indicates how to reach this peer. There can be several of them (using different protocols or locations, for example). Example: `/ip4/127.0.0.1/tcp/1234`.
+
+The `startDataServer` function creates some local storage and then processes the file data into a [UnixFS](https://docs.ipfs.tech/concepts/file-systems/#unix-file-system-unixfs) graph.
+There are many ways to turn a file into a UnixFS graph; the ones selected in this example correspond to parameters commonly seen in the IPFS ecosystem, but they are just one possible set. They correspond to [kubo](https://github.com/ipfs/kubo)'s `ipfs add --cid-version=1`.
+It then starts a Bitswap server and waits for requests.
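+
+As a quick cross-check (a sketch that assumes a local [kubo](https://github.com/ipfs/kubo) install and a hypothetical `numbers.txt` holding the same concatenated numbers this example generates), the same CID should be reproducible without actually importing the data:
+
+```
+> ipfs add --only-hash --cid-version=1 numbers.txt
+added bafybeiecq2irw4fl5vunnxo6cegoutv4de63h7n27tekkjtak3jrvrzzhe numbers.txt
+```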
+
+The `runClient` function connects to the data server we started earlier and then uses UnixFS tooling to get the parts of the file we need (in this case all of it, but getting ranges is valid as well).
+As we read more of the file, the parts of the graph we need are requested from the data server via Bitswap.
+
+Some important notes:
+- The way in which a client discovers which peers to ask for data is highly situational. In this case we knew who we wanted to fetch the data from. In others, we might use a system such as a DHT or a coordination server to find that information.
+- Downloading data using libp2p and Bitswap is just one way you can fetch data. You could also leverage other techniques including GraphSync, HTTP requests for a CAR file of your graph, or something else.
+- UnixFS is only one type of data that can be moved around using IPFS tooling. A lot of IPFS tooling and infrastructure is built to work more generically with content-addressable data. Other examples include data from BitTorrent, Filecoin, Git, etc.
diff --git a/examples/unixfs-file-cid/main.go b/examples/unixfs-file-cid/main.go
new file mode 100644
index 0000000000..a73e26ec8d
--- /dev/null
+++ b/examples/unixfs-file-cid/main.go
@@ -0,0 +1,231 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	mrand "math/rand"
+	"strconv"
+	"strings"
+
+	"github.com/ipfs/go-datastore"
+	dsync "github.com/ipfs/go-datastore/sync"
+
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multiaddr"
+	"github.com/multiformats/go-multicodec"
+
+	"github.com/ipfs/go-blockservice"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	chunker "github.com/ipfs/go-ipfs-chunker"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	"github.com/ipfs/go-merkledag"
+	unixfile "github.com/ipfs/go-unixfs/file"
+	"github.com/ipfs/go-unixfs/importer/balanced"
+	uih "github.com/ipfs/go-unixfs/importer/helpers"
+	"github.com/libp2p/go-libp2p-routing-helpers"
+
+	bsclient "github.com/ipfs/boxo/bitswap/client"
+	bsnet "github.com/ipfs/boxo/bitswap/network"
+	bsserver "github.com/ipfs/boxo/bitswap/server"
+	"github.com/ipfs/boxo/files"
+)
+
+const exampleBinaryName = "unixfs-file-cid"
+
+func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Parse options from the command line
+	targetF := flag.String("d", "", "target peer to dial")
+	seedF := flag.Int64("seed", 0, "set random seed for id generation")
+	flag.Parse()
+
+	// For this example we are going to be transferring data using Bitswap over libp2p.
+	// This means we need to create a libp2p host first.
+
+	// Make a host that listens on the given multiaddress
+	h, err := makeHost(0, *seedF)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer h.Close()
+
+	fullAddr := getHostAddress(h)
+	log.Printf("I am %s\n", fullAddr)
+
+	if *targetF == "" {
+		c, bs, err := startDataServer(ctx, h)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer bs.Close()
+		log.Printf("hosting UnixFS file with CID: %s\n", c)
+		log.Println("listening for inbound connections and Bitswap requests")
+		log.Printf("Now run \"./%s -d %s\" on a different terminal\n", exampleBinaryName, fullAddr)
+
+		// Run until canceled.
+		<-ctx.Done()
+	} else {
+		log.Printf("downloading UnixFS file with CID: %s\n", fileCid)
+		fileData, err := runClient(ctx, h, cid.MustParse(fileCid), *targetF)
+		if err != nil {
+			log.Fatal(err)
+		}
+		log.Println("found the data")
+		log.Println(string(fileData))
+		log.Println("the file was all the numbers from 0 to 100k!")
+	}
+}
+
+// makeHost creates a libp2p host with a random peer ID listening on the
+// given multiaddress.
+func makeHost(listenPort int, randseed int64) (host.Host, error) {
+	var r io.Reader
+	if randseed == 0 {
+		r = rand.Reader
+	} else {
+		r = mrand.New(mrand.NewSource(randseed))
+	}
+
+	// Generate a key pair for this host. We will use it at least
+	// to obtain a valid host ID.
+	priv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)
+	if err != nil {
+		return nil, err
+	}
+
+	// Some basic libp2p options; see the go-libp2p docs for more details
+	opts := []libp2p.Option{
+		libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)), // port we are listening on, limiting to a single interface and protocol for simplicity
+		libp2p.Identity(priv),
+	}
+
+	return libp2p.New(opts...)
+}
+
+func getHostAddress(h host.Host) string {
+	// Build host multiaddress
+	hostAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/p2p/%s", h.ID().String()))
+
+	// Now we can build a full multiaddress to reach this host
+	// by encapsulating both addresses:
+	addr := h.Addrs()[0]
+	return addr.Encapsulate(hostAddr).String()
+}
+
+// The CID of the file with the numbers 0 to 100k, built with the parameters:
+// CIDv1 links, a 256-bit SHA2-256 hash function, raw leaves, a balanced layout, 256 KiB chunks, and 174 max links per block
+const fileCid = "bafybeiecq2irw4fl5vunnxo6cegoutv4de63h7n27tekkjtak3jrvrzzhe"
+
+// createFile0to100k creates a file with the numbers 0 to 100k
+func createFile0to100k() ([]byte, error) {
+	b := strings.Builder{}
+	for i := 0; i <= 100000; i++ {
+		s := strconv.Itoa(i)
+		_, err := b.WriteString(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return []byte(b.String()), nil
+}
+
+func startDataServer(ctx context.Context, h host.Host) (cid.Cid, *bsserver.Server, error) {
+	fileBytes, err := createFile0to100k()
+	if err != nil {
+		return cid.Undef, nil, err
+	}
+	fileReader := bytes.NewReader(fileBytes)
+
+	ds := dsync.MutexWrap(datastore.NewMapDatastore())
+	bs := blockstore.NewBlockstore(ds)
+	bs = blockstore.NewIdStore(bs) // handle identity multihashes; these don't require doing any actual lookups
+
+	bsrv := blockservice.New(bs, offline.Exchange(bs))
+	dsrv := merkledag.NewDAGService(bsrv)
+
+	// Create a UnixFS graph from our file; the parameters are described here and the result can be visualized at https://dag.ipfs.tech/
+	ufsImportParams := uih.DagBuilderParams{
+		Maxlinks:  uih.DefaultLinksPerBlock, // Default max of 174 links per block
+		RawLeaves: true,                     // Leave the actual file bytes untouched instead of wrapping them in a dag-pb protobuf wrapper
+		CidBuilder: cid.V1Builder{ // Use CIDv1 for all links
+			Codec:    uint64(multicodec.DagPb),
+			MhType:   uint64(multicodec.Sha2_256), // Use SHA2-256 as the hash function
+			MhLength: -1,                          // Use the default hash length for the given hash function (in this case 256 bits)
+		},
+		Dagserv: dsrv,
+		NoCopy:  false,
+	}
+	ufsBuilder, err := ufsImportParams.New(chunker.NewSizeSplitter(fileReader, chunker.DefaultBlockSize)) // Split the file up into fixed-size 256 KiB chunks
+	if err != nil {
+		return cid.Undef, nil, err
+	}
+	nd, err := balanced.Layout(ufsBuilder) // Arrange the graph with a balanced layout
+	if err != 
nil {
+		return cid.Undef, nil, err
+	}
+
+	// Start listening on the Bitswap protocol
+	// For this example we're not leveraging any content routing (DHT, IPNI, delegated routing requests, etc.) as we know the peer we are fetching from
+	n := bsnet.NewFromIpfsHost(h, routinghelpers.Null{})
+	bswap := bsserver.New(ctx, n, bs)
+	n.Start(bswap)
+	return nd.Cid(), bswap, nil
+}
+
+func runClient(ctx context.Context, h host.Host, c cid.Cid, targetPeer string) ([]byte, error) {
+	n := bsnet.NewFromIpfsHost(h, routinghelpers.Null{})
+	bswap := bsclient.New(ctx, n, blockstore.NewBlockstore(datastore.NewNullDatastore()))
+	n.Start(bswap)
+	defer bswap.Close()
+
+	// Turn the targetPeer into a multiaddr.
+	maddr, err := multiaddr.NewMultiaddr(targetPeer)
+	if err != nil {
+		return nil, err
+	}
+
+	// Extract the peer ID from the multiaddr.
+	info, err := peer.AddrInfoFromP2pAddr(maddr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Directly connect to the peer that we know has the content.
+	// Generally this peer will come from whatever content routing system is provided; however, go-bitswap will also
+	// ask peers it is connected to for content, so this will work.
+	if err := h.Connect(ctx, *info); err != nil {
+		return nil, err
+	}
+
+	dserv := merkledag.NewReadOnlyDagService(merkledag.NewSession(ctx, merkledag.NewDAGService(blockservice.New(blockstore.NewBlockstore(datastore.NewNullDatastore()), bswap))))
+	nd, err := dserv.Get(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+
+	unixFSNode, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	if f, ok := unixFSNode.(files.File); ok {
+		if _, err := io.Copy(&buf, f); err != nil {
+			return nil, err
+		}
+	}
+
+	return buf.Bytes(), nil
+}
diff --git a/examples/unixfs-file-cid/main_test.go b/examples/unixfs-file-cid/main_test.go
new file mode 100644
index 0000000000..7b69fb52b4
--- /dev/null
+++ b/examples/unixfs-file-cid/main_test.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"github.com/ipfs/go-cid"
+	"testing"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+func TestBitswapFetch(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	server, err := makeHost(0, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client, err := makeHost(0, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c, bs, err := startDataServer(ctx, server)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer bs.Close()
+
+	if expectedCid := cid.MustParse(fileCid); !expectedCid.Equals(c) {
+		t.Fatalf("expected CID %s, got %s", expectedCid, c)
+	}
+
+	multiaddrs, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{
+		ID:    server.ID(),
+		Addrs: server.Addrs(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(multiaddrs) != 1 {
+		t.Fatalf("expected a single multiaddr")
+	}
+	outputBytes, err := runClient(ctx, client, c, multiaddrs[0].String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	fileBytes, err := createFile0to100k()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(outputBytes, fileBytes) {
+		t.Fatalf("retrieved bytes did not match sent bytes")
+	}
+}
diff --git a/exchange/interface.go b/exchange/interface.go
new file mode 100644
index 0000000000..816f1c4e4c
--- /dev/null
+++ b/exchange/interface.go
@@ -0,0 +1,40 @@
+// Package exchange defines the IPFS exchange interface
+package exchange
+
+import (
+	"context"
+	"io"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+)
+
+// Interface defines the functionality of the IPFS block exchange protocol.
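+// The offline exchange in exchange/offline is the simplest implementation:
+// it serves only blocks that are already present in a local blockstore.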
+type Interface interface { // type Exchanger interface
+	Fetcher
+
+	// NotifyNewBlocks tells the exchange that new blocks are available and can be served.
+	NotifyNewBlocks(ctx context.Context, blocks ...blocks.Block) error
+
+	io.Closer
+}
+
+// Fetcher is an object that can be used to retrieve blocks
+type Fetcher interface {
+	// GetBlock returns the block associated with a given cid.
+	GetBlock(context.Context, cid.Cid) (blocks.Block, error)
+	// GetBlocks returns the blocks associated with the given cids.
+	// If the requested blocks are not found immediately, this function should block until
+	// they are found. If they can't be found later, it's also acceptable to terminate.
+	GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error)
+}
+
+// SessionExchange is an exchange.Interface which supports
+// sessions.
+type SessionExchange interface {
+	Interface
+	// NewSession generates a new exchange session. You should use this, rather
+	// than calling GetBlocks, any time you intend to do several related calls
+	// in a row. The exchange can leverage that to be more efficient.
+	NewSession(context.Context) Fetcher
+}
diff --git a/exchange/offline/offline.go b/exchange/offline/offline.go
new file mode 100644
index 0000000000..d2da288b02
--- /dev/null
+++ b/exchange/offline/offline.go
@@ -0,0 +1,74 @@
+// Package offline implements an object that implements the exchange
+// interface but can only serve blocks that are already available locally.
+package offline
+
+import (
+	"context"
+	"fmt"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	exchange "github.com/ipfs/boxo/exchange"
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// Exchange returns a new offline exchange backed by the given blockstore.
+func Exchange(bs blockstore.Blockstore) exchange.Interface {
+	return &offlineExchange{bs: bs}
+}
+
+// offlineExchange implements the Exchange interface but doesn't fetch blocks
+// from the network. For use in offline mode.
+type offlineExchange struct {
+	bs blockstore.Blockstore
+}
+
+// GetBlock returns the block from the local blockstore, or an error if the
+// block could not be retrieved locally for the given key.
+// NB: This function may return before any context deadline expires.
+func (e *offlineExchange) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+	blk, err := e.bs.Get(ctx, k)
+	if ipld.IsNotFound(err) {
+		return nil, fmt.Errorf("block was not found locally (offline): %w", err)
+	}
+	return blk, err
+}
+
+// NotifyNewBlocks tells the exchange that new blocks are available and can be served.
+func (e *offlineExchange) NotifyNewBlocks(ctx context.Context, blocks ...blocks.Block) error {
+	// as an offline exchange we have nothing to do
+	return nil
+}
+
+// Close always returns nil.
+func (e *offlineExchange) Close() error {
+	// NB: exchange doesn't own the blockstore's underlying datastore, so it is
+	// not responsible for closing it.
+	return nil
+}
+
+func (e *offlineExchange) GetBlocks(ctx context.Context, ks []cid.Cid) (<-chan blocks.Block, error) {
+	out := make(chan blocks.Block)
+	go func() {
+		defer close(out)
+		for _, k := range ks {
+			hit, err := e.bs.Get(ctx, k)
+			if err != nil {
+				// a long line of misses should abort when context is cancelled.
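+				// (Misses are otherwise skipped silently; the TODO below is about
+				// one day sending them down the channel to the caller.)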
+				select {
+				// TODO case send misses down channel
+				case <-ctx.Done():
+					return
+				default:
+					continue
+				}
+			}
+			select {
+			case out <- hit:
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	return out, nil
+}
diff --git a/exchange/offline/offline_test.go b/exchange/offline/offline_test.go
new file mode 100644
index 0000000000..2167f3e2e1
--- /dev/null
+++ b/exchange/offline/offline_test.go
@@ -0,0 +1,66 @@
+package offline
+
+import (
+	"context"
+	"testing"
+
+	blockstore "github.com/ipfs/boxo/blockstore"
+	u "github.com/ipfs/boxo/util"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	ds_sync "github.com/ipfs/go-datastore/sync"
+	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
+)
+
+func TestBlockReturnsErr(t *testing.T) {
+	off := Exchange(bstore())
+	c := cid.NewCidV0(u.Hash([]byte("foo")))
+	_, err := off.GetBlock(context.Background(), c)
+	if err != nil {
+		return // as desired
+	}
+	t.Fail()
+}
+
+func TestGetBlocks(t *testing.T) {
+	store := bstore()
+	ex := Exchange(store)
+	g := blocksutil.NewBlockGenerator()
+
+	expected := g.Blocks(2)
+
+	for _, b := range expected {
+		if err := store.Put(context.Background(), b); err != nil {
+			t.Fatal(err)
+		}
+		if err := ex.NotifyNewBlocks(context.Background(), b); err != nil {
+			t.Fail()
+		}
+	}
+
+	request := func() []cid.Cid {
+		var ks []cid.Cid
+
+		for _, b := range expected {
+			ks = append(ks, b.Cid())
+		}
+		return ks
+	}()
+
+	received, err := ex.GetBlocks(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var count int
+	for range received {
+		count++
+	}
+	if len(expected) != count {
+		t.Fail()
+	}
+}
+
+func bstore() blockstore.Blockstore {
+	return blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+}
diff --git a/fetcher/.gitignore b/fetcher/.gitignore
new file mode 100644
index 0000000000..485dee64bc
--- /dev/null
+++ b/fetcher/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go
new file mode 100644
index 0000000000..f332d9f121
--- /dev/null
+++ b/fetcher/fetcher.go
@@ -0,0 +1,51 @@
+package fetcher
+
+import (
+	"context"
+
+	"github.com/ipld/go-ipld-prime"
+)
+
+// Fetcher is an interface for reading from a DAG. Reads may be local or remote, and may employ data exchange
+// protocols like GraphSync and Bitswap.
+type Fetcher interface {
+	// NodeMatching traverses a node graph starting with the provided root node using the given selector node and
+	// possibly crossing block boundaries. Each matched node is passed as a FetchResult to the callback. Errors returned
+	// from the callback will halt the traversal. The sequence of events is: NodeMatching begins, the callback is called zero
+	// or more times with a FetchResult, then NodeMatching returns.
+	NodeMatching(ctx context.Context, root ipld.Node, selector ipld.Node, cb FetchCallback) error
+
+	// BlockOfType fetches a node graph of the provided type corresponding to a single block by link.
+	BlockOfType(ctx context.Context, link ipld.Link, nodePrototype ipld.NodePrototype) (ipld.Node, error)
+
+	// BlockMatchingOfType traverses a node graph starting with the given root link using the given selector node and
+	// possibly crossing block boundaries. The nodes will be typed using the provided prototype. Each matched node is
+	// passed as a FetchResult to the callback. Errors returned from the callback will halt the traversal.
+	// The sequence of events is: BlockMatchingOfType begins, the callback is called zero or more times with a
+	// FetchResult, then BlockMatchingOfType returns.
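+	// (Note: the helpers in fetcher/helpers pass a nil prototype here, and the
+	// blockservice-backed implementation derives a prototype per link instead.)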
+	BlockMatchingOfType(
+		ctx context.Context,
+		root ipld.Link,
+		selector ipld.Node,
+		nodePrototype ipld.NodePrototype,
+		cb FetchCallback) error
+
+	// PrototypeFromLink uses the given link to pick a prototype to build the linked node.
+	PrototypeFromLink(link ipld.Link) (ipld.NodePrototype, error)
+}
+
+// FetchResult is a single node read as part of a DAG operation called on a fetcher
+type FetchResult struct {
+	Node          ipld.Node
+	Path          ipld.Path
+	LastBlockPath ipld.Path
+	LastBlockLink ipld.Link
+}
+
+// FetchCallback is called for each node traversed during a fetch
+type FetchCallback func(result FetchResult) error
+
+// Factory is anything that can create new sessions of the fetcher
+type Factory interface {
+	NewSession(ctx context.Context) Fetcher
+}
diff --git a/fetcher/helpers/block_visitor.go b/fetcher/helpers/block_visitor.go
new file mode 100644
index 0000000000..c999bf2612
--- /dev/null
+++ b/fetcher/helpers/block_visitor.go
@@ -0,0 +1,43 @@
+package helpers
+
+import (
+	"github.com/ipfs/boxo/fetcher"
+	"github.com/ipfs/go-cid"
+	"github.com/ipld/go-ipld-prime"
+	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+)
+
+// BlockResult specifies a node at the top of a block boundary
+type BlockResult struct {
+	Node ipld.Node
+	Link ipld.Link
+}
+
+// BlockCallback is a callback for visiting blocks
+type BlockCallback func(BlockResult) error
+
+// OnBlocks produces a fetch callback that is only invoked when visiting blocks during a fetch
+func OnBlocks(bv BlockCallback) fetcher.FetchCallback {
+	return func(fr fetcher.FetchResult) error {
+		if fr.LastBlockPath.String() == fr.Path.String() {
+			return bv(BlockResult{
+				Node: fr.Node,
+				Link: fr.LastBlockLink,
+			})
+		}
+		return nil
+	}
+}
+
+// OnUniqueBlocks produces a fetch callback that visits each unique block at most once
+func OnUniqueBlocks(bv BlockCallback) fetcher.FetchCallback {
+	set := cid.NewSet()
+	return OnBlocks(func(br BlockResult) error {
+		c := br.Link.(cidlink.Link).Cid
+		if set.Has(c) {
+			return nil
+		}
+		set.Add(c)
+		return bv(br)
+	})
+}
diff --git a/fetcher/helpers/block_visitor_test.go b/fetcher/helpers/block_visitor_test.go
new file mode 100644
index 0000000000..c21bc543cf
--- /dev/null
+++ b/fetcher/helpers/block_visitor_test.go
@@ -0,0 +1,142 @@
+package helpers_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	delay "github.com/ipfs/go-ipfs-delay"
+	testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+	tn "github.com/ipfs/boxo/bitswap/testnet"
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/fetcher/helpers"
+	bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice"
+	"github.com/ipfs/boxo/fetcher/testutil"
+	mockrouting "github.com/ipfs/boxo/routing/mock"
+	"github.com/ipld/go-ipld-prime"
+	"github.com/ipld/go-ipld-prime/fluent"
+	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+	basicnode "github.com/ipld/go-ipld-prime/node/basic"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var bg = context.Background()
+
+func TestFetchGraphToBlocks(t *testing.T) {
+	block3, node3, link3 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) {
+		na.AssembleEntry("three").AssignBool(true)
+	}))
+	block4, node4, link4 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) {
+		na.AssembleEntry("four").AssignBool(true)
+	}))
+	block2, node2, link2 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 2, func(na fluent.MapAssembler) {
+		
na.AssembleEntry("link3").AssignLink(link3) + na.AssembleEntry("link4").AssignLink(link4) + })) + block1, node1, _ := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 3, func(na fluent.MapAssembler) { + na.AssembleEntry("foo").AssignBool(true) + na.AssembleEntry("bar").AssignBool(false) + na.AssembleEntry("nested").CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry("link2").AssignLink(link2) + na.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + err := hasBlock.Exchange.NotifyNewBlocks(bg, block1) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block2) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block3) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block4) + require.NoError(t, err) + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + results := []helpers.BlockResult{} + err = helpers.BlockAll(ctx, session, cidlink.Link{Cid: block1.Cid()}, helpers.OnBlocks(func(res helpers.BlockResult) error { + results = append(results, res) + return nil + })) + require.NoError(t, err) + + assertBlocksInOrder(t, results, 4, map[int]ipld.Node{0: node1, 1: node2, 2: node3, 3: node4}) +} + +func TestFetchGraphToUniqueBlocks(t *testing.T) { + block3, node3, link3 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("three").AssignBool(true) + })) + block2, node2, link2 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 2, func(na fluent.MapAssembler) { + na.AssembleEntry("link3").AssignLink(link3) + })) + block1, node1, _ := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 3, func(na fluent.MapAssembler) { + na.AssembleEntry("foo").AssignBool(true) + na.AssembleEntry("bar").AssignBool(false) + na.AssembleEntry("nested").CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry("link2").AssignLink(link2) + na.AssembleEntry("link3").AssignLink(link3) + na.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + err := hasBlock.Exchange.NotifyNewBlocks(bg, block1) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block2) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block3) + require.NoError(t, err) + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + results := []helpers.BlockResult{} + err = helpers.BlockAll(ctx, session, 
cidlink.Link{Cid: block1.Cid()}, helpers.OnUniqueBlocks(func(res helpers.BlockResult) error {
+		results = append(results, res)
+		return nil
+	}))
+	require.NoError(t, err)
+
+	assertBlocksInOrder(t, results, 3, map[int]ipld.Node{0: node1, 1: node2, 2: node3})
+}
+
+func assertBlocksInOrder(t *testing.T, results []helpers.BlockResult, nodeCount int, nodes map[int]ipld.Node) {
+	for order, res := range results {
+		expectedNode, ok := nodes[order]
+		if ok {
+			assert.Equal(t, expectedNode, res.Node)
+		}
+	}
+
+	assert.Equal(t, nodeCount, len(results))
+}
diff --git a/fetcher/helpers/traversal.go b/fetcher/helpers/traversal.go
new file mode 100644
index 0000000000..5126b0ed81
--- /dev/null
+++ b/fetcher/helpers/traversal.go
@@ -0,0 +1,42 @@
+package helpers
+
+import (
+	"context"
+
+	"github.com/ipfs/boxo/fetcher"
+	"github.com/ipld/go-ipld-prime"
+	basicnode "github.com/ipld/go-ipld-prime/node/basic"
+	"github.com/ipld/go-ipld-prime/traversal/selector"
+	"github.com/ipld/go-ipld-prime/traversal/selector/builder"
+)
+
+var matchAllSelector ipld.Node
+
+func init() {
+	ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
+	matchAllSelector = ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreUnion(
+		ssb.Matcher(),
+		ssb.ExploreAll(ssb.ExploreRecursiveEdge()),
+	)).Node()
+}
+
+// Block fetches a schemaless node graph corresponding to a single block by link.
+func Block(ctx context.Context, f fetcher.Fetcher, link ipld.Link) (ipld.Node, error) {
+	prototype, err := f.PrototypeFromLink(link)
+	if err != nil {
+		return nil, err
+	}
+	return f.BlockOfType(ctx, link, prototype)
+}
+
+// BlockMatching traverses a schemaless node graph starting with the given link using the given selector and possibly crossing
+// block boundaries. Each matched node is passed to the callback as a FetchResult.
+func BlockMatching(ctx context.Context, f fetcher.Fetcher, root ipld.Link, match ipld.Node, cb fetcher.FetchCallback) error {
+	return f.BlockMatchingOfType(ctx, root, match, nil, cb)
+}
+
+// BlockAll traverses all nodes in the graph linked by root. The nodes will be untyped and passed
+// to the callback.
+func BlockAll(ctx context.Context, f fetcher.Fetcher, root ipld.Link, cb fetcher.FetchCallback) error {
+	return f.BlockMatchingOfType(ctx, root, matchAllSelector, nil, cb)
+}
diff --git a/fetcher/impl/blockservice/fetcher.go b/fetcher/impl/blockservice/fetcher.go
new file mode 100644
index 0000000000..cbacd5984b
--- /dev/null
+++ b/fetcher/impl/blockservice/fetcher.go
@@ -0,0 +1,148 @@
+package bsfetcher
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/fetcher"
+	"github.com/ipld/go-ipld-prime"
+	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+	basicnode "github.com/ipld/go-ipld-prime/node/basic"
+	"github.com/ipld/go-ipld-prime/schema"
+	"github.com/ipld/go-ipld-prime/traversal"
+	"github.com/ipld/go-ipld-prime/traversal/selector"
+)
+
+type fetcherSession struct {
+	linkSystem   ipld.LinkSystem
+	protoChooser traversal.LinkTargetNodePrototypeChooser
+}
+
+// FetcherConfig defines a configuration object from which Fetcher instances are constructed
+type FetcherConfig struct {
+	blockService     blockservice.BlockService
+	NodeReifier      ipld.NodeReifier
+	PrototypeChooser traversal.LinkTargetNodePrototypeChooser
+}
+
+// NewFetcherConfig creates a FetcherConfig from which sessions may be created and nodes retrieved.
+func NewFetcherConfig(blockService blockservice.BlockService) FetcherConfig {
+	return FetcherConfig{
+		blockService:     blockService,
+		PrototypeChooser: DefaultPrototypeChooser,
+	}
+}
+
+// NewSession creates a session from which nodes may be retrieved.
+// The session ends when the provided context is canceled.
+func (fc FetcherConfig) NewSession(ctx context.Context) fetcher.Fetcher {
+	return fc.FetcherWithSession(ctx, blockservice.NewSession(ctx, fc.blockService))
+}
+
+func (fc FetcherConfig) FetcherWithSession(ctx context.Context, s *blockservice.Session) fetcher.Fetcher {
+	ls := cidlink.DefaultLinkSystem()
+	// while we may be loading blocks remotely, they are already hash-verified by the time they load
+	// into ipld-prime
+	ls.TrustedStorage = true
+	ls.StorageReadOpener = blockOpener(ctx, s)
+	ls.NodeReifier = fc.NodeReifier
+
+	protoChooser := fc.PrototypeChooser
+	return &fetcherSession{linkSystem: ls, protoChooser: protoChooser}
+}
+
+// WithReifier derives a different fetcher factory from the same source but
+// with a chosen NodeReifier for pathing semantics.
+func (fc FetcherConfig) WithReifier(nr ipld.NodeReifier) fetcher.Factory {
+	return FetcherConfig{
+		blockService:     fc.blockService,
+		NodeReifier:      nr,
+		PrototypeChooser: fc.PrototypeChooser,
+	}
+}
+
+// interface check
+var _ fetcher.Factory = FetcherConfig{}
+
+// BlockOfType fetches a node graph of the provided type corresponding to a single block by link.
+func (f *fetcherSession) BlockOfType(ctx context.Context, link ipld.Link, ptype ipld.NodePrototype) (ipld.Node, error) {
+	return f.linkSystem.Load(ipld.LinkContext{}, link, ptype)
+}
+
+func (f *fetcherSession) nodeMatching(ctx context.Context, initialProgress traversal.Progress, node ipld.Node, match ipld.Node, cb fetcher.FetchCallback) error {
+	matchSelector, err := selector.ParseSelector(match)
+	if err != nil {
+		return err
+	}
+	return initialProgress.WalkMatching(node, matchSelector, func(prog traversal.Progress, n ipld.Node) error {
+		return cb(fetcher.FetchResult{
+			Node:          n,
+			Path:          prog.Path,
+			LastBlockPath: prog.LastBlock.Path,
+			LastBlockLink: prog.LastBlock.Link,
+		})
+	})
+}
+
+func (f *fetcherSession) blankProgress(ctx context.Context) traversal.Progress {
+	return traversal.Progress{
+		Cfg: &traversal.Config{
+			LinkSystem:                     f.linkSystem,
+			LinkTargetNodePrototypeChooser: f.protoChooser,
+		},
+	}
+}
+
+func (f *fetcherSession) NodeMatching(ctx context.Context, node ipld.Node, match ipld.Node, cb fetcher.FetchCallback) error {
+	return f.nodeMatching(ctx, f.blankProgress(ctx), node, match, cb)
+}
+
+func (f *fetcherSession) BlockMatchingOfType(ctx context.Context, root ipld.Link, match ipld.Node,
+	_ ipld.NodePrototype, cb fetcher.FetchCallback) error {
+
+	// retrieve first node
+	prototype, err := f.PrototypeFromLink(root)
+	if err != nil {
+		return err
+	}
+	node, err := f.BlockOfType(ctx, root, prototype)
+	if err != nil {
+		return err
+	}
+
+	progress := f.blankProgress(ctx)
+	progress.LastBlock.Link = root
+	return f.nodeMatching(ctx, progress, node, match, cb)
+}
+
+func (f *fetcherSession) PrototypeFromLink(lnk ipld.Link) (ipld.NodePrototype, error) {
+	return f.protoChooser(lnk, ipld.LinkContext{})
+}
+
+// DefaultPrototypeChooser supports choosing the prototype from the link and falling
+// back to a basicnode.Prototype.Any builder
+var DefaultPrototypeChooser = func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) {
+	if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok {
+		return tlnkNd.LinkTargetNodePrototype(), nil
+	}
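+	// Otherwise fall back to an untyped basicnode prototype.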
+ return basicnode.Prototype.Any, nil +} + +func blockOpener(ctx context.Context, bs *blockservice.Session) ipld.BlockReadOpener { + return func(_ ipld.LinkContext, lnk ipld.Link) (io.Reader, error) { + cidLink, ok := lnk.(cidlink.Link) + if !ok { + return nil, fmt.Errorf("invalid link type for loading: %v", lnk) + } + + blk, err := bs.GetBlock(ctx, cidLink.Cid) + if err != nil { + return nil, err + } + + return bytes.NewReader(blk.RawData()), nil + } +} diff --git a/fetcher/impl/blockservice/fetcher_test.go b/fetcher/impl/blockservice/fetcher_test.go new file mode 100644 index 0000000000..666a36e336 --- /dev/null +++ b/fetcher/impl/blockservice/fetcher_test.go @@ -0,0 +1,369 @@ +package bsfetcher_test + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + + delay "github.com/ipfs/go-ipfs-delay" + testinstance "github.com/ipfs/boxo/bitswap/testinstance" + tn "github.com/ipfs/boxo/bitswap/testnet" + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/fetcher" + "github.com/ipfs/boxo/fetcher/helpers" + bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice" + "github.com/ipfs/boxo/fetcher/testutil" + mockrouting "github.com/ipfs/boxo/routing/mock" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/fluent" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var bg = context.Background() + +func TestFetchIPLDPrimeNode(t *testing.T) { + block, node, _ := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 3, func(na fluent.MapAssembler) { + na.AssembleEntry("foo").AssignBool(true) + na.AssembleEntry("bar").AssignBool(false) + na.AssembleEntry("nested").CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + err := hasBlock.Exchange.NotifyNewBlocks(bg, block) + require.NoError(t, err) + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + retrievedNode, err := helpers.Block(ctx, session, cidlink.Link{Cid: block.Cid()}) + require.NoError(t, err) + assert.Equal(t, node, retrievedNode) +} + +func TestFetchIPLDGraph(t *testing.T) { + block3, node3, link3 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("three").AssignBool(true) + })) + block4, node4, link4 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("four").AssignBool(true) + })) + block2, node2, link2 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 2, func(na fluent.MapAssembler) { + na.AssembleEntry("link3").AssignLink(link3) + na.AssembleEntry("link4").AssignLink(link4) + })) + block1, node1, _ := 
testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 3, func(na fluent.MapAssembler) { + na.AssembleEntry("foo").AssignBool(true) + na.AssembleEntry("bar").AssignBool(false) + na.AssembleEntry("nested").CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry("link2").AssignLink(link2) + na.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + err := hasBlock.Exchange.NotifyNewBlocks(bg, block1) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block2) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block3) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block4) + require.NoError(t, err) + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + results := []fetcher.FetchResult{} + err = helpers.BlockAll(ctx, session, cidlink.Link{Cid: block1.Cid()}, func(res fetcher.FetchResult) error { + results = append(results, res) + return nil + }) + require.NoError(t, err) + + assertNodesInOrder(t, results, 10, map[int]ipld.Node{0: node1, 4: node2, 5: node3, 7: node4}) +} + +func TestFetchIPLDPath(t *testing.T) { + block5, node5, link5 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("five").AssignBool(true) + })) + block3, _, link3 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("three").AssignLink(link5) + })) + block4, _, link4 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("four").AssignBool(true) + })) + block2, _, link2 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 2, func(na fluent.MapAssembler) { + na.AssembleEntry("link3").AssignLink(link3) + na.AssembleEntry("link4").AssignLink(link4) + })) + block1, _, _ := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 3, func(na fluent.MapAssembler) { + na.AssembleEntry("foo").AssignBool(true) + na.AssembleEntry("bar").AssignBool(false) + na.AssembleEntry("nested").CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry("link2").AssignLink(link2) + na.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + for _, blk := range []blocks.Block{block1, block2, block3, block4, block5} { + err := hasBlock.Exchange.NotifyNewBlocks(bg, blk) + require.NoError(t, err) + } + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 
time.Second) + defer cancel() + + path := strings.Split("nested/link2/link3/three", "/") + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + spec := ssb.Matcher() + explorePath := func(p string, s builder.SelectorSpec) builder.SelectorSpec { + return ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { efsb.Insert(p, s) }) + } + for i := len(path) - 1; i >= 0; i-- { + spec = explorePath(path[i], spec) + } + sel := spec.Node() + + results := []fetcher.FetchResult{} + err := helpers.BlockMatching(ctx, session, cidlink.Link{Cid: block1.Cid()}, sel, func(res fetcher.FetchResult) error { + results = append(results, res) + return nil + }) + require.NoError(t, err) + + assertNodesInOrder(t, results, 1, map[int]ipld.Node{0: node5}) +} + +func TestHelpers(t *testing.T) { + block3, node3, link3 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("three").AssignBool(true) + })) + block4, node4, link4 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) { + na.AssembleEntry("four").AssignBool(true) + })) + block2, node2, link2 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 2, func(na fluent.MapAssembler) { + na.AssembleEntry("link3").AssignLink(link3) + na.AssembleEntry("link4").AssignLink(link4) + })) + block1, node1, _ := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 3, func(na fluent.MapAssembler) { + na.AssembleEntry("foo").AssignBool(true) + na.AssembleEntry("bar").AssignBool(false) + na.AssembleEntry("nested").CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry("link2").AssignLink(link2) + na.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + err := hasBlock.Exchange.NotifyNewBlocks(bg, block1) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block2) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block3) + require.NoError(t, err) + err = hasBlock.Exchange.NotifyNewBlocks(bg, block4) + require.NoError(t, err) + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + + t.Run("Block retrieves node", func(t *testing.T) { + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + node, err := helpers.Block(ctx, session, cidlink.Link{Cid: block1.Cid()}) + require.NoError(t, err) + + assert.Equal(t, node, node1) + }) + + t.Run("BlockMatching retrieves nodes matching selector", func(t *testing.T) { + // limit recursion depth to 2 nodes and expect to get only 2 blocks (4 nodes) + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype__Any{}) + sel := ssb.ExploreRecursive(selector.RecursionLimitDepth(2), ssb.ExploreUnion( + ssb.Matcher(), + ssb.ExploreAll(ssb.ExploreRecursiveEdge()), + )).Node() + + fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) + session := fetcherConfig.NewSession(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + results := []fetcher.FetchResult{} + err = 
helpers.BlockMatching(ctx, session, cidlink.Link{Cid: block1.Cid()}, sel, func(res fetcher.FetchResult) error {
+			results = append(results, res)
+			return nil
+		})
+		require.NoError(t, err)
+
+		assertNodesInOrder(t, results, 4, map[int]ipld.Node{0: node1, 4: node2})
+	})
+
+	t.Run("BlockAllOfType retrieves all nodes with a schema", func(t *testing.T) {
+		// walk the whole graph: all 10 nodes across the 4 blocks
+		fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter)
+		session := fetcherConfig.NewSession(context.Background())
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		defer cancel()
+
+		results := []fetcher.FetchResult{}
+		err = helpers.BlockAll(ctx, session, cidlink.Link{Cid: block1.Cid()}, func(res fetcher.FetchResult) error {
+			results = append(results, res)
+			return nil
+		})
+		require.NoError(t, err)
+
+		assertNodesInOrder(t, results, 10, map[int]ipld.Node{0: node1, 4: node2, 5: node3, 7: node4})
+	})
+}
+
+func assertNodesInOrder(t *testing.T, results []fetcher.FetchResult, nodeCount int, nodes map[int]ipld.Node) {
+	for order, res := range results {
+		expectedNode, ok := nodes[order]
+		if ok {
+			assert.Equal(t, expectedNode, res.Node)
+		}
+	}
+
+	assert.Equal(t, nodeCount, len(results))
+}
+
+type selfLoader struct {
+	ipld.Node
+	ctx context.Context
+	ls  *ipld.LinkSystem
+}
+
+func (sl *selfLoader) LookupByString(key string) (ipld.Node, error) {
+	nd, err := sl.Node.LookupByString(key)
+	if err != nil {
+		return nd, err
+	}
+	if nd.Kind() == ipld.Kind_Link {
+		lnk, _ := nd.AsLink()
+		nd, err = sl.ls.Load(ipld.LinkContext{Ctx: sl.ctx}, lnk, basicnode.Prototype.Any)
+	}
+	return nd, err
+}
+
+func TestNodeReification(t *testing.T) {
+	// demonstrates how to use the augment chooser to build an ADL that self loads its own nodes
+	block3, node3, link3 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) {
+		na.AssembleEntry("three").AssignBool(true)
+	}))
+	block4, node4, link4 := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 1, func(na fluent.MapAssembler) {
+		na.AssembleEntry("four").AssignBool(true)
+	}))
+	block2, _, _ := testutil.EncodeBlock(fluent.MustBuildMap(basicnode.Prototype__Map{}, 2, func(na fluent.MapAssembler) {
+		na.AssembleEntry("link3").AssignLink(link3)
+		na.AssembleEntry("link4").AssignLink(link4)
+	}))
+
+	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond))
+	ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
+	defer ig.Close()
+
+	peers := ig.Instances(2)
+	hasBlock := peers[0]
+	defer hasBlock.Exchange.Close()
+
+	err := hasBlock.Exchange.NotifyNewBlocks(bg, block2)
+	require.NoError(t, err)
+	err = hasBlock.Exchange.NotifyNewBlocks(bg, block3)
+	require.NoError(t, err)
+	err = hasBlock.Exchange.NotifyNewBlocks(bg, block4)
+	require.NoError(t, err)
+
+	wantsBlock := peers[1]
+	defer wantsBlock.Exchange.Close()
+
+	wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange)
+	fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter)
+	nodeReifier := func(lnkCtx ipld.LinkContext, nd ipld.Node, ls *ipld.LinkSystem) (ipld.Node, error) {
+		return &selfLoader{Node: nd, ctx: lnkCtx.Ctx, ls: ls}, nil
+	}
+	fetcherConfig.NodeReifier = nodeReifier
+	session := fetcherConfig.NewSession(context.Background())
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	retrievedNode, err := helpers.Block(ctx, session, cidlink.Link{Cid: block2.Cid()})
+
require.NoError(t, err) + + // instead of getting links back, we automatically load the nodes + + retrievedNode3, err := retrievedNode.LookupByString("link3") + require.NoError(t, err) + underlying3 := retrievedNode3.(*selfLoader).Node + assert.Equal(t, node3, underlying3) + + retrievedNode4, err := retrievedNode.LookupByString("link4") + require.NoError(t, err) + underlying4 := retrievedNode4.(*selfLoader).Node + assert.Equal(t, node4, underlying4) + +} diff --git a/fetcher/testutil/testutil.go b/fetcher/testutil/testutil.go new file mode 100644 index 0000000000..6772a628b7 --- /dev/null +++ b/fetcher/testutil/testutil.go @@ -0,0 +1,51 @@ +package testutil + +import ( + "bytes" + "fmt" + "io" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + + // used to make sure we have dagcbor encoding + _ "github.com/ipld/go-ipld-prime/codec/dagcbor" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" +) + +// EncodeBlock produces an encoded block from a node +func EncodeBlock(n ipld.Node) (blocks.Block, ipld.Node, ipld.Link) { + ls := cidlink.DefaultLinkSystem() + var b blocks.Block + lb := cidlink.LinkPrototype{Prefix: cid.Prefix{ + Version: 1, + Codec: 0x71, + MhType: 0x17, + MhLength: 20, + }} + ls.StorageReadOpener = func(ipld.LinkContext, ipld.Link) (io.Reader, error) { + return bytes.NewReader(b.RawData()), nil + } + ls.StorageWriteOpener = func(ipld.LinkContext) (io.Writer, ipld.BlockWriteCommitter, error) { + buf := bytes.Buffer{} + return &buf, func(lnk ipld.Link) error { + clnk, ok := lnk.(cidlink.Link) + if !ok { + return fmt.Errorf("incorrect link type %v", lnk) + } + var err error + b, err = blocks.NewBlockWithCid(buf.Bytes(), clnk.Cid) + return err + }, nil + } + lnk, err := ls.Store(ipld.LinkContext{}, lb, n) + if err != nil { + panic(err) + } + ln, err := ls.Load(ipld.LinkContext{}, lnk, n.Prototype()) + if err != nil { + panic(err) + } + return b, ln, lnk +} diff --git a/files/README.md b/files/README.md new file mode 100644 index 0000000000..5de61887ef --- /dev/null +++ b/files/README.md @@ -0,0 +1,22 @@ +# boxo/files + +> File interfaces and utils used in GO implementations of [IPFS](https://ipfs.tech) + +## Documentation + +https://pkg.go.dev/github.com/ipfs/boxo/files + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/boxo/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) + +## License + +MIT + diff --git a/files/file.go b/files/file.go new file mode 100644 index 0000000000..7ac1fc98a7 --- /dev/null +++ b/files/file.go @@ -0,0 +1,96 @@ +package files + +import ( + "errors" + "io" + "os" +) + +var ( + ErrNotDirectory = errors.New("file isn't a directory") + ErrNotReader = errors.New("file isn't a regular file") + + ErrNotSupported = errors.New("operation not supported") +) + +// Node is a common interface for files, directories and other special files +type Node interface { + io.Closer + + // Size returns size of this file (if this file is a directory, total size of + // all files stored in the tree should be returned). 
Some implementations may
+	// choose not to implement this
+	Size() (int64, error)
+}
+
+// File represents a regular Unix file
+type File interface {
+	Node
+
+	io.Reader
+	io.Seeker
+}
+
+// DirEntry exposes information about a directory entry
+type DirEntry interface {
+	// Name returns the base name of this entry, which is the base name of the
+	// referenced file
+	Name() string
+
+	// Node returns the file referenced by this DirEntry
+	Node() Node
+}
+
+// DirIterator is an iterator over directory entries.
+// See Directory.Entries for more
+type DirIterator interface {
+	// DirEntry holds information about the current directory entry.
+	// Note that after creating a new iterator you MUST call Next() at least once
+	// before accessing these methods. Calling these methods without prior calls
+	// to Next() and after Next() returned false may result in undefined behavior
+	DirEntry
+
+	// Next advances the iterator to the next file.
+	Next() bool
+
+	// Err may return an error after the previous call to Next() returned `false`.
+	// If the previous call to Next() returned `true`, Err() is guaranteed to
+	// return nil
+	Err() error
+}
+
+// Directory is a special file which can link to any number of files.
+type Directory interface {
+	Node
+
+	// Entries returns a stateful iterator over directory entries. The iterator
+	// may consume the Directory state so it must be called only once (this
+	// applies specifically to the multipartIterator).
+	//
+	// Example usage:
+	//
+	// it := dir.Entries()
+	// for it.Next() {
+	//   name := it.Name()
+	//   file := it.Node()
+	//   [...]
+	// }
+	// if it.Err() != nil {
+	//   return err
+	// }
+	//
+	// Note that you can't store the result of it.Node() and use it after
+	// advancing the iterator
+	Entries() DirIterator
+}
+
+// FileInfo exposes information on files in the local filesystem
+type FileInfo interface {
+	Node
+
+	// AbsPath returns the full real file path.
+	AbsPath() string
+
+	// Stat returns os.Stat of this file, may be nil for some files
+	Stat() os.FileInfo
+}
diff --git a/files/file_test.go b/files/file_test.go
new file mode 100644
index 0000000000..8c6c62229d
--- /dev/null
+++ b/files/file_test.go
@@ -0,0 +1,142 @@
+package files
+
+import (
+	"io"
+	"mime/multipart"
+	"strings"
+	"testing"
+)
+
+func TestSliceFiles(t *testing.T) {
+	sf := NewMapDirectory(map[string]Node{
+		"1": NewBytesFile([]byte("Some text!\n")),
+		"2": NewBytesFile([]byte("beep")),
+		"3": NewBytesFile([]byte("boop")),
+	})
+
+	CheckDir(t, sf, []Event{
+		{
+			kind:  TFile,
+			name:  "1",
+			value: "Some text!\n",
+		},
+		{
+			kind:  TFile,
+			name:  "2",
+			value: "beep",
+		},
+		{
+			kind:  TFile,
+			name:  "3",
+			value: "boop",
+		},
+	})
+}
+
+func TestReaderFiles(t *testing.T) {
+	message := "beep boop"
+	rf := NewBytesFile([]byte(message))
+	buf := make([]byte, len(message))
+
+	if n, err := rf.Read(buf); n == 0 || err != nil {
+		t.Fatal("Expected to be able to read")
+	}
+	if err := rf.Close(); err != nil {
+		t.Fatal("Should be able to close")
+	}
+	if n, err := rf.Read(buf); n != 0 || err != io.EOF {
+		t.Fatal("Expected EOF when reading after close")
+	}
+}
+
+func TestMultipartFiles(t *testing.T) {
+	data := `
+--Boundary!
+Content-Type: text/plain
+Content-Disposition: file; filename="name"
+Some-Header: beep
+
+beep
+--Boundary!
+Content-Type: application/x-directory
+Content-Disposition: file; filename="dir"
+
+--Boundary!
+Content-Type: text/plain
+Content-Disposition: file; filename="dir/nested"
+
+some content
+--Boundary!
+Content-Type: application/symlink
+Content-Disposition: file; filename="dir/simlynk"
+
+anotherfile
+--Boundary!
+Content-Type: text/plain
+Content-Disposition: file; filename="implicit1/implicit2/deep_implicit"
+
+implicit file1
+--Boundary!
+Content-Type: text/plain
+Content-Disposition: file; filename="implicit1/shallow_implicit"
+
+implicit file2
+--Boundary!--
+
+`
+
+	reader := strings.NewReader(data)
+	mpReader := multipart.NewReader(reader, "Boundary!")
+	dir, err := NewFileFromPartReader(mpReader, multipartFormdataType)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	CheckDir(t, dir, []Event{
+		{
+			kind:  TFile,
+			name:  "name",
+			value: "beep",
+		},
+		{
+			kind: TDirStart,
+			name: "dir",
+		},
+		{
+			kind:  TFile,
+			name:  "nested",
+			value: "some content",
+		},
+		{
+			kind:  TSymlink,
+			name:  "simlynk",
+			value: "anotherfile",
+		},
+		{
+			kind: TDirEnd,
+		},
+		{
+			kind: TDirStart,
+			name: "implicit1",
+		},
+		{
+			kind: TDirStart,
+			name: "implicit2",
+		},
+		{
+			kind:  TFile,
+			name:  "deep_implicit",
+			value: "implicit file1",
+		},
+		{
+			kind: TDirEnd,
+		},
+		{
+			kind:  TFile,
+			name:  "shallow_implicit",
+			value: "implicit file2",
+		},
+		{
+			kind: TDirEnd,
+		},
+	})
+}
diff --git a/files/filewriter.go b/files/filewriter.go
new file mode 100644
index 0000000000..bf4bcf6494
--- /dev/null
+++ b/files/filewriter.go
@@ -0,0 +1,59 @@
+package files
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+var ErrInvalidDirectoryEntry = errors.New("invalid directory entry name")
+var ErrPathExistsOverwrite = errors.New("path already exists and overwriting is not allowed")
+
+// WriteTo writes the given node to the local filesystem at fpath.
+func WriteTo(nd Node, fpath string) error {
+	if _, err := os.Lstat(fpath); err == nil {
+		return ErrPathExistsOverwrite
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+	switch nd := nd.(type) {
+	case *Symlink:
+		return os.Symlink(nd.Target, fpath)
+	case File:
+		f, err := createNewFile(fpath)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		_, err = io.Copy(f, nd)
+		if err != nil {
+			return err
+		}
+		return nil
+	case Directory:
+		err := os.Mkdir(fpath, 0777)
+		if err != nil {
+			return err
+		}
+
+		entries := nd.Entries()
+		for entries.Next() {
+			entryName := entries.Name()
+			if entryName == "" ||
+				entryName == "." ||
+				entryName == ".."
|| + !isValidFilename(entryName) { + return ErrInvalidDirectoryEntry + } + child := filepath.Join(fpath, entryName) + if err := WriteTo(entries.Node(), child); err != nil { + return err + } + } + return entries.Err() + default: + return fmt.Errorf("file type %T at %q is not supported", nd, fpath) + } +} diff --git a/files/filewriter_test.go b/files/filewriter_test.go new file mode 100644 index 0000000000..00a0b1ce25 --- /dev/null +++ b/files/filewriter_test.go @@ -0,0 +1,100 @@ +package files + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteTo(t *testing.T) { + sf := NewMapDirectory(map[string]Node{ + "1": NewBytesFile([]byte("Some text!\n")), + "2": NewBytesFile([]byte("beep")), + "3": NewMapDirectory(nil), + "4": NewBytesFile([]byte("boop")), + "5": NewMapDirectory(map[string]Node{ + "a": NewBytesFile([]byte("foobar")), + }), + }) + tmppath, err := os.MkdirTemp("", "files-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmppath) + + path := filepath.Join(tmppath, "output") + + err = WriteTo(sf, path) + if err != nil { + t.Fatal(err) + } + expected := map[string]string{ + ".": "", + "1": "Some text!\n", + "2": "beep", + "3": "", + "4": "boop", + "5": "", + filepath.FromSlash("5/a"): "foobar", + } + err = filepath.Walk(path, func(cpath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rpath, err := filepath.Rel(path, cpath) + if err != nil { + return err + } + data, ok := expected[rpath] + if !ok { + return fmt.Errorf("expected something at %q", rpath) + } + delete(expected, rpath) + + if info.IsDir() { + if data != "" { + return fmt.Errorf("expected a directory at %q", rpath) + } + } else { + actual, err := os.ReadFile(cpath) + if err != nil { + return err + } + if string(actual) != data { + return fmt.Errorf("expected %q, got %q", data, string(actual)) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } + if len(expected) > 0 { + t.Fatalf("failed to find: %#v", expected) + } +} + +func TestDontAllowOverwrite(t *testing.T) { + tmppath, err := os.MkdirTemp("", "files-test") + assert.NoError(t, err) + defer os.RemoveAll(tmppath) + + path := filepath.Join(tmppath, "output") + + // Check we can actually write to the output path before trying invalid entries + // and leave an existing entry to test overwrite protection. 
+	assert.NoError(t, WriteTo(NewMapDirectory(map[string]Node{
+		"existing-entry": NewBytesFile(nil),
+	}), path))
+
+	assert.Equal(t, ErrPathExistsOverwrite, WriteTo(NewBytesFile(nil), filepath.Join(path)))
+	assert.Equal(t, ErrPathExistsOverwrite, WriteTo(NewBytesFile(nil), filepath.Join(path, "existing-entry")))
+	// The directory in `path` has already been created so this should fail too:
+	assert.Equal(t, ErrPathExistsOverwrite, WriteTo(NewMapDirectory(map[string]Node{
+		"any-name": NewBytesFile(nil),
+	}), filepath.Join(path)))
+	os.RemoveAll(path)
+}
diff --git a/files/filewriter_unix.go b/files/filewriter_unix.go
new file mode 100644
index 0000000000..98d0400188
--- /dev/null
+++ b/files/filewriter_unix.go
@@ -0,0 +1,19 @@
+//go:build darwin || linux || netbsd || openbsd || freebsd || dragonfly
+
+package files
+
+import (
+	"os"
+	"strings"
+	"syscall"
+)
+
+var invalidChars = `/` + "\x00"
+
+func isValidFilename(filename string) bool {
+	return !strings.ContainsAny(filename, invalidChars)
+}
+
+func createNewFile(path string) (*os.File, error) {
+	return os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY|syscall.O_NOFOLLOW, 0666)
+}
diff --git a/files/filewriter_unix_test.go b/files/filewriter_unix_test.go
new file mode 100644
index 0000000000..ffc33ce519
--- /dev/null
+++ b/files/filewriter_unix_test.go
@@ -0,0 +1,33 @@
+//go:build darwin || linux || netbsd || openbsd
+
+package files
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWriteToInvalidPaths(t *testing.T) {
+	tmppath, err := os.MkdirTemp("", "files-test")
+	assert.NoError(t, err)
+	defer os.RemoveAll(tmppath)
+
+	path := filepath.Join(tmppath, "output")
+
+	// Check we can actually write to the output path before trying invalid entries.
+	assert.NoError(t, WriteTo(NewMapDirectory(map[string]Node{
+		"valid-entry": NewBytesFile(nil),
+	}), path))
+	os.RemoveAll(path)
+
+	// Now try all invalid entry names
+	for _, entryName := range []string{"", ".", "..", "/", "", "not/a/base/path"} {
+		assert.Equal(t, ErrInvalidDirectoryEntry, WriteTo(NewMapDirectory(map[string]Node{
+			entryName: NewBytesFile(nil),
+		}), filepath.Join(path)))
+		os.RemoveAll(path)
+	}
+}
diff --git a/files/filewriter_windows.go b/files/filewriter_windows.go
new file mode 100644
index 0000000000..a5d6261998
--- /dev/null
+++ b/files/filewriter_windows.go
@@ -0,0 +1,45 @@
+//go:build windows
+
+package files
+
+import (
+	"os"
+	"strings"
+)
+
+var invalidChars = `<>:"/\|?*` + "\x00"
+
+var reservedNames = map[string]struct{}{
+	"CON":  {},
+	"PRN":  {},
+	"AUX":  {},
+	"NUL":  {},
+	"COM1": {},
+	"COM2": {},
+	"COM3": {},
+	"COM4": {},
+	"COM5": {},
+	"COM6": {},
+	"COM7": {},
+	"COM8": {},
+	"COM9": {},
+	"LPT1": {},
+	"LPT2": {},
+	"LPT3": {},
+	"LPT4": {},
+	"LPT5": {},
+	"LPT6": {},
+	"LPT7": {},
+	"LPT8": {},
+	"LPT9": {},
+}
+
+func isValidFilename(filename string) bool {
+	_, isReservedName := reservedNames[filename]
+	return !strings.ContainsAny(filename, invalidChars) &&
+		!isReservedName
+}
+
+func createNewFile(path string) (*os.File, error) {
+	return os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0666)
+}
diff --git a/files/filewriter_windows_test.go b/files/filewriter_windows_test.go
new file mode 100644
index 0000000000..ca0222ba30
--- /dev/null
+++ b/files/filewriter_windows_test.go
@@ -0,0 +1,35 @@
+//go:build windows
+
+package files
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWriteToInvalidPaths(t *testing.T) {
+	tmppath, err := os.MkdirTemp("", "files-test")
+	assert.NoError(t, err)
+	defer os.RemoveAll(tmppath)
+
+	path := filepath.Join(tmppath, "output")
+
+	// Check we can actually write to the output path before trying invalid entries.
+	assert.NoError(t, WriteTo(NewMapDirectory(map[string]Node{
+		"valid-entry": NewBytesFile(nil),
+	}), path))
+	os.RemoveAll(path)
+
+	// Now try all invalid entry names
+	for _, entryName := range []string{"", ".", "..", "/", "", "not/a/base/path",
+		"<", ">", ":", "\"", "\\", "|", "?", "*", "\x00",
+		"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"} {
+		assert.Equal(t, ErrInvalidDirectoryEntry, WriteTo(NewMapDirectory(map[string]Node{
+			entryName: NewBytesFile(nil),
+		}), filepath.Join(path)))
+		os.RemoveAll(path)
+	}
+}
diff --git a/files/filter.go b/files/filter.go
new file mode 100644
index 0000000000..6b90f1f347
--- /dev/null
+++ b/files/filter.go
@@ -0,0 +1,49 @@
+package files
+
+import (
+	"os"
+
+	ignore "github.com/crackcomm/go-gitignore"
+)
+
+// Filter represents a set of rules for determining if a file should be included or excluded.
+// A rule follows the syntax for patterns used in .gitignore files for specifying untracked files.
+// Examples:
+// foo.txt
+// *.app
+// bar/
+// **/baz
+// fizz/**
+type Filter struct {
+	// IncludeHidden - Include hidden files
+	IncludeHidden bool
+	// Rules - File filter rules
+	Rules *ignore.GitIgnore
+}
+
+// NewFilter creates a new file filter from a .gitignore file and/or a list of ignore rules.
+// An ignoreFile is a path to a file with .gitignore-style patterns to exclude, one per line.
+// rules is an array of strings representing .gitignore-style patterns.
+// For reference on ignore rule syntax, see https://git-scm.com/docs/gitignore
+func NewFilter(ignoreFile string, rules []string, includeHidden bool) (*Filter, error) {
+	var ignoreRules *ignore.GitIgnore
+	var err error
+	if ignoreFile == "" {
+		ignoreRules, err = ignore.CompileIgnoreLines(rules...)
+	} else {
+		ignoreRules, err = ignore.CompileIgnoreFileAndLines(ignoreFile, rules...)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &Filter{IncludeHidden: includeHidden, Rules: ignoreRules}, nil
+}
+
+// ShouldExclude takes an os.FileInfo object and applies rules to determine if its target should be excluded.
+func (filter *Filter) ShouldExclude(fileInfo os.FileInfo) (result bool) {
+	path := fileInfo.Name()
+	if !filter.IncludeHidden && isHidden(fileInfo) {
+		return true
+	}
+	return filter.Rules.MatchesPath(path)
+}
diff --git a/files/filter_test.go b/files/filter_test.go
new file mode 100644
index 0000000000..8ce25ee3bd
--- /dev/null
+++ b/files/filter_test.go
@@ -0,0 +1,53 @@
+package files
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+type mockFileInfo struct {
+	os.FileInfo
+	name string
+}
+
+func (m *mockFileInfo) Name() string {
+	return m.name
+}
+
+func (m *mockFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _ os.FileInfo = &mockFileInfo{}
+
+func TestFileFilter(t *testing.T) {
+	includeHidden := true
+	filter, err := NewFilter("", nil, includeHidden)
+	if err != nil {
+		t.Errorf("failed to create filter with empty rules")
+	}
+	if filter.IncludeHidden != includeHidden {
+		t.Errorf("new filter should include hidden files")
+	}
+	_, err = NewFilter("ignoreFileThatDoesNotExist", nil, false)
+	if err == nil {
+		t.Errorf("creating a filter with an invalid ignore file path should have failed")
+	}
+	tmppath, err := os.MkdirTemp("", "filter-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ignoreFilePath := filepath.Join(tmppath, "ignoreFile")
+	ignoreFileContents := []byte("a.txt")
+	if err := os.WriteFile(ignoreFilePath, ignoreFileContents, 0666); err != nil {
+		t.Fatal(err)
+	}
+	filterWithIgnoreFile, err := NewFilter(ignoreFilePath, nil, false)
+	if err != nil {
+		t.Errorf("failed to create filter with ignore file")
+	}
+	if !filterWithIgnoreFile.ShouldExclude(&mockFileInfo{name: "a.txt"}) {
+		t.Errorf("filter should've excluded expected file from ignoreFile: %s", "a.txt")
+	}
+}
diff --git a/files/helpers_test.go b/files/helpers_test.go
new file mode 100644
index 0000000000..0180b8f270
--- /dev/null
+++ b/files/helpers_test.go
@@ -0,0 +1,126 @@
+package files
+
+import (
+	"io"
+	"testing"
+)
+
+type Kind int
+
+const (
+	TFile Kind = iota
+	TSymlink
+	TDirStart
+	TDirEnd
+)
+
+type Event struct {
+	kind  Kind
+	name  string
+	value string
+}
+
+func CheckDir(t *testing.T, dir Directory, expected []Event) {
+	expectedIndex := 0
+	expect := func() (Event, int) {
+		t.Helper()
+
+		if expectedIndex > len(expected) {
+			t.Fatal("no more expected entries")
+		}
+		i := expectedIndex
+		expectedIndex++
+
+		// Add an implicit "end" event at the end. It makes this
+		// function a bit easier to write.
+ next := Event{kind: TDirEnd} + if i < len(expected) { + next = expected[i] + } + return next, i + } + var check func(d Directory) + check = func(d Directory) { + it := d.Entries() + + for it.Next() { + next, i := expect() + + if it.Name() != next.name { + t.Fatalf("[%d] expected filename to be %q", i, next.name) + } + + switch next.kind { + case TFile: + mf, ok := it.Node().(File) + if !ok { + t.Fatalf("[%d] expected file to be a normal file: %T", i, it.Node()) + } + out, err := io.ReadAll(mf) + if err != nil { + t.Errorf("[%d] failed to read file", i) + continue + } + if string(out) != next.value { + t.Errorf( + "[%d] while reading %q, expected %q, got %q", + i, + it.Name(), + next.value, + string(out), + ) + continue + } + case TSymlink: + mf, ok := it.Node().(*Symlink) + if !ok { + t.Errorf("[%d] expected file to be a symlink: %T", i, it.Node()) + continue + } + if mf.Target != next.value { + t.Errorf( + "[%d] target of symlink %q should have been %q but was %q", + i, + it.Name(), + next.value, + mf.Target, + ) + continue + } + case TDirStart: + mf, ok := it.Node().(Directory) + if !ok { + t.Fatalf( + "[%d] expected file to be a directory: %T", + i, + it.Node(), + ) + } + check(mf) + case TDirEnd: + t.Errorf( + "[%d] expected end of directory, found %#v at %q", + i, + it.Node(), + it.Name(), + ) + return + default: + t.Fatal("unhandled type", next.kind) + } + if err := it.Node().Close(); err != nil { + t.Fatalf("[%d] expected to be able to close node", i) + } + } + next, i := expect() + + if it.Err() != nil { + t.Fatalf("[%d] got error: %s", i, it.Err()) + } + + if next.kind != TDirEnd { + t.Fatalf("[%d] found end of directory, expected %#v", i, next) + } + } + check(dir) +} diff --git a/files/is_hidden.go b/files/is_hidden.go new file mode 100644 index 0000000000..9842ca2325 --- /dev/null +++ b/files/is_hidden.go @@ -0,0 +1,17 @@ +//go:build !windows + +package files + +import ( + "os" +) + +func isHidden(fi os.FileInfo) bool { + fName := fi.Name() + switch fName { + case "", ".", "..": + return false + default: + return fName[0] == '.' + } +} diff --git a/files/is_hidden_windows.go b/files/is_hidden_windows.go new file mode 100644 index 0000000000..9a0703863d --- /dev/null +++ b/files/is_hidden_windows.go @@ -0,0 +1,32 @@ +//go:build windows + +package files + +import ( + "os" + + windows "golang.org/x/sys/windows" +) + +func isHidden(fi os.FileInfo) bool { + fName := fi.Name() + switch fName { + case "", ".", "..": + return false + } + + if fName[0] == '.' 
{
+		return true
+	}
+
+	sys := fi.Sys()
+	if sys == nil {
+		return false
+	}
+	wi, ok := sys.(*windows.Win32FileAttributeData)
+	if !ok {
+		return false
+	}
+
+	return wi.FileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0
+}
diff --git a/files/linkfile.go b/files/linkfile.go
new file mode 100644
index 0000000000..526998652b
--- /dev/null
+++ b/files/linkfile.go
@@ -0,0 +1,42 @@
+package files
+
+import (
+	"os"
+	"strings"
+)
+
+type Symlink struct {
+	Target string
+
+	stat   os.FileInfo
+	reader strings.Reader
+}
+
+func NewLinkFile(target string, stat os.FileInfo) File {
+	lf := &Symlink{Target: target, stat: stat}
+	lf.reader.Reset(lf.Target)
+	return lf
+}
+
+func (lf *Symlink) Close() error {
+	return nil
+}
+
+func (lf *Symlink) Read(b []byte) (int, error) {
+	return lf.reader.Read(b)
+}
+
+func (lf *Symlink) Seek(offset int64, whence int) (int64, error) {
+	return lf.reader.Seek(offset, whence)
+}
+
+func (lf *Symlink) Size() (int64, error) {
+	return lf.reader.Size(), nil
+}
+
+func ToSymlink(n Node) *Symlink {
+	l, _ := n.(*Symlink)
+	return l
+}
+
+var _ File = &Symlink{}
diff --git a/files/multifilereader.go b/files/multifilereader.go
new file mode 100644
index 0000000000..af708dc7f3
--- /dev/null
+++ b/files/multifilereader.go
@@ -0,0 +1,151 @@
+package files
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/textproto"
+	"net/url"
+	"path"
+	"sync"
+)
+
+// MultiFileReader reads from a `files.Node` (which can be a directory of files
+// or a regular file) as HTTP multipart encoded data.
+type MultiFileReader struct {
+	io.Reader
+
+	// directory stack for NextFile
+	files []DirIterator
+	path  []string
+
+	currentFile Node
+	buf         bytes.Buffer
+	mpWriter    *multipart.Writer
+	closed      bool
+	mutex       *sync.Mutex
+
+	// if true, the content disposition will be "form-data"
+	// if false, the content disposition will be "attachment"
+	form bool
+}
+
+// NewMultiFileReader constructs a MultiFileReader. `file` can be any `files.Directory`.
+// If `form` is set to true, the Content-Disposition will be "form-data".
+// Otherwise, it will be "attachment".
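The writer above and the parser in multipartfile.go are designed to round-trip: MultiFileReader emits exactly the wire format that NewFileFromPartReader consumes. A minimal sketch of that loop (not part of the patch; it assumes only the exported API in this diff, and the literal "multipart/form-data" string stands in for the package's unexported multipartFormdataType constant):

```go
package main

import (
	"fmt"
	"io"
	"mime/multipart"

	"github.com/ipfs/boxo/files"
)

func main() {
	dir := files.NewMapDirectory(map[string]files.Node{
		"hello.txt": files.NewBytesFile([]byte("hi")),
	})

	// Encode the directory as multipart form data...
	mfr := files.NewMultiFileReader(dir, true)

	// ...and decode it back into a files.Directory.
	back, err := files.NewFileFromPartReader(
		multipart.NewReader(mfr, mfr.Boundary()),
		"multipart/form-data",
	)
	if err != nil {
		panic(err)
	}

	it := back.Entries()
	for it.Next() {
		if f, ok := it.Node().(files.File); ok {
			data, _ := io.ReadAll(f)
			fmt.Printf("%s: %s\n", it.Name(), data)
		}
	}
}
```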
+func NewMultiFileReader(file Directory, form bool) *MultiFileReader { + it := file.Entries() + + mfr := &MultiFileReader{ + files: []DirIterator{it}, + path: []string{""}, + form: form, + mutex: &sync.Mutex{}, + } + mfr.mpWriter = multipart.NewWriter(&mfr.buf) + + return mfr +} + +func (mfr *MultiFileReader) Read(buf []byte) (written int, err error) { + mfr.mutex.Lock() + defer mfr.mutex.Unlock() + + // if we are closed and the buffer is flushed, end reading + if mfr.closed && mfr.buf.Len() == 0 { + return 0, io.EOF + } + + // if the current file isn't set, advance to the next file + if mfr.currentFile == nil { + var entry DirEntry + + for entry == nil { + if len(mfr.files) == 0 { + mfr.mpWriter.Close() + mfr.closed = true + return mfr.buf.Read(buf) + } + + if !mfr.files[len(mfr.files)-1].Next() { + if mfr.files[len(mfr.files)-1].Err() != nil { + return 0, mfr.files[len(mfr.files)-1].Err() + } + mfr.files = mfr.files[:len(mfr.files)-1] + mfr.path = mfr.path[:len(mfr.path)-1] + continue + } + + entry = mfr.files[len(mfr.files)-1] + } + + // handle starting a new file part + if !mfr.closed { + + mfr.currentFile = entry.Node() + + // write the boundary and headers + header := make(textproto.MIMEHeader) + filename := url.QueryEscape(path.Join(path.Join(mfr.path...), entry.Name())) + dispositionPrefix := "attachment" + if mfr.form { + dispositionPrefix = "form-data; name=\"file\"" + } + + header.Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"", dispositionPrefix, filename)) + + var contentType string + + switch f := entry.Node().(type) { + case *Symlink: + contentType = "application/symlink" + case Directory: + newIt := f.Entries() + mfr.files = append(mfr.files, newIt) + mfr.path = append(mfr.path, entry.Name()) + contentType = "application/x-directory" + case File: + // otherwise, use the file as a reader to read its contents + contentType = "application/octet-stream" + default: + return 0, ErrNotSupported + } + + header.Set("Content-Type", contentType) + if rf, ok := entry.Node().(FileInfo); ok { + header.Set("abspath", rf.AbsPath()) + } + + _, err := mfr.mpWriter.CreatePart(header) + if err != nil { + return 0, err + } + } + } + + // if the buffer has something in it, read from it + if mfr.buf.Len() > 0 { + return mfr.buf.Read(buf) + } + + // otherwise, read from file data + if f, ok := mfr.currentFile.(File); ok { + written, err = f.Read(buf) + if err != io.EOF { + return written, err + } + } + + if err := mfr.currentFile.Close(); err != nil { + return written, err + } + + mfr.currentFile = nil + return written, nil +} + +// Boundary returns the boundary string to be used to separate files in the multipart data +func (mfr *MultiFileReader) Boundary() string { + return mfr.mpWriter.Boundary() +} diff --git a/files/multifilereader_test.go b/files/multifilereader_test.go new file mode 100644 index 0000000000..e36788a919 --- /dev/null +++ b/files/multifilereader_test.go @@ -0,0 +1,197 @@ +package files + +import ( + "io" + "mime/multipart" + "testing" +) + +var text = "Some text! 
:)" + +func getTestMultiFileReader(t *testing.T) *MultiFileReader { + sf := NewMapDirectory(map[string]Node{ + "file.txt": NewBytesFile([]byte(text)), + "boop": NewMapDirectory(map[string]Node{ + "a.txt": NewBytesFile([]byte("bleep")), + "b.txt": NewBytesFile([]byte("bloop")), + }), + "beep.txt": NewBytesFile([]byte("beep")), + }) + + // testing output by reading it with the go stdlib "mime/multipart" Reader + return NewMultiFileReader(sf, true) +} + +func TestMultiFileReaderToMultiFile(t *testing.T) { + mfr := getTestMultiFileReader(t) + mpReader := multipart.NewReader(mfr, mfr.Boundary()) + mf, err := NewFileFromPartReader(mpReader, multipartFormdataType) + if err != nil { + t.Fatal(err) + } + + it := mf.Entries() + + if !it.Next() || it.Name() != "beep.txt" { + t.Fatal("iterator didn't work as expected") + } + + if !it.Next() || it.Name() != "boop" || DirFromEntry(it) == nil { + t.Fatal("iterator didn't work as expected") + } + + subIt := DirFromEntry(it).Entries() + + if !subIt.Next() || subIt.Name() != "a.txt" || DirFromEntry(subIt) != nil { + t.Fatal("iterator didn't work as expected") + } + + if !subIt.Next() || subIt.Name() != "b.txt" || DirFromEntry(subIt) != nil { + t.Fatal("iterator didn't work as expected") + } + + if subIt.Next() || it.Err() != nil { + t.Fatal("iterator didn't work as expected") + } + + // try to break internal state + if subIt.Next() || it.Err() != nil { + t.Fatal("iterator didn't work as expected") + } + + if !it.Next() || it.Name() != "file.txt" || DirFromEntry(it) != nil || it.Err() != nil { + t.Fatal("iterator didn't work as expected") + } + + if it.Next() || it.Err() != nil { + t.Fatal("iterator didn't work as expected") + } +} + +func TestMultiFileReaderToMultiFileSkip(t *testing.T) { + mfr := getTestMultiFileReader(t) + mpReader := multipart.NewReader(mfr, mfr.Boundary()) + mf, err := NewFileFromPartReader(mpReader, multipartFormdataType) + if err != nil { + t.Fatal(err) + } + + it := mf.Entries() + + if !it.Next() || it.Name() != "beep.txt" { + t.Fatal("iterator didn't work as expected") + } + + if !it.Next() || it.Name() != "boop" || DirFromEntry(it) == nil { + t.Fatal("iterator didn't work as expected") + } + + if !it.Next() || it.Name() != "file.txt" || DirFromEntry(it) != nil || it.Err() != nil { + t.Fatal("iterator didn't work as expected") + } + + if it.Next() || it.Err() != nil { + t.Fatal("iterator didn't work as expected") + } +} + +func TestOutput(t *testing.T) { + mfr := getTestMultiFileReader(t) + walker := &multipartWalker{reader: multipart.NewReader(mfr, mfr.Boundary())} + buf := make([]byte, 20) + + mpf, err := walker.nextFile() + if mpf == nil || err != nil { + t.Fatal("Expected non-nil multipartFile, nil error") + } + mpr, ok := mpf.(File) + if !ok { + t.Fatal("Expected file to be a regular file") + } + if n, err := mpr.Read(buf); n != 4 || err != nil { + t.Fatal("Expected to read from file", n, err) + } + if string(buf[:4]) != "beep" { + t.Fatal("Data read was different than expected") + } + + mpf, err = walker.nextFile() + if mpf == nil || err != nil { + t.Fatal("Expected non-nil multipartFile, nil error") + } + mpd, ok := mpf.(Directory) + if !ok { + t.Fatal("Expected file to be a directory") + } + + child, err := walker.nextFile() + if child == nil || err != nil { + t.Fatal("Expected to be able to read a child file") + } + if _, ok := child.(File); !ok { + t.Fatal("Expected file to not be a directory") + } + + child, err = walker.nextFile() + if child == nil || err != nil { + t.Fatal("Expected to be able to read a child file") + } 
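+	// the second child of "boop" should likewise be a regular file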
+ if _, ok := child.(File); !ok { + t.Fatal("Expected file to not be a directory") + } + + it := mpd.Entries() + if it.Next() { + t.Fatal("Expected to get false") + } + + mpf, err = walker.nextFile() + if mpf == nil || err != nil { + t.Fatal("Expected non-nil multipartFile, nil error") + } + + part, err := walker.getPart() + if part != nil || err != io.EOF { + t.Fatal("Expected to get (nil, io.EOF)") + } +} + +func TestCommonPrefix(t *testing.T) { + sf := NewMapDirectory(map[string]Node{ + "boop": NewMapDirectory(map[string]Node{ + "a": NewBytesFile([]byte("bleep")), + "aa": NewBytesFile([]byte("bleep")), + "aaa": NewBytesFile([]byte("bleep")), + }), + }) + mfr := NewMultiFileReader(sf, true) + reader, err := NewFileFromPartReader(multipart.NewReader(mfr, mfr.Boundary()), multipartFormdataType) + if err != nil { + t.Fatal(err) + } + + CheckDir(t, reader, []Event{ + { + kind: TDirStart, + name: "boop", + }, + { + kind: TFile, + name: "a", + value: "bleep", + }, + { + kind: TFile, + name: "aa", + value: "bleep", + }, + { + kind: TFile, + name: "aaa", + value: "bleep", + }, + { + kind: TDirEnd, + }, + }) +} diff --git a/files/multipartfile.go b/files/multipartfile.go new file mode 100644 index 0000000000..27653982c4 --- /dev/null +++ b/files/multipartfile.go @@ -0,0 +1,232 @@ +package files + +import ( + "io" + "mime" + "mime/multipart" + "net/url" + "path" + "strings" +) + +const ( + multipartFormdataType = "multipart/form-data" + multipartMixedType = "multipart/mixed" + + applicationDirectory = "application/x-directory" + applicationSymlink = "application/symlink" + applicationFile = "application/octet-stream" + + contentTypeHeader = "Content-Type" +) + +type multipartDirectory struct { + path string + walker *multipartWalker + + // part is the part describing the directory. It's nil when implicit. + part *multipart.Part +} + +type multipartWalker struct { + part *multipart.Part + reader *multipart.Reader +} + +func (m *multipartWalker) consumePart() { + m.part = nil +} + +func (m *multipartWalker) getPart() (*multipart.Part, error) { + if m.part != nil { + return m.part, nil + } + if m.reader == nil { + return nil, io.EOF + } + + var err error + m.part, err = m.reader.NextPart() + if err == io.EOF { + m.reader = nil + } + return m.part, err +} + +// NewFileFromPartReader creates a Directory from a multipart reader. +func NewFileFromPartReader(reader *multipart.Reader, mediatype string) (Directory, error) { + switch mediatype { + case applicationDirectory, multipartFormdataType: + default: + return nil, ErrNotDirectory + } + + return &multipartDirectory{ + path: "/", + walker: &multipartWalker{ + reader: reader, + }, + }, nil +} + +func (w *multipartWalker) nextFile() (Node, error) { + part, err := w.getPart() + if err != nil { + return nil, err + } + w.consumePart() + + contentType := part.Header.Get(contentTypeHeader) + if contentType != "" { + var err error + contentType, _, err = mime.ParseMediaType(contentType) + if err != nil { + return nil, err + } + } + + switch contentType { + case multipartFormdataType, applicationDirectory: + return &multipartDirectory{ + part: part, + path: fileName(part), + walker: w, + }, nil + case applicationSymlink: + out, err := io.ReadAll(part) + if err != nil { + return nil, err + } + + return NewLinkFile(string(out), nil), nil + default: + return &ReaderFile{ + reader: part, + abspath: part.Header.Get("abspath"), + }, nil + } +} + +// fileName returns a normalized filename from a part. 
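Since dirName, isChild, and makeRelative below all assume pre-cleaned paths, their contracts are easy to pin down. A hypothetical in-package test (not part of the patch) showing the intended behavior:

```go
func TestCleanedPathHelpers(t *testing.T) {
	// dirName guarantees exactly one trailing slash.
	if got := dirName("/dir"); got != "/dir/" {
		t.Fatalf("dirName: got %q", got)
	}
	// isChild compares against "/dir/", not the bare prefix,
	// so "/directory" is not mistaken for a child of "/dir".
	if !isChild("/dir/nested", "/dir") || isChild("/directory", "/dir") {
		t.Fatal("isChild mismatch")
	}
	// makeRelative strips the parent directory prefix.
	if got := makeRelative("/dir/nested", "/dir"); got != "nested" {
		t.Fatalf("makeRelative: got %q", got)
	}
}
```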
+func fileName(part *multipart.Part) string {
+	v := part.Header.Get("Content-Disposition")
+	_, params, err := mime.ParseMediaType(v)
+	if err != nil {
+		return ""
+	}
+	filename := params["filename"]
+	if escaped, err := url.QueryUnescape(filename); err == nil {
+		filename = escaped
+	} // if there is an unescape error, just treat the name as unescaped
+
+	return path.Clean("/" + filename)
+}
+
+// dirName appends a slash to the end of the filename, if not present.
+// expects a _cleaned_ path.
+func dirName(filename string) string {
+	if !strings.HasSuffix(filename, "/") {
+		filename += "/"
+	}
+	return filename
+}
+
+// isChild checks if child is a child of parent directory.
+// expects a _cleaned_ path.
+func isChild(child, parent string) bool {
+	return strings.HasPrefix(child, dirName(parent))
+}
+
+// makeRelative makes the child path relative to the parent path.
+// expects a _cleaned_ path.
+func makeRelative(child, parent string) string {
+	return strings.TrimPrefix(child, dirName(parent))
+}
+
+type multipartIterator struct {
+	f *multipartDirectory
+
+	curFile Node
+	curName string
+	err     error
+}
+
+func (it *multipartIterator) Name() string {
+	return it.curName
+}
+
+func (it *multipartIterator) Node() Node {
+	return it.curFile
+}
+
+func (it *multipartIterator) Next() bool {
+	if it.f.walker.reader == nil || it.err != nil {
+		return false
+	}
+	var part *multipart.Part
+	for {
+		part, it.err = it.f.walker.getPart()
+		if it.err != nil {
+			return false
+		}
+
+		name := fileName(part)
+
+		// Is the file in a different directory?
+		if !isChild(name, it.f.path) {
+			return false
+		}
+
+		// Have we already entered this directory?
+		if it.curName != "" && isChild(name, path.Join(it.f.path, it.curName)) {
+			it.f.walker.consumePart()
+			continue
+		}
+
+		// Make the path relative to the current directory.
+		name = makeRelative(name, it.f.path)
+
+		// Check if we need to create a fake directory (more than one
+		// path component).
+		if idx := strings.IndexByte(name, '/'); idx >= 0 {
+			it.curName = name[:idx]
+			it.curFile = &multipartDirectory{
+				path:   path.Join(it.f.path, it.curName),
+				walker: it.f.walker,
+			}
+			return true
+		}
+		it.curName = name
+
+		// Finally, advance to the next file.
+		it.curFile, it.err = it.f.walker.nextFile()
+
+		return it.err == nil
+	}
+}
+
+func (it *multipartIterator) Err() error {
+	// We use EOF to signal that this iterator is done. That way, we don't
+	// need to check every time `Next` is called.
+	if it.err == io.EOF {
+		return nil
+	}
+	return it.err
+}
+
+func (f *multipartDirectory) Entries() DirIterator {
+	return &multipartIterator{f: f}
+}
+
+func (f *multipartDirectory) Close() error {
+	if f.part != nil {
+		return f.part.Close()
+	}
+	return nil
+}
+
+func (f *multipartDirectory) Size() (int64, error) {
+	return 0, ErrNotSupported
+}
+
+var _ Directory = &multipartDirectory{}
diff --git a/files/readerfile.go b/files/readerfile.go
new file mode 100644
index 0000000000..a03dae23f0
--- /dev/null
+++ b/files/readerfile.go
@@ -0,0 +1,81 @@
+package files
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// ReaderFile is an implementation of File created from an `io.Reader`.
+// ReaderFiles are never directories, and can be read from and closed.
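The fsize field below lets byte-backed files report a size even when no stat info is available. A small sketch of the resulting Size semantics, assuming only the constructors shown in this file:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ipfs/boxo/files"
)

func main() {
	// NewBytesFile records the byte length up front.
	bf := files.NewBytesFile([]byte("hello"))
	fmt.Println(bf.Size()) // 5 <nil>

	// A bare reader has neither stat info nor a known length,
	// so Size reports ErrNotSupported.
	rf := files.NewReaderFile(strings.NewReader("hello"))
	_, err := rf.Size()
	fmt.Println(err == files.ErrNotSupported) // true
}
```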
+type ReaderFile struct {
+	abspath string
+	reader  io.ReadCloser
+	stat    os.FileInfo
+
+	fsize int64
+}
+
+func NewBytesFile(b []byte) File {
+	return &ReaderFile{"", NewReaderFile(bytes.NewReader(b)), nil, int64(len(b))}
+}
+
+func NewReaderFile(reader io.Reader) File {
+	return NewReaderStatFile(reader, nil)
+}
+
+func NewReaderStatFile(reader io.Reader, stat os.FileInfo) File {
+	rc, ok := reader.(io.ReadCloser)
+	if !ok {
+		rc = io.NopCloser(reader)
+	}
+
+	return &ReaderFile{"", rc, stat, -1}
+}
+
+func NewReaderPathFile(path string, reader io.ReadCloser, stat os.FileInfo) (*ReaderFile, error) {
+	abspath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ReaderFile{abspath, reader, stat, -1}, nil
+}
+
+func (f *ReaderFile) AbsPath() string {
+	return f.abspath
+}
+
+func (f *ReaderFile) Read(p []byte) (int, error) {
+	return f.reader.Read(p)
+}
+
+func (f *ReaderFile) Close() error {
+	return f.reader.Close()
+}
+
+func (f *ReaderFile) Stat() os.FileInfo {
+	return f.stat
+}
+
+func (f *ReaderFile) Size() (int64, error) {
+	if f.stat == nil {
+		if f.fsize >= 0 {
+			return f.fsize, nil
+		}
+		return 0, ErrNotSupported
+	}
+	return f.stat.Size(), nil
+}
+
+func (f *ReaderFile) Seek(offset int64, whence int) (int64, error) {
+	if s, ok := f.reader.(io.Seeker); ok {
+		return s.Seek(offset, whence)
+	}
+
+	return 0, ErrNotSupported
+}
+
+var _ File = &ReaderFile{}
+var _ FileInfo = &ReaderFile{}
diff --git a/files/serialfile.go b/files/serialfile.go
new file mode 100644
index 0000000000..ab4c1e2fe4
--- /dev/null
+++ b/files/serialfile.go
@@ -0,0 +1,168 @@
+package files
+
+import (
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+)
+
+// serialFile implements Node, and reads from a path on the OS filesystem.
+// No more than one file will be opened at a time.
+type serialFile struct {
+	path   string
+	files  []os.FileInfo
+	stat   os.FileInfo
+	filter *Filter
+}
+
+type serialIterator struct {
+	files  []os.FileInfo
+	path   string
+	filter *Filter
+
+	curName string
+	curFile Node
+
+	err error
+}
+
+// NewSerialFile takes a filepath, a bool specifying if hidden files should be included,
+// and a fileInfo and returns a Node representing a file, directory or special file.
+func NewSerialFile(path string, includeHidden bool, stat os.FileInfo) (Node, error) {
+	filter, err := NewFilter("", nil, includeHidden)
+	if err != nil {
+		return nil, err
+	}
+	return NewSerialFileWithFilter(path, filter, stat)
+}
+
+// NewSerialFileWithFilter takes a filepath, a filter for determining which files should be
+// operated upon if the filepath is a directory, and a fileInfo and returns a
+// Node representing a file, directory or special file.
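For callers, the usual entry point is NewSerialFile; directories stat their children eagerly but open them one at a time through the iterator. A usage sketch (the ./testdata path is hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"github.com/ipfs/boxo/files"
)

func main() {
	st, err := os.Stat("./testdata")
	if err != nil {
		panic(err)
	}

	// includeHidden=false: dotfiles are skipped by the default filter.
	nd, err := files.NewSerialFile("./testdata", false, st)
	if err != nil {
		panic(err)
	}
	defer nd.Close()

	if dir, ok := nd.(files.Directory); ok {
		it := dir.Entries()
		for it.Next() {
			fmt.Println(it.Name())
		}
		if it.Err() != nil {
			panic(it.Err())
		}
	}
}
```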
+func NewSerialFileWithFilter(path string, filter *Filter, stat os.FileInfo) (Node, error) { + switch mode := stat.Mode(); { + case mode.IsRegular(): + file, err := os.Open(path) + if err != nil { + return nil, err + } + return NewReaderPathFile(path, file, stat) + case mode.IsDir(): + // for directories, stat all of the contents first, so we know what files to + // open when Entries() is called + entries, err := os.ReadDir(path) + if err != nil { + return nil, err + } + contents := make([]fs.FileInfo, 0, len(entries)) + for _, entry := range entries { + content, err := entry.Info() + if err != nil { + return nil, err + } + contents = append(contents, content) + } + return &serialFile{path, contents, stat, filter}, nil + case mode&os.ModeSymlink != 0: + target, err := os.Readlink(path) + if err != nil { + return nil, err + } + return NewLinkFile(target, stat), nil + default: + return nil, fmt.Errorf("unrecognized file type for %s: %s", path, mode.String()) + } +} + +func (it *serialIterator) Name() string { + return it.curName +} + +func (it *serialIterator) Node() Node { + return it.curFile +} + +func (it *serialIterator) Next() bool { + // if there aren't any files left in the root directory, we're done + if len(it.files) == 0 { + return false + } + + stat := it.files[0] + it.files = it.files[1:] + for it.filter.ShouldExclude(stat) { + if len(it.files) == 0 { + return false + } + + stat = it.files[0] + it.files = it.files[1:] + } + + // open the next file + filePath := filepath.ToSlash(filepath.Join(it.path, stat.Name())) + + // recursively call the constructor on the next file + // if it's a regular file, we will open it as a ReaderFile + // if it's a directory, files in it will be opened serially + sf, err := NewSerialFileWithFilter(filePath, it.filter, stat) + if err != nil { + it.err = err + return false + } + + it.curName = stat.Name() + it.curFile = sf + return true +} + +func (it *serialIterator) Err() error { + return it.err +} + +func (f *serialFile) Entries() DirIterator { + return &serialIterator{ + path: f.path, + files: f.files, + filter: f.filter, + } +} + +func (f *serialFile) Close() error { + return nil +} + +func (f *serialFile) Stat() os.FileInfo { + return f.stat +} + +func (f *serialFile) Size() (int64, error) { + if !f.stat.IsDir() { + // something went terribly, terribly wrong + return 0, errors.New("serialFile is not a directory") + } + + var du int64 + err := filepath.Walk(f.path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi == nil { + return err + } + + if f.filter.ShouldExclude(fi) { + if fi.Mode().IsDir() { + return filepath.SkipDir + } + } else if fi.Mode().IsRegular() { + du += fi.Size() + } + + return nil + }) + + return du, err +} + +var _ Directory = &serialFile{} +var _ DirIterator = &serialIterator{} diff --git a/files/serialfile_test.go b/files/serialfile_test.go new file mode 100644 index 0000000000..80c252a7e7 --- /dev/null +++ b/files/serialfile_test.go @@ -0,0 +1,194 @@ +package files + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "testing" +) + +func isFullPathHidden(p string) bool { + return strings.HasPrefix(p, ".") || strings.Contains(p, "/.") +} + +func TestSerialFile(t *testing.T) { + t.Run("Hidden/NoFilter", func(t *testing.T) { testSerialFile(t, true, false) }) + t.Run("Hidden/Filter", func(t *testing.T) { testSerialFile(t, true, true) }) + t.Run("NotHidden/NoFilter", func(t *testing.T) { testSerialFile(t, false, false) }) + t.Run("NotHidden/Filter", func(t *testing.T) { 
testSerialFile(t, false, true) }) +} + +func testSerialFile(t *testing.T, hidden, withIgnoreRules bool) { + tmppath, err := os.MkdirTemp("", "files-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmppath) + + testInputs := map[string]string{ + "1": "Some text!\n", + "2": "beep", + "3": "", + "4": "boop", + "5": "", + filepath.FromSlash("5/a"): "foobar", + ".6": "thing", + "7": "", + filepath.FromSlash("7/.foo"): "bla", + ".8": "", + filepath.FromSlash(".8/foo"): "bla", + } + fileFilter, err := NewFilter("", []string{"9", "10"}, hidden) + if err != nil { + t.Fatal(err) + } + if withIgnoreRules { + testInputs["9"] = "" + testInputs[filepath.FromSlash("9/b")] = "bebop" + testInputs["10"] = "" + testInputs[filepath.FromSlash("10/.c")] = "doowop" + } + + for p, c := range testInputs { + path := filepath.Join(tmppath, p) + if c != "" { + continue + } + if err := os.MkdirAll(path, 0777); err != nil { + t.Fatal(err) + } + } + + for p, c := range testInputs { + path := filepath.Join(tmppath, p) + if c == "" { + continue + } + if err := os.WriteFile(path, []byte(c), 0666); err != nil { + t.Fatal(err) + } + } + expectedPaths := make([]string, 0, 4) + expectedSize := int64(0) + +testInputs: + for p := range testInputs { + components := strings.Split(p, string(filepath.Separator)) + var stat os.FileInfo + for i := range components { + stat, err = os.Stat(filepath.Join( + append([]string{tmppath}, components[:i+1]...)..., + )) + if err != nil { + t.Fatal(err) + } + if fileFilter.ShouldExclude(stat) { + continue testInputs + } + } + expectedPaths = append(expectedPaths, p) + if stat.Mode().IsRegular() { + expectedSize += stat.Size() + } + } + + sort.Strings(expectedPaths) + + stat, err := os.Stat(tmppath) + if err != nil { + t.Fatal(err) + } + + sf, err := NewSerialFile(tmppath, hidden, stat) + if withIgnoreRules { + sf, err = NewSerialFileWithFilter(tmppath, fileFilter, stat) + } + if err != nil { + t.Fatal(err) + } + defer sf.Close() + + if size, err := sf.Size(); err != nil { + t.Fatalf("failed to determine size: %s", err) + } else if size != expectedSize { + t.Fatalf("expected size %d, got size %d", expectedSize, size) + } + + rootFound := false + actualPaths := make([]string, 0, len(expectedPaths)) + err = Walk(sf, func(path string, nd Node) error { + defer nd.Close() + + // root node. 
+ if path == "" { + if rootFound { + return fmt.Errorf("found root twice") + } + if sf != nd { + return fmt.Errorf("wrong root") + } + rootFound = true + return nil + } + actualPaths = append(actualPaths, path) + if !hidden && isFullPathHidden(path) { + return fmt.Errorf("found a hidden file") + } + components := filepath.SplitList(path) + for i := range components { + if fileFilter.Rules.MatchesPath(filepath.Join(components[:i+1]...)) { + return fmt.Errorf("found a file that should be excluded") + } + } + + data, ok := testInputs[path] + if !ok { + return fmt.Errorf("expected something at %q", path) + } + delete(testInputs, path) + + switch nd := nd.(type) { + case *Symlink: + return fmt.Errorf("didn't expect a symlink") + case Directory: + if data != "" { + return fmt.Errorf("expected a directory at %q", path) + } + case File: + actual, err := io.ReadAll(nd) + if err != nil { + return err + } + if string(actual) != data { + return fmt.Errorf("expected %q, got %q", data, string(actual)) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } + if !rootFound { + t.Fatal("didn't find the root") + } + + if len(expectedPaths) != len(actualPaths) { + t.Fatalf("expected %d paths, found %d", + len(expectedPaths), + len(actualPaths), + ) + } + + for i := range expectedPaths { + if expectedPaths[i] != actualPaths[i] { + t.Errorf( + "expected path %q does not match actual %q", + expectedPaths[i], + actualPaths[i], + ) + } + } +} diff --git a/files/slicedirectory.go b/files/slicedirectory.go new file mode 100644 index 0000000000..d11656261d --- /dev/null +++ b/files/slicedirectory.go @@ -0,0 +1,97 @@ +package files + +import "sort" + +type fileEntry struct { + name string + file Node +} + +func (e fileEntry) Name() string { + return e.name +} + +func (e fileEntry) Node() Node { + return e.file +} + +func FileEntry(name string, file Node) DirEntry { + return fileEntry{ + name: name, + file: file, + } +} + +type sliceIterator struct { + files []DirEntry + n int +} + +func (it *sliceIterator) Name() string { + return it.files[it.n].Name() +} + +func (it *sliceIterator) Node() Node { + return it.files[it.n].Node() +} + +func (it *sliceIterator) Next() bool { + it.n++ + return it.n < len(it.files) +} + +func (it *sliceIterator) Err() error { + return nil +} + +// SliceFile implements Node, and provides simple directory handling. +// It contains children files, and is created from a `[]Node`. +// SliceFiles are always directories, and can't be read from or closed. 
+type SliceFile struct {
+	files []DirEntry
+}
+
+func NewMapDirectory(f map[string]Node) Directory {
+	ents := make([]DirEntry, 0, len(f))
+	for name, nd := range f {
+		ents = append(ents, FileEntry(name, nd))
+	}
+	sort.Slice(ents, func(i, j int) bool {
+		return ents[i].Name() < ents[j].Name()
+	})
+
+	return NewSliceDirectory(ents)
+}
+
+func NewSliceDirectory(files []DirEntry) Directory {
+	return &SliceFile{files}
+}
+
+func (f *SliceFile) Entries() DirIterator {
+	return &sliceIterator{files: f.files, n: -1}
+}
+
+func (f *SliceFile) Close() error {
+	return nil
+}
+
+func (f *SliceFile) Length() int {
+	return len(f.files)
+}
+
+func (f *SliceFile) Size() (int64, error) {
+	var size int64
+
+	for _, file := range f.files {
+		s, err := file.Node().Size()
+		if err != nil {
+			return 0, err
+		}
+		size += s
+	}
+
+	return size, nil
+}
+
+var _ Directory = &SliceFile{}
+var _ DirEntry = fileEntry{}
diff --git a/files/tarwriter.go b/files/tarwriter.go
new file mode 100644
index 0000000000..cecbcae42f
--- /dev/null
+++ b/files/tarwriter.go
@@ -0,0 +1,137 @@
+package files
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"strings"
+	"time"
+)
+
+var (
+	ErrUnixFSPathOutsideRoot = errors.New("relative UnixFS paths outside the root are not allowed, use CAR instead")
+)
+
+type TarWriter struct {
+	TarW       *tar.Writer
+	baseDirSet bool
+	baseDir    string
+}
+
+// NewTarWriter wraps the given io.Writer into a new tar writer.
+func NewTarWriter(w io.Writer) (*TarWriter, error) {
+	return &TarWriter{
+		TarW: tar.NewWriter(w),
+	}, nil
+}
+
+func (w *TarWriter) writeDir(f Directory, fpath string) error {
+	if err := writeDirHeader(w.TarW, fpath); err != nil {
+		return err
+	}
+
+	it := f.Entries()
+	for it.Next() {
+		if err := w.WriteFile(it.Node(), path.Join(fpath, it.Name())); err != nil {
+			return err
+		}
+	}
+	return it.Err()
+}
+
+func (w *TarWriter) writeFile(f File, fpath string) error {
+	size, err := f.Size()
+	if err != nil {
+		return err
+	}
+
+	if err := writeFileHeader(w.TarW, fpath, uint64(size)); err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(w.TarW, f); err != nil {
+		return err
+	}
+	w.TarW.Flush()
+	return nil
+}
+
+func validateTarFilePath(baseDir, fpath string) bool {
+	// Ensure the filepath has no ".", "..", etc. within the known root directory.
+	fpath = path.Clean(fpath)
+
+	// If we have a non-empty baseDir, check if the filepath starts with baseDir.
+	// If not, we can exclude it immediately. For 'ipfs get' and for the gateway,
+	// the baseDir would be '{cid}.tar'.
+	if baseDir != "" && !strings.HasPrefix(path.Clean(fpath), baseDir) {
+		return false
+	}
+
+	// Otherwise, check if the path starts with '..' which would make it fall
+	// outside the root path. This works since the path has already been cleaned.
+	if strings.HasPrefix(fpath, "..") {
+		return false
+	}
+
+	return true
+}
+
+// WriteFile adds a node to the archive.
+func (w *TarWriter) WriteFile(nd Node, fpath string) error {
+	if !w.baseDirSet {
+		w.baseDirSet = true // Use a variable for this as baseDir may be an empty string.
+		w.baseDir = fpath
+	}
+
+	if !validateTarFilePath(w.baseDir, fpath) {
+		return ErrUnixFSPathOutsideRoot
+	}
+
+	switch nd := nd.(type) {
+	case *Symlink:
+		return writeSymlinkHeader(w.TarW, nd.Target, fpath)
+	case File:
+		return w.writeFile(nd, fpath)
+	case Directory:
+		return w.writeDir(nd, fpath)
+	default:
+		return fmt.Errorf("file type %T is not supported", nd)
+	}
+}
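+
+// A minimal usage sketch (illustrative only; the output writer and file
+// contents are assumptions, and error handling is elided):
+//
+//	dir := files.NewMapDirectory(map[string]files.Node{
+//		"hello.txt": files.NewBytesFile([]byte("hello")),
+//	})
+//	tw, _ := files.NewTarWriter(os.Stdout)
+//	_ = tw.WriteFile(dir, "root") // archives "root" and "root/hello.txt"
+//	_ = tw.Close()
+
+// Close closes the tar writer.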
+func (w *TarWriter) Close() error { + return w.TarW.Close() +} + +func writeDirHeader(w *tar.Writer, fpath string) error { + return w.WriteHeader(&tar.Header{ + Name: fpath, + Typeflag: tar.TypeDir, + Mode: 0777, + ModTime: time.Now().Truncate(time.Second), + // TODO: set mode, dates, etc. when added to unixFS + }) +} + +func writeFileHeader(w *tar.Writer, fpath string, size uint64) error { + return w.WriteHeader(&tar.Header{ + Name: fpath, + Size: int64(size), + Typeflag: tar.TypeReg, + Mode: 0644, + ModTime: time.Now().Truncate(time.Second), + // TODO: set mode, dates, etc. when added to unixFS + }) +} + +func writeSymlinkHeader(w *tar.Writer, target, fpath string) error { + return w.WriteHeader(&tar.Header{ + Name: fpath, + Linkname: target, + Mode: 0777, + Typeflag: tar.TypeSymlink, + }) +} diff --git a/files/tarwriter_test.go b/files/tarwriter_test.go new file mode 100644 index 0000000000..0e1488e7f2 --- /dev/null +++ b/files/tarwriter_test.go @@ -0,0 +1,149 @@ +package files + +import ( + "archive/tar" + "errors" + "io" + "testing" + "time" +) + +func TestTarWriter(t *testing.T) { + tf := NewMapDirectory(map[string]Node{ + "file.txt": NewBytesFile([]byte(text)), + "boop": NewMapDirectory(map[string]Node{ + "a.txt": NewBytesFile([]byte("bleep")), + "b.txt": NewBytesFile([]byte("bloop")), + }), + "beep.txt": NewBytesFile([]byte("beep")), + }) + + pr, pw := io.Pipe() + tw, err := NewTarWriter(pw) + if err != nil { + t.Fatal(err) + } + tr := tar.NewReader(pr) + + go func() { + defer tw.Close() + if err := tw.WriteFile(tf, ""); err != nil { + t.Error(err) + } + }() + + var cur *tar.Header + + checkHeader := func(name string, typ byte, size int64) { + if cur.Name != name { + t.Errorf("got wrong name: %s != %s", cur.Name, name) + } + if cur.Typeflag != typ { + t.Errorf("got wrong type: %d != %d", cur.Typeflag, typ) + } + if cur.Size != size { + t.Errorf("got wrong size: %d != %d", cur.Size, size) + } + now := time.Now() + if cur.ModTime.After(now) { + t.Errorf("wrote timestamp in the future: %s (now) < %s", now, cur.ModTime) + } + } + + if cur, err = tr.Next(); err != nil { + t.Fatal(err) + } + checkHeader("", tar.TypeDir, 0) + + if cur, err = tr.Next(); err != nil { + t.Fatal(err) + } + checkHeader("beep.txt", tar.TypeReg, 4) + + if cur, err = tr.Next(); err != nil { + t.Fatal(err) + } + checkHeader("boop", tar.TypeDir, 0) + + if cur, err = tr.Next(); err != nil { + t.Fatal(err) + } + checkHeader("boop/a.txt", tar.TypeReg, 5) + + if cur, err = tr.Next(); err != nil { + t.Fatal(err) + } + checkHeader("boop/b.txt", tar.TypeReg, 5) + + if cur, err = tr.Next(); err != nil { + t.Fatal(err) + } + checkHeader("file.txt", tar.TypeReg, 13) + + if cur, err = tr.Next(); err != io.EOF { + t.Fatal(err) + } +} + +func TestTarWriterRelativePathInsideRoot(t *testing.T) { + tf := NewMapDirectory(map[string]Node{ + "file.txt": NewBytesFile([]byte(text)), + "boop": NewMapDirectory(map[string]Node{ + "../a.txt": NewBytesFile([]byte("bleep")), + "b.txt": NewBytesFile([]byte("bloop")), + }), + "beep.txt": NewBytesFile([]byte("beep")), + }) + + tw, err := NewTarWriter(io.Discard) + if err != nil { + t.Fatal(err) + } + + defer tw.Close() + if err := tw.WriteFile(tf, ""); err != nil { + t.Error(err) + } +} + +func TestTarWriterFailsFileOutsideRoot(t *testing.T) { + tf := NewMapDirectory(map[string]Node{ + "file.txt": NewBytesFile([]byte(text)), + "boop": NewMapDirectory(map[string]Node{ + "../../a.txt": NewBytesFile([]byte("bleep")), + "b.txt": NewBytesFile([]byte("bloop")), + }), + "beep.txt": 
NewBytesFile([]byte("beep")),
+	})
+
+	tw, err := NewTarWriter(io.Discard)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer tw.Close()
+	if err := tw.WriteFile(tf, ""); !errors.Is(err, ErrUnixFSPathOutsideRoot) {
+		t.Errorf("unexpected error, wanted: %v; got: %v", ErrUnixFSPathOutsideRoot, err)
+	}
+}
+
+func TestTarWriterFailsFileOutsideRootWithBaseDir(t *testing.T) {
+	tf := NewMapDirectory(map[string]Node{
+		"../file.txt": NewBytesFile([]byte(text)),
+		"boop": NewMapDirectory(map[string]Node{
+			"a.txt": NewBytesFile([]byte("bleep")),
+			"b.txt": NewBytesFile([]byte("bloop")),
+		}),
+		"beep.txt": NewBytesFile([]byte("beep")),
+	})
+
+	tw, err := NewTarWriter(io.Discard)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer tw.Close()
+	if err := tw.WriteFile(tf, "test.tar"); !errors.Is(err, ErrUnixFSPathOutsideRoot) {
+		t.Errorf("unexpected error, wanted: %v; got: %v", ErrUnixFSPathOutsideRoot, err)
+	}
+}
diff --git a/files/util.go b/files/util.go
new file mode 100644
index 0000000000..e727e7ae6e
--- /dev/null
+++ b/files/util.go
@@ -0,0 +1,25 @@
+package files
+
+// ToFile is an alias for n.(File). If the file isn't a regular file, a nil
+// value will be returned.
+func ToFile(n Node) File {
+	f, _ := n.(File)
+	return f
+}
+
+// ToDir is an alias for n.(Directory). If the file isn't a directory, a nil
+// value will be returned.
+func ToDir(n Node) Directory {
+	d, _ := n.(Directory)
+	return d
+}
+
+// FileFromEntry calls ToFile on the Node in the given entry.
+func FileFromEntry(e DirEntry) File {
+	return ToFile(e.Node())
+}
+
+// DirFromEntry calls ToDir on the Node in the given entry.
+func DirFromEntry(e DirEntry) Directory {
+	return ToDir(e.Node())
+}
diff --git a/files/walk.go b/files/walk.go
new file mode 100644
index 0000000000..f23e7e47fe
--- /dev/null
+++ b/files/walk.go
@@ -0,0 +1,27 @@
+package files
+
+import (
+	"path/filepath"
+)
+
+// Walk walks a file tree, like `filepath.Walk`.
+func Walk(nd Node, cb func(fpath string, nd Node) error) error {
+	var helper func(string, Node) error
+	helper = func(path string, nd Node) error {
+		if err := cb(path, nd); err != nil {
+			return err
+		}
+		dir, ok := nd.(Directory)
+		if !ok {
+			return nil
+		}
+		iter := dir.Entries()
+		for iter.Next() {
+			if err := helper(filepath.Join(path, iter.Name()), iter.Node()); err != nil {
+				return err
+			}
+		}
+		return iter.Err()
+	}
+	return helper("", nd)
+}
diff --git a/files/webfile.go b/files/webfile.go
new file mode 100644
index 0000000000..594b81c828
--- /dev/null
+++ b/files/webfile.go
@@ -0,0 +1,89 @@
+package files
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+)
+
+// WebFile is an implementation of File which reads its contents
+// from a Web URL (http). A GET request will be performed
+// against the source when calling Read().
+type WebFile struct {
+	body          io.ReadCloser
+	url           *url.URL
+	contentLength int64
+}
+
+// NewWebFile creates a WebFile with the given URL, which
+// will be used to perform the GET request on Read().
+func NewWebFile(url *url.URL) *WebFile {
+	return &WebFile{
+		url: url,
+	}
+}
+
+func (wf *WebFile) start() error {
+	if wf.body == nil {
+		s := wf.url.String()
+		resp, err := http.Get(s)
+		if err != nil {
+			return err
+		}
+		if resp.StatusCode < 200 || resp.StatusCode > 299 {
+			return fmt.Errorf("got non-2XX status code %d: %s", resp.StatusCode, s)
+		}
+		wf.body = resp.Body
+		wf.contentLength = resp.ContentLength
+	}
+	return nil
+}
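+
+// A minimal usage sketch (illustrative only; the URL is an assumption and
+// error handling is elided):
+//
+//	u, _ := url.Parse("https://example.com/data.bin")
+//	wf := files.NewWebFile(u)
+//	defer wf.Close()
+//	data, _ := io.ReadAll(wf) // the first Read triggers the GET request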
+
+// Read reads the File from its web location. On the first
+// call to Read, a GET request will be performed against the
+// WebFile's URL, using Go's default HTTP client. Any further
+// reads will keep reading from the HTTP request body.
+func (wf *WebFile) Read(b []byte) (int, error) {
+	if err := wf.start(); err != nil {
+		return 0, err
+	}
+	return wf.body.Read(b)
+}
+
+// Close closes the WebFile (or the request body).
+func (wf *WebFile) Close() error {
+	if wf.body == nil {
+		return nil
+	}
+	return wf.body.Close()
+}
+
+// TODO: implement
+func (wf *WebFile) Seek(offset int64, whence int) (int64, error) {
+	return 0, ErrNotSupported
+}
+
+func (wf *WebFile) Size() (int64, error) {
+	if err := wf.start(); err != nil {
+		return 0, err
+	}
+	if wf.contentLength < 0 {
+		return -1, errors.New("Content-Length header was not set")
+	}
+
+	return wf.contentLength, nil
+}
+
+func (wf *WebFile) AbsPath() string {
+	return wf.url.String()
+}
+
+func (wf *WebFile) Stat() os.FileInfo {
+	return nil
+}
+
+var _ File = &WebFile{}
+var _ FileInfo = &WebFile{}
diff --git a/files/webfile_test.go b/files/webfile_test.go
new file mode 100644
index 0000000000..94cddb5d2f
--- /dev/null
+++ b/files/webfile_test.go
@@ -0,0 +1,97 @@
+package files
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+)
+
+func TestWebFile(t *testing.T) {
+	const content = "Hello world!"
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprint(w, content)
+	}))
+	defer s.Close()
+
+	u, err := url.Parse(s.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	wf := NewWebFile(u)
+	body, err := io.ReadAll(wf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(body) != content {
+		t.Fatalf("expected %q but got %q", content, string(body))
+	}
+}
+
+func TestWebFile_notFound(t *testing.T) {
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "File not found.", http.StatusNotFound)
+	}))
+	defer s.Close()
+
+	u, err := url.Parse(s.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	wf := NewWebFile(u)
+	_, err = io.ReadAll(wf)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+}
+
+func TestWebFileSize(t *testing.T) {
+	body := "Hello world!"
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprint(w, body)
+	}))
+	defer s.Close()
+
+	u, err := url.Parse(s.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read size before reading file.
+
+	wf1 := NewWebFile(u)
+	if size, err := wf1.Size(); err != nil {
+		t.Error(err)
+	} else if int(size) != len(body) {
+		t.Errorf("expected size to be %d, got %d", len(body), size)
+	}
+
+	actual, err := io.ReadAll(wf1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(actual) != body {
+		t.Fatal("should have read the web file")
+	}
+
+	wf1.Close()
+
+	// Read size after reading file.
+
+	wf2 := NewWebFile(u)
+	actual, err = io.ReadAll(wf2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(actual) != body {
+		t.Fatal("should have read the web file")
+	}
+
+	if size, err := wf2.Size(); err != nil {
+		t.Error(err)
+	} else if int(size) != len(body) {
+		t.Errorf("expected size to be %d, got %d", len(body), size)
+	}
+}
diff --git a/filestore/filestore.go b/filestore/filestore.go
new file mode 100644
index 0000000000..d6e5c6a370
--- /dev/null
+++ b/filestore/filestore.go
@@ -0,0 +1,245 @@
+// Package filestore implements a Blockstore which is able to read certain
+// blocks of data directly from its original location in the filesystem.
+//
+// In a Filestore, object leaves are stored as FilestoreNodes. FilestoreNodes
+// include a filesystem path and an offset, allowing a Blockstore dealing with
+// such blocks to avoid storing the whole contents and reading them from their
+// filesystem location instead.
+package filestore
+
+import (
+	"context"
+	"errors"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	posinfo "github.com/ipfs/boxo/filestore/posinfo"
+	cid "github.com/ipfs/go-cid"
+	dsq "github.com/ipfs/go-datastore/query"
+	ipld "github.com/ipfs/go-ipld-format"
+	logging "github.com/ipfs/go-log"
+)
+
+var logger = logging.Logger("filestore")
+
+var ErrFilestoreNotEnabled = errors.New("filestore is not enabled, see https://git.io/vNItf")
+var ErrUrlstoreNotEnabled = errors.New("urlstore is not enabled")
+
+// Filestore implements a Blockstore by combining a standard Blockstore
+// to store regular blocks and a special Blockstore called
+// FileManager to store blocks whose data exists in an external file.
+type Filestore struct {
+	fm *FileManager
+	bs blockstore.Blockstore
+}
+
+// FileManager returns the FileManager in Filestore.
+func (f *Filestore) FileManager() *FileManager {
+	return f.fm
+}
+
+// MainBlockstore returns the standard Blockstore in the Filestore.
+func (f *Filestore) MainBlockstore() blockstore.Blockstore {
+	return f.bs
+}
+
+// NewFilestore creates a Filestore using the given Blockstore and FileManager.
+func NewFilestore(bs blockstore.Blockstore, fm *FileManager) *Filestore {
+	return &Filestore{fm, bs}
+}
+
+// AllKeysChan returns a channel from which to read the keys stored in
+// the blockstore. If the given context is cancelled, the channel will be closed.
+func (f *Filestore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	ctx, cancel := context.WithCancel(ctx)
+
+	a, err := f.bs.AllKeysChan(ctx)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+
+	out := make(chan cid.Cid, dsq.KeysOnlyBufSize)
+	go func() {
+		defer cancel()
+		defer close(out)
+
+		var done bool
+		for !done {
+			select {
+			case c, ok := <-a:
+				if !ok {
+					done = true
+					continue
+				}
+				select {
+				case out <- c:
+				case <-ctx.Done():
+					return
+				}
+			case <-ctx.Done():
+				return
+			}
+		}
+
+		// Can't do these at the same time because the abstractions around
+		// leveldb make us query leveldb for both operations. We apparently
+		// can't query leveldb concurrently.
+		b, err := f.fm.AllKeysChan(ctx)
+		if err != nil {
+			logger.Error("error querying filestore: ", err)
+			return
+		}
+
+		done = false
+		for !done {
+			select {
+			case c, ok := <-b:
+				if !ok {
+					done = true
+					continue
+				}
+				select {
+				case out <- c:
+				case <-ctx.Done():
+					return
+				}
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	return out, nil
+}
+
+// DeleteBlock deletes the block with the given key from the
+// blockstore. As expected, in the case of FileManager blocks, only the
+// reference is deleted, not its contents. It may return
+// ErrNotFound when the block is not stored.
+func (f *Filestore) DeleteBlock(ctx context.Context, c cid.Cid) error {
+	err1 := f.bs.DeleteBlock(ctx, c)
+	if err1 != nil && !ipld.IsNotFound(err1) {
+		return err1
+	}
+
+	err2 := f.fm.DeleteBlock(ctx, c)
+
+	// if we successfully removed something from the blockstore, but the
+	// filestore didn't have it, return success
+	if !ipld.IsNotFound(err2) {
+		return err2
+	}
+
+	if ipld.IsNotFound(err1) {
+		return err1
+	}
+
+	return nil
+}
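+
+// A minimal wiring sketch (illustrative only; `mds` is any batching
+// datastore and `root` a hypothetical directory holding the referenced
+// files, with error handling elided):
+//
+//	fm := filestore.NewFileManager(mds, root)
+//	fm.AllowFiles = true
+//	bs := blockstore.NewBlockstore(mds)
+//	fs := filestore.NewFilestore(bs, fm)
+
+// Get retrieves the block with the given Cid. It may return
+// ErrNotFound when the block is not stored.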
+func (f *Filestore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + blk, err := f.bs.Get(ctx, c) + if ipld.IsNotFound(err) { + return f.fm.Get(ctx, c) + } + return blk, err +} + +// GetSize returns the size of the requested block. It may return ErrNotFound +// when the block is not stored. +func (f *Filestore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + size, err := f.bs.GetSize(ctx, c) + if err != nil { + if ipld.IsNotFound(err) { + return f.fm.GetSize(ctx, c) + } + return -1, err + } + return size, nil +} + +// Has returns true if the block with the given Cid is +// stored in the Filestore. +func (f *Filestore) Has(ctx context.Context, c cid.Cid) (bool, error) { + has, err := f.bs.Has(ctx, c) + if err != nil { + return false, err + } + + if has { + return true, nil + } + + return f.fm.Has(ctx, c) +} + +// Put stores a block in the Filestore. For blocks of +// underlying type FilestoreNode, the operation is +// delegated to the FileManager, while the rest of blocks +// are handled by the regular blockstore. +func (f *Filestore) Put(ctx context.Context, b blocks.Block) error { + has, err := f.Has(ctx, b.Cid()) + if err != nil { + return err + } + + if has { + return nil + } + + switch b := b.(type) { + case *posinfo.FilestoreNode: + return f.fm.Put(ctx, b) + default: + return f.bs.Put(ctx, b) + } +} + +// PutMany is like Put(), but takes a slice of blocks, allowing +// the underlying blockstore to perform batch transactions. +func (f *Filestore) PutMany(ctx context.Context, bs []blocks.Block) error { + var normals []blocks.Block + var fstores []*posinfo.FilestoreNode + + for _, b := range bs { + has, err := f.Has(ctx, b.Cid()) + if err != nil { + return err + } + + if has { + continue + } + + switch b := b.(type) { + case *posinfo.FilestoreNode: + fstores = append(fstores, b) + default: + normals = append(normals, b) + } + } + + if len(normals) > 0 { + err := f.bs.PutMany(ctx, normals) + if err != nil { + return err + } + } + + if len(fstores) > 0 { + err := f.fm.PutMany(ctx, fstores) + if err != nil { + return err + } + } + return nil +} + +// HashOnRead calls blockstore.HashOnRead. 
+func (f *Filestore) HashOnRead(enabled bool) { + f.bs.HashOnRead(enabled) +} + +var _ blockstore.Blockstore = (*Filestore)(nil) diff --git a/filestore/filestore_test.go b/filestore/filestore_test.go new file mode 100644 index 0000000000..e3614560ff --- /dev/null +++ b/filestore/filestore_test.go @@ -0,0 +1,180 @@ +package filestore + +import ( + "bytes" + "context" + "math/rand" + "os" + "testing" + + dag "github.com/ipfs/boxo/ipld/merkledag" + + blockstore "github.com/ipfs/boxo/blockstore" + posinfo "github.com/ipfs/boxo/filestore/posinfo" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ipld "github.com/ipfs/go-ipld-format" +) + +var bg = context.Background() + +func newTestFilestore(t *testing.T) (string, *Filestore) { + mds := ds.NewMapDatastore() + + testdir, err := os.MkdirTemp("", "filestore-test") + if err != nil { + t.Fatal(err) + } + fm := NewFileManager(mds, testdir) + fm.AllowFiles = true + + bs := blockstore.NewBlockstore(mds) + fstore := NewFilestore(bs, fm) + return testdir, fstore +} + +func makeFile(dir string, data []byte) (string, error) { + f, err := os.CreateTemp(dir, "file") + if err != nil { + return "", err + } + + _, err = f.Write(data) + if err != nil { + return "", err + } + + return f.Name(), nil +} + +func TestBasicFilestore(t *testing.T) { + dir, fs := newTestFilestore(t) + + buf := make([]byte, 1000) + rand.Read(buf) + + fname, err := makeFile(dir, buf) + if err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for i := 0; i < 100; i++ { + n := &posinfo.FilestoreNode{ + PosInfo: &posinfo.PosInfo{ + FullPath: fname, + Offset: uint64(i * 10), + }, + Node: dag.NewRawNode(buf[i*10 : (i+1)*10]), + } + + err := fs.Put(bg, n) + if err != nil { + t.Fatal(err) + } + cids = append(cids, n.Node.Cid()) + } + + for i, c := range cids { + blk, err := fs.Get(bg, c) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(blk.RawData(), buf[i*10:(i+1)*10]) { + t.Fatal("data didnt match on the way out") + } + } + + kch, err := fs.AllKeysChan(context.Background()) + if err != nil { + t.Fatal(err) + } + + out := make(map[string]struct{}) + for c := range kch { + out[c.KeyString()] = struct{}{} + } + + if len(out) != len(cids) { + t.Fatal("mismatch in number of entries") + } + + for _, c := range cids { + if _, ok := out[c.KeyString()]; !ok { + t.Fatal("missing cid: ", c) + } + } +} + +func randomFileAdd(t *testing.T, fs *Filestore, dir string, size int) (string, []cid.Cid) { + buf := make([]byte, size) + rand.Read(buf) + + fname, err := makeFile(dir, buf) + if err != nil { + t.Fatal(err) + } + + var out []cid.Cid + for i := 0; i < size/10; i++ { + n := &posinfo.FilestoreNode{ + PosInfo: &posinfo.PosInfo{ + FullPath: fname, + Offset: uint64(i * 10), + }, + Node: dag.NewRawNode(buf[i*10 : (i+1)*10]), + } + err := fs.Put(bg, n) + if err != nil { + t.Fatal(err) + } + out = append(out, n.Cid()) + } + + return fname, out +} + +func TestDeletes(t *testing.T) { + dir, fs := newTestFilestore(t) + _, cids := randomFileAdd(t, fs, dir, 100) + todelete := cids[:4] + for _, c := range todelete { + err := fs.DeleteBlock(bg, c) + if err != nil { + t.Fatal(err) + } + } + + deleted := make(map[string]bool) + for _, c := range todelete { + _, err := fs.Get(bg, c) + if !ipld.IsNotFound(err) { + t.Fatal("expected blockstore not found error") + } + deleted[c.KeyString()] = true + } + + keys, err := fs.AllKeysChan(context.Background()) + if err != nil { + t.Fatal(err) + } + + for c := range keys { + if deleted[c.KeyString()] { + t.Fatal("shouldnt have reference to this key 
anymore") + } + } +} + +func TestIsURL(t *testing.T) { + if !IsURL("http://www.example.com") { + t.Fatal("IsURL failed: http://www.example.com") + } + if !IsURL("https://www.example.com") { + t.Fatal("IsURL failed: https://www.example.com") + } + if IsURL("adir/afile") || IsURL("http:/ /afile") || IsURL("http:/a/file") { + t.Fatal("IsURL recognized non-url") + } +} diff --git a/filestore/fsrefstore.go b/filestore/fsrefstore.go new file mode 100644 index 0000000000..b21ff27323 --- /dev/null +++ b/filestore/fsrefstore.go @@ -0,0 +1,340 @@ +package filestore + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + + pb "github.com/ipfs/boxo/filestore/pb" + + proto "github.com/gogo/protobuf/proto" + blocks "github.com/ipfs/boxo/blocks" + dshelp "github.com/ipfs/boxo/datastore/dshelp" + posinfo "github.com/ipfs/boxo/filestore/posinfo" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dsns "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + ipld "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" +) + +// FilestorePrefix identifies the key prefix for FileManager blocks. +var FilestorePrefix = ds.NewKey("filestore") + +// FileManager is a blockstore implementation which stores special +// blocks FilestoreNode type. These nodes only contain a reference +// to the actual location of the block data in the filesystem +// (a path and an offset). +type FileManager struct { + AllowFiles bool + AllowUrls bool + ds ds.Batching + root string +} + +// CorruptReferenceError implements the error interface. +// It is used to indicate that the block contents pointed +// by the referencing blocks cannot be retrieved (i.e. the +// file is not found, or the data changed as it was being read). +type CorruptReferenceError struct { + Code Status + Err error +} + +// Error() returns the error message in the CorruptReferenceError +// as a string. +func (c CorruptReferenceError) Error() string { + return c.Err.Error() +} + +// NewFileManager initializes a new file manager with the given +// datastore and root. All FilestoreNodes paths are relative to the +// root path given here, which is prepended for any operations. +func NewFileManager(ds ds.Batching, root string) *FileManager { + return &FileManager{ds: dsns.Wrap(ds, FilestorePrefix), root: root} +} + +// AllKeysChan returns a channel from which to read the keys stored in +// the FileManager. If the given context is cancelled the channel will be +// closed. +// +// All CIDs returned are of type Raw. +func (f *FileManager) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + q := dsq.Query{KeysOnly: true} + + res, err := f.ds.Query(ctx, q) + if err != nil { + return nil, err + } + + out := make(chan cid.Cid, dsq.KeysOnlyBufSize) + go func() { + defer close(out) + for { + v, ok := res.NextSync() + if !ok { + return + } + + k := ds.RawKey(v.Key) + mhash, err := dshelp.DsKeyToMultihash(k) + if err != nil { + logger.Errorf("decoding cid from filestore: %s", err) + continue + } + + select { + case out <- cid.NewCidV1(cid.Raw, mhash): + case <-ctx.Done(): + return + } + } + }() + + return out, nil +} + +// DeleteBlock deletes the reference-block from the underlying +// datastore. It does not touch the referenced data. 
+func (f *FileManager) DeleteBlock(ctx context.Context, c cid.Cid) error { + err := f.ds.Delete(ctx, dshelp.MultihashToDsKey(c.Hash())) + if err == ds.ErrNotFound { + return ipld.ErrNotFound{Cid: c} + } + return err +} + +// Get reads a block from the datastore. Reading a block +// is done in two steps: the first step retrieves the reference +// block from the datastore. The second step uses the stored +// path and offsets to read the raw block data directly from disk. +func (f *FileManager) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + dobj, err := f.getDataObj(ctx, c.Hash()) + if err != nil { + return nil, err + } + out, err := f.readDataObj(ctx, c.Hash(), dobj) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(out, c) +} + +// GetSize gets the size of the block from the datastore. +// +// This method may successfully return the size even if returning the block +// would fail because the associated file is no longer available. +func (f *FileManager) GetSize(ctx context.Context, c cid.Cid) (int, error) { + dobj, err := f.getDataObj(ctx, c.Hash()) + if err != nil { + return -1, err + } + return int(dobj.GetSize_()), nil +} + +func (f *FileManager) readDataObj(ctx context.Context, m mh.Multihash, d *pb.DataObj) ([]byte, error) { + if IsURL(d.GetFilePath()) { + return f.readURLDataObj(ctx, m, d) + } + return f.readFileDataObj(m, d) +} + +func (f *FileManager) getDataObj(ctx context.Context, m mh.Multihash) (*pb.DataObj, error) { + o, err := f.ds.Get(ctx, dshelp.MultihashToDsKey(m)) + switch err { + case ds.ErrNotFound: + return nil, ipld.ErrNotFound{Cid: cid.NewCidV1(cid.Raw, m)} + case nil: + // + default: + return nil, err + } + + return unmarshalDataObj(o) +} + +func unmarshalDataObj(data []byte) (*pb.DataObj, error) { + var dobj pb.DataObj + if err := proto.Unmarshal(data, &dobj); err != nil { + return nil, err + } + + return &dobj, nil +} + +func (f *FileManager) readFileDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, error) { + if !f.AllowFiles { + return nil, ErrFilestoreNotEnabled + } + + p := filepath.FromSlash(d.GetFilePath()) + abspath := filepath.Join(f.root, p) + + fi, err := os.Open(abspath) + if os.IsNotExist(err) { + return nil, &CorruptReferenceError{StatusFileNotFound, err} + } else if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + defer fi.Close() + + _, err = fi.Seek(int64(d.GetOffset()), io.SeekStart) + if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + + outbuf := make([]byte, d.GetSize_()) + _, err = io.ReadFull(fi, outbuf) + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, &CorruptReferenceError{StatusFileChanged, err} + } else if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + + // Work with CIDs for this, as they are a nice wrapper and things + // will not break if multihashes underlying types change. + origCid := cid.NewCidV1(cid.Raw, m) + outcid, err := origCid.Prefix().Sum(outbuf) + if err != nil { + return nil, err + } + + if !origCid.Equals(outcid) { + return nil, &CorruptReferenceError{StatusFileChanged, + fmt.Errorf("data in file did not match. 
%s offset %d", d.GetFilePath(), d.GetOffset())}
+	}
+
+	return outbuf, nil
+}
+
+// readURLDataObj reads and verifies the block from its URL.
+func (f *FileManager) readURLDataObj(ctx context.Context, m mh.Multihash, d *pb.DataObj) ([]byte, error) {
+	if !f.AllowUrls {
+		return nil, ErrUrlstoreNotEnabled
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "GET", d.GetFilePath(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", d.GetOffset(), d.GetOffset()+d.GetSize_()-1))
+
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, &CorruptReferenceError{StatusFileError, err}
+	}
+	// Close the body on all paths, not only on success.
+	defer res.Body.Close()
+	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusPartialContent {
+		return nil, &CorruptReferenceError{StatusFileError,
+			fmt.Errorf("expected HTTP 200 or 206 got %d", res.StatusCode)}
+	}
+
+	outbuf := make([]byte, d.GetSize_())
+	_, err = io.ReadFull(res.Body, outbuf)
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return nil, &CorruptReferenceError{StatusFileChanged, err}
+	} else if err != nil {
+		return nil, &CorruptReferenceError{StatusFileError, err}
+	}
+
+	// Work with CIDs for this, as they are a nice wrapper and things
+	// will not break if multihashes' underlying types change.
+	origCid := cid.NewCidV1(cid.Raw, m)
+	outcid, err := origCid.Prefix().Sum(outbuf)
+	if err != nil {
+		return nil, err
+	}
+
+	if !origCid.Equals(outcid) {
+		return nil, &CorruptReferenceError{StatusFileChanged,
+			fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset())}
+	}
+
+	return outbuf, nil
+}
+
+// Has returns whether the FileManager is storing a block reference. It does
+// not validate the data, nor check if the reference is valid.
+func (f *FileManager) Has(ctx context.Context, c cid.Cid) (bool, error) {
+	// NOTE: interesting thing to consider. Has doesn't validate the data.
+	// So the data on disk could be invalid, and we could think we have it.
+	dsk := dshelp.MultihashToDsKey(c.Hash())
+	return f.ds.Has(ctx, dsk)
+}
+
+type putter interface {
+	Put(context.Context, ds.Key, []byte) error
+}
+
+// Put adds a new reference block to the FileManager. It does not check
+// that the reference is valid.
+func (f *FileManager) Put(ctx context.Context, b *posinfo.FilestoreNode) error {
+	return f.putTo(ctx, b, f.ds)
+}
+
+func (f *FileManager) putTo(ctx context.Context, b *posinfo.FilestoreNode, to putter) error {
+	var dobj pb.DataObj
+
+	if IsURL(b.PosInfo.FullPath) {
+		if !f.AllowUrls {
+			return ErrUrlstoreNotEnabled
+		}
+		dobj.FilePath = b.PosInfo.FullPath
+	} else {
+		if !f.AllowFiles {
+			return ErrFilestoreNotEnabled
+		}
+		//lint:ignore SA1019 // ignore staticcheck
+		if !filepath.HasPrefix(b.PosInfo.FullPath, f.root) {
+			return fmt.Errorf("cannot add filestore references outside ipfs root (%s)", f.root)
+		}
+
+		p, err := filepath.Rel(f.root, b.PosInfo.FullPath)
+		if err != nil {
+			return err
+		}
+
+		dobj.FilePath = filepath.ToSlash(p)
+	}
+	dobj.Offset = b.PosInfo.Offset
+	dobj.Size_ = uint64(len(b.RawData()))
+
+	data, err := proto.Marshal(&dobj)
+	if err != nil {
+		return err
+	}
+
+	return to.Put(ctx, dshelp.MultihashToDsKey(b.Cid().Hash()), data)
+}
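+
+// A minimal sketch of storing a URL-backed reference (illustrative only;
+// the URL and `nd`, an existing raw block whose bytes live at that URL,
+// are assumptions, and error handling is elided):
+//
+//	fm.AllowUrls = true
+//	err := fm.Put(ctx, &posinfo.FilestoreNode{
+//		Node:    nd,
+//		PosInfo: &posinfo.PosInfo{FullPath: "https://example.com/data.bin", Offset: 0},
+//	})
+
+// PutMany is like Put() but takes a slice of blocks instead,
+// allowing it to create a batch transaction.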
+func (f *FileManager) PutMany(ctx context.Context, bs []*posinfo.FilestoreNode) error { + batch, err := f.ds.Batch(ctx) + if err != nil { + return err + } + + for _, b := range bs { + if err := f.putTo(ctx, b, batch); err != nil { + return err + } + } + + return batch.Commit(ctx) +} + +// IsURL returns true if the string represents a valid URL that the +// urlstore can handle. More specifically it returns true if a string +// begins with 'http://' or 'https://'. +func IsURL(str string) bool { + return (len(str) > 7 && str[0] == 'h' && str[1] == 't' && str[2] == 't' && str[3] == 'p') && + ((len(str) > 8 && str[4] == 's' && str[5] == ':' && str[6] == '/' && str[7] == '/') || + (str[4] == ':' && str[5] == '/' && str[6] == '/')) +} diff --git a/filestore/pb/dataobj.pb.go b/filestore/pb/dataobj.pb.go new file mode 100644 index 0000000000..d342cabe51 --- /dev/null +++ b/filestore/pb/dataobj.pb.go @@ -0,0 +1,376 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: dataobj.proto + +package datastore_pb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DataObj struct { + FilePath string `protobuf:"bytes,1,opt,name=FilePath" json:"FilePath"` + Offset uint64 `protobuf:"varint,2,opt,name=Offset" json:"Offset"` + Size_ uint64 `protobuf:"varint,3,opt,name=Size" json:"Size"` +} + +func (m *DataObj) Reset() { *m = DataObj{} } +func (m *DataObj) String() string { return proto.CompactTextString(m) } +func (*DataObj) ProtoMessage() {} +func (*DataObj) Descriptor() ([]byte, []int) { + return fileDescriptor_a76cb282d869d683, []int{0} +} +func (m *DataObj) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataObj) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataObj.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataObj) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataObj.Merge(m, src) +} +func (m *DataObj) XXX_Size() int { + return m.Size() +} +func (m *DataObj) XXX_DiscardUnknown() { + xxx_messageInfo_DataObj.DiscardUnknown(m) +} + +var xxx_messageInfo_DataObj proto.InternalMessageInfo + +func (m *DataObj) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *DataObj) GetOffset() uint64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *DataObj) GetSize_() uint64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func init() { + proto.RegisterType((*DataObj)(nil), "datastore.pb.DataObj") +} + +func init() { proto.RegisterFile("dataobj.proto", fileDescriptor_a76cb282d869d683) } + +var fileDescriptor_a76cb282d869d683 = []byte{ + // 150 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x49, 0x2c, 0x49, + 0xcc, 0x4f, 0xca, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x71, 0x8b, 0x4b, 0xf2, + 0x8b, 0x52, 0xf5, 0x0a, 0x92, 0x94, 
0x92, 0xb9, 0xd8, 0x5d, 0x12, 0x4b, 0x12, 0xfd, 0x93, 0xb2, + 0x84, 0x14, 0xb8, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x03, 0x12, 0x4b, 0x32, 0x24, 0x18, 0x15, 0x18, + 0x35, 0x38, 0x9d, 0x58, 0x4e, 0xdc, 0x93, 0x67, 0x08, 0x82, 0x8b, 0x0a, 0xc9, 0x70, 0xb1, 0xf9, + 0xa7, 0xa5, 0x15, 0xa7, 0x96, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xb0, 0x40, 0xe5, 0xa1, 0x62, 0x42, + 0x12, 0x5c, 0x2c, 0xc1, 0x99, 0x55, 0xa9, 0x12, 0xcc, 0x48, 0x72, 0x60, 0x11, 0x27, 0x89, 0x13, + 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, + 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x00, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x4a, + 0x76, 0xa0, 0x9c, 0x00, 0x00, 0x00, +} + +func (m *DataObj) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataObj) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataObj) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintDataobj(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x18 + i = encodeVarintDataobj(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x10 + i -= len(m.FilePath) + copy(dAtA[i:], m.FilePath) + i = encodeVarintDataobj(dAtA, i, uint64(len(m.FilePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintDataobj(dAtA []byte, offset int, v uint64) int { + offset -= sovDataobj(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DataObj) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FilePath) + n += 1 + l + sovDataobj(uint64(l)) + n += 1 + sovDataobj(uint64(m.Offset)) + n += 1 + sovDataobj(uint64(m.Size_)) + return n +} + +func sovDataobj(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDataobj(x uint64) (n int) { + return sovDataobj(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DataObj) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DataObj: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DataObj: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDataobj + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDataobj + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDataobj(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDataobj + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDataobj + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDataobj(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataobj + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataobj + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataobj + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDataobj + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDataobj + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDataobj + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDataobj = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDataobj = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDataobj = fmt.Errorf("proto: unexpected end of group") +) diff --git a/filestore/pb/dataobj.proto b/filestore/pb/dataobj.proto new file mode 100644 index 0000000000..909d22b77f --- /dev/null +++ b/filestore/pb/dataobj.proto @@ -0,0 +1,9 @@ +syntax = "proto2"; + +package datastore.pb; + +message DataObj { + optional string FilePath = 1; + optional uint64 Offset = 2; + optional uint64 Size = 3; +} diff --git a/filestore/posinfo/.github/ISSUE_TEMPLATE/open_an_issue.md b/filestore/posinfo/.github/ISSUE_TEMPLATE/open_an_issue.md new file mode 100644 index 0000000000..4fcbd00aca --- /dev/null +++ b/filestore/posinfo/.github/ISSUE_TEMPLATE/open_an_issue.md @@ -0,0 +1,19 @@ +--- +name: Open an issue +about: Only for actionable issues relevant to this repository. 
+title: ''
+labels: need/triage
+assignees: ''
+
+---
+
diff --git a/filestore/posinfo/.gitignore b/filestore/posinfo/.gitignore
new file mode 100644
index 0000000000..a1338d6851
--- /dev/null
+++ b/filestore/posinfo/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
diff --git a/filestore/posinfo/Makefile b/filestore/posinfo/Makefile
new file mode 100644
index 0000000000..24d71558e7
--- /dev/null
+++ b/filestore/posinfo/Makefile
@@ -0,0 +1,18 @@
+all: deps
+gx:
+	go get github.com/whyrusleeping/gx
+	go get github.com/whyrusleeping/gx-go
+deps: gx
+	gx --verbose install --global
+	gx-go rewrite
+test: deps
+	go test -v -covermode count -coverprofile=coverage.out .
+rw:
+	gx-go rewrite
+rwundo:
+	gx-go rewrite --undo
+publish: rwundo
+	gx publish
+.PHONY: all gx deps test rw rwundo publish
+
+
diff --git a/filestore/posinfo/README.md b/filestore/posinfo/README.md
new file mode 100644
index 0000000000..bd509c17e0
--- /dev/null
+++ b/filestore/posinfo/README.md
@@ -0,0 +1,37 @@
+# go-ipfs-posinfo
+
+[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
+[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
+[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
+[![GoDoc](https://godoc.org/github.com/ipfs/go-ipfs-posinfo?status.svg)](https://godoc.org/github.com/ipfs/go-ipfs-posinfo)
+[![Build Status](https://travis-ci.org/ipfs/go-ipfs-posinfo.svg?branch=master)](https://travis-ci.org/ipfs/go-ipfs-posinfo)
+
+> Posinfo wraps offset information for ipfs filestore nodes
+
+## Table of Contents
+
+- [Install](#install)
+- [Usage](#usage)
+- [Contribute](#contribute)
+- [License](#license)
+
+## Install
+
+```
+go get github.com/ipfs/go-ipfs-posinfo
+```
+
+## Usage
+
+See the [GoDoc documentation](https://godoc.org/github.com/ipfs/go-ipfs-posinfo)
+
+## Contribute
+
+PRs accepted.
+
+Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
+
+## License
+
+MIT © Protocol Labs, Inc.
diff --git a/filestore/posinfo/posinfo.go b/filestore/posinfo/posinfo.go
new file mode 100644
index 0000000000..0b32c89da1
--- /dev/null
+++ b/filestore/posinfo/posinfo.go
@@ -0,0 +1,23 @@
+// Package posinfo wraps offset information used by ipfs filestore nodes.
+package posinfo
+
+import (
+	"os"
+
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// PosInfo stores information about a file: its offset, its full path, and
+// its stat.
+type PosInfo struct {
+	Offset   uint64
+	FullPath string
+	Stat     os.FileInfo // can be nil
+}
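+
+// A minimal construction sketch (illustrative only; `nd` is an existing
+// ipld.Node whose raw bytes start at byte 512 of the hypothetical file below):
+//
+//	fsn := &posinfo.FilestoreNode{
+//		Node: nd,
+//		PosInfo: &posinfo.PosInfo{
+//			Offset:   512,
+//			FullPath: "/repo/data/file.bin",
+//		},
+//	}
+
+// FilestoreNode is an ipld.Node which carries PosInfo with it,
+// allowing it to be mapped directly to a filesystem object.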
+type FilestoreNode struct {
+	ipld.Node
+	PosInfo *PosInfo
+}
diff --git a/filestore/util.go b/filestore/util.go
new file mode 100644
index 0000000000..80bec61fd8
--- /dev/null
+++ b/filestore/util.go
@@ -0,0 +1,292 @@
+package filestore
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	pb "github.com/ipfs/boxo/filestore/pb"
+
+	dshelp "github.com/ipfs/boxo/datastore/dshelp"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	dsq "github.com/ipfs/go-datastore/query"
+	ipld "github.com/ipfs/go-ipld-format"
+	mh "github.com/multiformats/go-multihash"
+)
+
+// Status is used to identify the state of the block data referenced
+// by a FilestoreNode. Among other places, it is used by CorruptReferenceError.
+type Status int32
+
+// These are the supported Status codes.
+const (
+	StatusOk           Status = 0
+	StatusFileError    Status = 10 // Backing File Error
+	StatusFileNotFound Status = 11 // Backing File Not Found
+	StatusFileChanged  Status = 12 // Contents of the file changed
+	StatusOtherError   Status = 20 // Internal Error, likely corrupt entry
+	StatusKeyNotFound  Status = 30
+)
+
+// String provides a human-readable representation for Status codes.
+func (s Status) String() string {
+	switch s {
+	case StatusOk:
+		return "ok"
+	case StatusFileError:
+		return "error"
+	case StatusFileNotFound:
+		return "no-file"
+	case StatusFileChanged:
+		return "changed"
+	case StatusOtherError:
+		return "ERROR"
+	case StatusKeyNotFound:
+		return "missing"
+	default:
+		return "???"
+	}
+}
+
+// Format returns the status as a fixed-width string, padded with
+// trailing spaces.
+func (s Status) Format() string {
+	return fmt.Sprintf("%-7s", s.String())
+}
+
+// ListRes wraps the response of the List*() functions, which
+// allow one to obtain and verify blocks stored by the FileManager
+// of a Filestore. It includes information about the referenced
+// block.
+type ListRes struct {
+	Status   Status
+	ErrorMsg string
+	Key      cid.Cid
+	FilePath string
+	Offset   uint64
+	Size     uint64
+}
+
+// FormatLong returns a human-readable string for a ListRes object.
+func (r *ListRes) FormatLong(enc func(cid.Cid) string) string {
+	if enc == nil {
+		enc = (cid.Cid).String
+	}
+	switch {
+	case !r.Key.Defined():
+		return ""
+	case r.FilePath == "":
+		return r.Key.String()
+	default:
+		return fmt.Sprintf("%-50s %6d %s %d", enc(r.Key), r.Size, r.FilePath, r.Offset)
+	}
+}
+
+// List fetches the block with the given key from the Filemanager
+// of the given Filestore and returns a ListRes object with the information.
+// List does not verify that the reference is valid or whether the
+// raw data is accessible. See Verify().
+func List(ctx context.Context, fs *Filestore, key cid.Cid) *ListRes {
+	return list(ctx, fs, false, key.Hash())
+}
+
+// ListAll returns a function as an iterator which, once invoked, returns
+// one by one each block in the Filestore's FileManager.
+// ListAll does not verify that the references are valid or whether
+// the raw data is accessible. See VerifyAll().
+func ListAll(ctx context.Context, fs *Filestore, fileOrder bool) (func(context.Context) *ListRes, error) {
+	if fileOrder {
+		return listAllFileOrder(ctx, fs, false)
+	}
+	return listAll(ctx, fs, false)
+}
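+
+// A minimal sketch of walking every reference (illustrative only; `fs` is an
+// already-constructed *Filestore and error handling is elided):
+//
+//	next, _ := filestore.ListAll(ctx, fs, false)
+//	for res := next(ctx); res != nil; res = next(ctx) {
+//		fmt.Println(res.FormatLong(nil))
+//	}
+
+// Verify fetches the block with the given key from the Filemanager
+// of the given Filestore and returns a ListRes object with the information.
+// Verify makes sure that the reference is valid and the block data can be
+// read.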
+func Verify(ctx context.Context, fs *Filestore, key cid.Cid) *ListRes {
+	return list(ctx, fs, true, key.Hash())
+}
+
+// VerifyAll returns a function as an iterator which, once invoked,
+// returns one by one each block in the Filestore's FileManager.
+// VerifyAll checks that the reference is valid and that the block data
+// can be read.
+func VerifyAll(ctx context.Context, fs *Filestore, fileOrder bool) (func(context.Context) *ListRes, error) {
+	if fileOrder {
+		return listAllFileOrder(ctx, fs, true)
+	}
+	return listAll(ctx, fs, true)
+}
+
+func list(ctx context.Context, fs *Filestore, verify bool, key mh.Multihash) *ListRes {
+	dobj, err := fs.fm.getDataObj(ctx, key)
+	if err != nil {
+		return mkListRes(key, nil, err)
+	}
+	if verify {
+		_, err = fs.fm.readDataObj(ctx, key, dobj)
+	}
+	return mkListRes(key, dobj, err)
+}
+
+func listAll(ctx context.Context, fs *Filestore, verify bool) (func(context.Context) *ListRes, error) {
+	q := dsq.Query{}
+	qr, err := fs.fm.ds.Query(ctx, q)
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context) *ListRes {
+		mhash, dobj, err := next(qr)
+		if dobj == nil && err == nil {
+			return nil
+		} else if err == nil && verify {
+			_, err = fs.fm.readDataObj(ctx, mhash, dobj)
+		}
+		return mkListRes(mhash, dobj, err)
+	}, nil
+}
+
+func next(qr dsq.Results) (mh.Multihash, *pb.DataObj, error) {
+	v, ok := qr.NextSync()
+	if !ok {
+		return nil, nil, nil
+	}
+
+	k := ds.RawKey(v.Key)
+	mhash, err := dshelp.DsKeyToMultihash(k)
+	if err != nil {
+		return nil, nil, fmt.Errorf("decoding multihash from filestore: %s", err)
+	}
+
+	dobj, err := unmarshalDataObj(v.Value)
+	if err != nil {
+		return mhash, nil, err
+	}
+
+	return mhash, dobj, nil
+}
+
+func listAllFileOrder(ctx context.Context, fs *Filestore, verify bool) (func(context.Context) *ListRes, error) {
+	q := dsq.Query{}
+	qr, err := fs.fm.ds.Query(ctx, q)
+	if err != nil {
+		return nil, err
+	}
+
+	var entries listEntries
+
+	for {
+		v, ok := qr.NextSync()
+		if !ok {
+			break
+		}
+		dobj, err := unmarshalDataObj(v.Value)
+		if err != nil {
+			entries = append(entries, &listEntry{
+				dsKey: v.Key,
+				err:   err,
+			})
+		} else {
+			entries = append(entries, &listEntry{
+				dsKey:    v.Key,
+				filePath: dobj.GetFilePath(),
+				offset:   dobj.GetOffset(),
+				size:     dobj.GetSize_(),
+			})
+		}
+	}
+	sort.Sort(entries)
+
+	i := 0
+	return func(ctx context.Context) *ListRes {
+		if i >= len(entries) {
+			return nil
+		}
+		v := entries[i]
+		i++
+		// attempt to convert the datastore key to a Multihash,
+		// store the error but don't use it yet
+		mhash, keyErr := dshelp.DsKeyToMultihash(ds.RawKey(v.dsKey))
+		// if the entry already had an error, return that error first
+		if v.err != nil {
+			return mkListRes(mhash, nil, v.err)
+		}
+		// now reconstruct the DataObj
+		dobj := pb.DataObj{
+			FilePath: v.filePath,
+			Offset:   v.offset,
+			Size_:    v.size,
+		}
+		// if we could not convert the datastore key, return that error now
+		if keyErr != nil {
+			return mkListRes(mhash, &dobj, keyErr)
+		}
+		// finally verify the dataobj if requested
+		var err error
+		if verify {
+			_, err = fs.fm.readDataObj(ctx, mhash, &dobj)
+		}
+		return mkListRes(mhash, &dobj, err)
+	}, nil
+}
+
+type listEntry struct {
+	filePath string
+	offset   uint64
+	dsKey    string
+	size     uint64
+	err      error
+}
+
+type listEntries []*listEntry
+
+func (l listEntries) Len() int      { return len(l) }
+func (l listEntries) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l listEntries) Less(i, j int) bool {
+	if l[i].filePath == l[j].filePath {
+		if l[i].offset == l[j].offset {
+            return l[i].dsKey < l[j].dsKey
+        }
+        return l[i].offset < l[j].offset
+    }
+    return l[i].filePath < l[j].filePath
+}
+
+func mkListRes(m mh.Multihash, d *pb.DataObj, err error) *ListRes {
+    status := StatusOk
+    errorMsg := ""
+    if err != nil {
+        if err == ds.ErrNotFound || ipld.IsNotFound(err) {
+            status = StatusKeyNotFound
+        } else if err, ok := err.(*CorruptReferenceError); ok {
+            status = err.Code
+        } else {
+            status = StatusOtherError
+        }
+        errorMsg = err.Error()
+    }
+
+    c := cid.NewCidV1(cid.Raw, m)
+
+    if d == nil {
+        return &ListRes{
+            Status:   status,
+            ErrorMsg: errorMsg,
+            Key:      c,
+        }
+    }
+
+    return &ListRes{
+        Status:   status,
+        ErrorMsg: errorMsg,
+        Key:      c,
+        FilePath: d.FilePath,
+        Size:     d.Size_,
+        Offset:   d.Offset,
+    }
+}
diff --git a/gateway/README.md b/gateway/README.md
new file mode 100644
index 0000000000..0aacd9069d
--- /dev/null
+++ b/gateway/README.md
@@ -0,0 +1,34 @@
+# IPFS Gateway
+
+> A reference implementation of the HTTP Gateway Specifications.
+
+## Documentation
+
+* Go Documentation: https://pkg.go.dev/github.com/ipfs/boxo/gateway
+* Gateway Specification: https://github.com/ipfs/specs/tree/main/http-gateways#readme
+* Types of HTTP Gateways: https://docs.ipfs.tech/how-to/address-ipfs-on-web/#http-gateways
+
+## Example
+
+```go
+// Initialize your headers and apply the default headers.
+headers := map[string][]string{}
+gateway.AddAccessControlHeaders(headers)
+
+conf := gateway.Config{
+    Headers: headers,
+}
+
+// Initialize a NodeAPI interface for both online and offline versions.
+// The offline version should not make any network requests for missing content.
+ipfs := ...
+
+// Create an HTTP mux and set up the path gateway handler.
+mux := http.NewServeMux()
+gwHandler := gateway.NewHandler(conf, ipfs)
+mux.Handle("/ipfs/", gwHandler)
+mux.Handle("/ipns/", gwHandler)
+
+// Start the server on :8080 and voilà! You have a basic IPFS gateway running
+// at http://localhost:8080.
+_ = http.ListenAndServe(":8080", mux)
+```
diff --git a/gateway/assets/README.md b/gateway/assets/README.md
new file mode 100644
index 0000000000..25d1a35e80
--- /dev/null
+++ b/gateway/assets/README.md
@@ -0,0 +1,27 @@
+# Required Assets for the Gateway
+
+> DAG and Directory HTML for the HTTP gateway
+
+## Updating
+
+When making updates to the templates, please note the following:
+
+1. Make your changes to the (human-friendly) source documents in the `src` directory.
+2. Before testing or releasing, go to `assets/` and run `go generate .`.
+
+## Testing
+
+1. Make sure you have [Go](https://golang.org/dl/) installed.
+2. Start the test server, which lives in its own directory:
+
+```bash
+> cd test
+> go run .
+```
+
+This will listen on [`localhost:3000`](http://localhost:3000/) and reload the templates every time you refresh the page. Two pages are available:
+
+- [`localhost:3000/dag`](http://localhost:3000/dag) for the DAG template preview; and
+- [`localhost:3000/directory`](http://localhost:3000/directory) for the Directory template preview.
+
+If you get a "no such file or directory" error when running `go run .`, make sure you ran `go generate .` first to generate the minified artifact that the test server is looking for.
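+
+## Usage from Go
+
+For orientation, here is a minimal, hypothetical sketch of rendering the
+embedded directory listing template from Go. The import path and all data
+values are illustrative assumptions; adapt them to your module setup.
+
+```go
+package main
+
+import (
+    "os"
+
+    "github.com/ipfs/boxo/gateway/assets"
+)
+
+func main() {
+    // Hypothetical listing data; a real gateway fills this in from a
+    // UnixFS directory.
+    path := "/ipfs/<cid>/docs" // placeholder, not a parseable IPFS path
+    data := assets.DirectoryTemplateData{
+        GatewayURL: "//localhost:8080",
+        DNSLink:    false,
+        Path:       path,
+        Size:       "10 KiB",
+        // With an unparseable placeholder path, Breadcrumbs falls back to
+        // an empty list and the template prints Path verbatim.
+        Breadcrumbs: assets.Breadcrumbs(path, false),
+        Listing: []assets.DirectoryItem{
+            {Name: "about.txt", Path: path + "/about.txt", Size: "1 KiB"},
+        },
+    }
+
+    // DirectoryTemplate is parsed from the embedded directory-index.html
+    // at package init time.
+    if err := assets.DirectoryTemplate.Execute(os.Stdout, data); err != nil {
+        panic(err)
+    }
+}
+```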
diff --git a/gateway/assets/assets.go b/gateway/assets/assets.go
new file mode 100644
index 0000000000..ceb88545b2
--- /dev/null
+++ b/gateway/assets/assets.go
@@ -0,0 +1,203 @@
+//go:generate ./build.sh
+package assets
+
+import (
+    "embed"
+    "io"
+    "io/fs"
+    "net"
+    "strconv"
+
+    "html/template"
+    "net/url"
+    "path"
+    "strings"
+
+    "github.com/cespare/xxhash"
+
+    ipfspath "github.com/ipfs/boxo/path"
+)
+
+//go:embed dag-index.html directory-index.html knownIcons.txt
+var asset embed.FS
+
+// AssetHash is a non-cryptographic hash of all embedded assets.
+var AssetHash string
+
+var (
+    DirectoryTemplate *template.Template
+    DagTemplate       *template.Template
+)
+
+func init() {
+    initAssetsHash()
+    initTemplates()
+}
+
+func initAssetsHash() {
+    sum := xxhash.New()
+    err := fs.WalkDir(asset, ".", func(path string, d fs.DirEntry, err error) error {
+        if err != nil {
+            return err
+        }
+
+        if d.IsDir() {
+            return nil
+        }
+
+        file, err := asset.Open(path)
+        if err != nil {
+            return err
+        }
+        defer file.Close()
+        _, err = io.Copy(sum, file)
+        return err
+    })
+    if err != nil {
+        panic("error creating asset sum: " + err.Error())
+    }
+
+    AssetHash = strconv.FormatUint(sum.Sum64(), 32)
+}
+
+func initTemplates() {
+    knownIconsBytes, err := asset.ReadFile("knownIcons.txt")
+    if err != nil {
+        panic(err)
+    }
+    knownIcons := make(map[string]struct{})
+    for _, ext := range strings.Split(strings.TrimSuffix(string(knownIconsBytes), "\n"), "\n") {
+        knownIcons[ext] = struct{}{}
+    }
+
+    // helper to guess the icon for a file from its extension name
+    iconFromExt := func(name string) string {
+        ext := path.Ext(name)
+        _, ok := knownIcons[ext]
+        if !ok {
+            // default blank icon
+            return "ipfs-_blank"
+        }
+        return "ipfs-" + ext[1:] // slice off the leading dot
+    }
+
+    // custom template-escaping function to escape a full path, including '#' and '?'
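+    // ('#' and '?' are valid URL syntax, so they survive html/template's
+    // default href normalization; building a url.URL with only the Path
+    // set and calling String() percent-encodes them instead, so they
+    // cannot be misread as a query or fragment separator)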
+    urlEscape := func(rawUrl string) string {
+        pathURL := url.URL{Path: rawUrl}
+        return pathURL.String()
+    }
+
+    // Directory listing template
+    dirIndexBytes, err := asset.ReadFile("directory-index.html")
+    if err != nil {
+        panic(err)
+    }
+
+    DirectoryTemplate = template.Must(template.New("dir").Funcs(template.FuncMap{
+        "iconFromExt": iconFromExt,
+        "urlEscape":   urlEscape,
+    }).Parse(string(dirIndexBytes)))
+
+    // DAG Index template
+    dagIndexBytes, err := asset.ReadFile("dag-index.html")
+    if err != nil {
+        panic(err)
+    }
+
+    DagTemplate = template.Must(template.New("dir").Parse(string(dagIndexBytes)))
+}
+
+type DagTemplateData struct {
+    Path      string
+    CID       string
+    CodecName string
+    CodecHex  string
+}
+
+type DirectoryTemplateData struct {
+    GatewayURL  string
+    DNSLink     bool
+    Listing     []DirectoryItem
+    Size        string
+    Path        string
+    Breadcrumbs []Breadcrumb
+    BackLink    string
+    Hash        string
+}
+
+type DirectoryItem struct {
+    Size      string
+    Name      string
+    Path      string
+    Hash      string
+    ShortHash string
+}
+
+type Breadcrumb struct {
+    Name string
+    Path string
+}
+
+func Breadcrumbs(urlPath string, dnslinkOrigin bool) []Breadcrumb {
+    var ret []Breadcrumb
+
+    p, err := ipfspath.ParsePath(urlPath)
+    if err != nil {
+        // No breadcrumbs; fall back to the bare Path in the template
+        return ret
+    }
+    segs := p.Segments()
+    contentRoot := segs[1]
+    for i, seg := range segs {
+        if i == 0 {
+            ret = append(ret, Breadcrumb{Name: seg})
+        } else {
+            ret = append(ret, Breadcrumb{
+                Name: seg,
+                Path: "/" + strings.Join(segs[0:i+1], "/"),
+            })
+        }
+    }
+
+    // Drop the /ipns/ prefix from breadcrumb Paths when directory
+    // listing on a DNSLink website (loaded due to Host header in HTTP
+    // request). Necessary because the hostname most likely won't have a
+    // public gateway mounted.
+    if dnslinkOrigin {
+        prefix := "/ipns/" + contentRoot
+        for i, crumb := range ret {
+            if strings.HasPrefix(crumb.Path, prefix) {
+                ret[i].Path = strings.Replace(crumb.Path, prefix, "", 1)
+            }
+        }
+        // Make the contentRoot breadcrumb link to the website root
+        ret[1].Path = "/"
+    }
+
+    return ret
+}
+
+func ShortHash(hash string) string {
+    if len(hash) <= 8 {
+        return hash
+    }
+    return (hash[0:4] + "\u2026" + hash[len(hash)-4:])
+}
+
+// HasDNSLinkOrigin is a helper to detect DNSLink website context
+// (when the hostname from gwURL matches /ipns/ in the path).
+func HasDNSLinkOrigin(gwURL string, path string) bool {
+    if gwURL != "" {
+        fqdn := stripPort(strings.TrimPrefix(gwURL, "//"))
+        return strings.HasPrefix(path, "/ipns/"+fqdn)
+    }
+    return false
+}
+
+func stripPort(hostname string) string {
+    host, _, err := net.SplitHostPort(hostname)
+    if err == nil {
+        return host
+    }
+    return hostname
+}
diff --git a/gateway/assets/build.sh b/gateway/assets/build.sh
new file mode 100755
index 0000000000..531bbfc024
--- /dev/null
+++ b/gateway/assets/build.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function build() {
+  rm -f $1
+  sed '/<link rel="stylesheet"/d' ./src/$1 > ./base-html.html
+  (echo "<style>" && cat ./src/style.css ./src/icons.css | tr -d "\t\n\r" && echo "</style>") > ./minified-wrapped-style.html
+  sed '/<\/title>/ r ./minified-wrapped-style.html' ./base-html.html > ./$1
+  rm ./base-html.html && rm ./minified-wrapped-style.html
+}
+
+build "directory-index.html"
+build "dag-index.html"
diff --git a/gateway/assets/dag-index.html b/gateway/assets/dag-index.html
new file mode 100644
index 0000000000..5bba8f5c0d
--- /dev/null
+++ b/gateway/assets/dag-index.html
@@ -0,0 +1,67 @@
+
+{{ $root := . }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{{ .Path }}
+
+
+
+
+

CID: {{.CID}}
+ Codec: {{.CodecName}} ({{.CodecHex}})

+
+
+ + + + + + + +
+

Preview as JSON
(application/json)

+
+

Or download as: +

+

+
+
+
+ + diff --git a/gateway/assets/directory-index.html b/gateway/assets/directory-index.html new file mode 100644 index 0000000000..d861cb6570 --- /dev/null +++ b/gateway/assets/directory-index.html @@ -0,0 +1,99 @@ + +{{ $root := . }} + + + + + + + + + + + + + + + + + +{{ .Path }} + + + + +
+
+
+ + Index of + {{ range .Breadcrumbs -}} + /{{ if .Path }}{{ .Name }}{{ else }}{{ .Name }}{{ end }} + {{- else }} + {{ .Path }} + {{ end }} + + {{ if .Hash }} +
+ {{ .Hash }} +
+ {{ end }} +
+ {{ if .Size }} +
+  {{ .Size }} +
+ {{ end }} +
+
+ + {{ if .BackLink }} + + + + + + + {{ end }} + {{ range .Listing }} + + + + + + + {{ end }} +
+
 
+
+ .. +
+
 
+
+ {{ .Name }} + + {{ if .Hash }} + + {{ .ShortHash }} + + {{ end }} + {{ .Size }}
+
+
+ + diff --git a/gateway/assets/knownIcons.txt b/gateway/assets/knownIcons.txt new file mode 100644 index 0000000000..c110530ea5 --- /dev/null +++ b/gateway/assets/knownIcons.txt @@ -0,0 +1,65 @@ +.aac +.aiff +.ai +.avi +.bmp +.c +.cpp +.css +.dat +.dmg +.doc +.dotx +.dwg +.dxf +.eps +.exe +.flv +.gif +.h +.hpp +.html +.ics +.iso +.java +.jpg +.jpeg +.js +.key +.less +.mid +.mkv +.mov +.mp3 +.mp4 +.mpg +.odf +.ods +.odt +.otp +.ots +.ott +.pdf +.php +.png +.ppt +.psd +.py +.qt +.rar +.rb +.rtf +.sass +.scss +.sql +.tga +.tgz +.tiff +.txt +.wav +.wmv +.xls +.xlsx +.xml +.yml +.zip diff --git a/gateway/assets/src/dag-index.html b/gateway/assets/src/dag-index.html new file mode 100644 index 0000000000..7a42ef6bed --- /dev/null +++ b/gateway/assets/src/dag-index.html @@ -0,0 +1,66 @@ + +{{ $root := . }} + + + + + + + + + + + + + + + + + + + +{{ .Path }} + + + +
+
+

CID: {{.CID}}
+ Codec: {{.CodecName}} ({{.CodecHex}})

+
+
+ + + + + + + +
+

Preview as JSON
(application/json)

+
+

Or download as: +

+

+
+
+
+ + diff --git a/gateway/assets/src/directory-index.html b/gateway/assets/src/directory-index.html new file mode 100644 index 0000000000..109c7afbf4 --- /dev/null +++ b/gateway/assets/src/directory-index.html @@ -0,0 +1,98 @@ + +{{ $root := . }} + + + + + + + + + + + + + + + + + + + +{{ .Path }} + + + +
+
+
+ + Index of + {{ range .Breadcrumbs -}} + /{{ if .Path }}{{ .Name }}{{ else }}{{ .Name }}{{ end }} + {{- else }} + {{ .Path }} + {{ end }} + + {{ if .Hash }} +
+ {{ .Hash }} +
+ {{ end }} +
+ {{ if .Size }} +
+  {{ .Size }} +
+ {{ end }} +
+
+ + {{ if .BackLink }} + + + + + + + {{ end }} + {{ range .Listing }} + + + + + + + {{ end }} +
+
 
+
+ .. +
+
 
+
+ {{ .Name }} + + {{ if .Hash }} + + {{ .ShortHash }} + + {{ end }} + {{ .Size }}
+
+
+ + diff --git a/gateway/assets/src/icons.css b/gateway/assets/src/icons.css new file mode 100644 index 0000000000..dcdbd3cd9e --- /dev/null +++ b/gateway/assets/src/icons.css @@ -0,0 +1,403 @@ +/* Source - fileicons.org */ + +.ipfs-_blank { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAWBJREFUeNqEUj1LxEAQnd1MVA4lyIEWx6UIKEGUExGsbC3tLfwJ/hT/g7VlCnubqxXBwg/Q4hQP/LhKL5nZuBsvuGfW5MGyuzM7jzdvVuR5DgYnZ+f99ai7Vt5t9K9unu4HLweI3qWYxI6PDosdy0fhcntxO44CcOBzPA7mfEyuHwf7ntQk4jcnywOxIlfxOCNYaLVgb6cXbkTdhJXq2SIlNMC0xIqhHczDbi8OVzpLSUa0WebRfmigLHqj1EcPZnwf7gbDIrYVRyEinurj6jTBHyI7pqVrFQqEbt6TEmZ9v1NRAJNC1xTYxIQh/MmRUlmFQE3qWOW1nqB2TWk1/3tgJV0waVvkFIEeZbHq4ElyKzAmEXOx6gnEVJuWBzmkRJBRPYGZBDsVaOlpSgVJE2yVaAe/0kx/3azBRO0VsbMFZE3CDSZKweZfYIVg+DZ6v7h9GDVOwZPw/PoxKu/fAgwALbDAXf7DdQkAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-_page { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmhJREFUeNpsUztv01AYPfdhOy/XTZ80VV1VoCqlA2zQqUgwMEErWBALv4GJDfEDmOEHsFTqVCTExAiiSI2QEKJKESVFFBWo04TESRzfy2c7LY/kLtf2d8+555zvM9NaI1ora5svby9OnbUEBxgDlIKiWjXQeLy19/X17sEtcPY2rtHS96/Hu0RvXXLz+cUzM87zShsI29DpHCYt4E6Box4IZzTnbDx7V74GjhOSfwgE0H2638K9h08A3iHGVbjTw7g6YmAyw/BgecHNGGJjvfQhIfmfIFDAXJpjuugi7djIFVI4P0plctgJQ0xnFe5eOO02OwEp2VkhSCnC8WOCdqgwnzFx4/IyppwRVN+XYXsecqZA1pB48ekAnw9/4GZx3L04N/GoTwEjX4cNH5vlPfjtAIYp8cWrQutxrC5Mod3VsXVTMFSqtaE+gl9dhaUxE2tXZiF7nYiiatJ3v5s8R/1yOCNLOuwjkELiTbmC9dJHpIaGASsDkoFQGJQwHWMcHWJYOmUj1OjvQotuytt5nHMLEGkCyx6QU384jwkUAd2sxJbS/QShZtg/8rHzzQOzSaFhxQrA6YgQMQHojCUlgnCAAvKFBoXXaHfArSCZDE0gyWJgFIKmvUFKO4MUNIk2a4+hODtDUVuJ/J732AKS6ZtImdTyAQQB3bZN8l9t75IFh0JMUdVKsohsUPqRgnka0tYgggYpCHkKGTsHI5NOMojB4iTICCepvX53AIEfQta1iUCmoTiBmdEri2RgddKFhuJoqb/af/yw/d3zTNM6UkaOfis62aUgddAbnz+rXuPY+Vnzjt9/CzAAbmLjCrfBiRgAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-aac { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnhJREFUeNp0Uk1PE0EYftruVlvAUkhVEPoBcsEoLRJBY01MPHjCs3cvogcT/4qJJN5NvHhoohcOnPw4YEGIkCh+oLGBKm3Z7nZ3dme2vjOhTcjiJJvZzPvOM8/HG2q325Dr3kLp7Y1ibpIxjs4KhQBZfvV6s7K5Vb0bjeof5ZlcGysP1a51mifODybvzE8mzCbrAoTDIThMoGXZiZ4YSiurf+Z1XeuCqJ7Oj+sK3jQcNAmg8xkGQ71mYejcAB49vpmeuzJccl0+dUj6KIAvfHCPg3N+uAv4vg9BOxcCmfEzuP/genpmeqhEMgude10Jwm+DuUIyUdTlqu2byoMfX/dRermBeExHsTiWNi3+lMpzRwDki8zxCIATmzbevfmClukiP5NFhJgwkjeRTeLShdOoVJqnAgwkgCAZ6+UdLC9twjQZ8pdzioFkZBHY3q6B3l4dJEEEPOCeD4cYVH7Xsf15F+FImC775INAJBJSkVoWo0QY9YqgiR4ZZzRaGBkdwK3bFxGLRZUfB3Rm2x4x9CGtsUxH9QYkKICDFuLxKAozGZwdTqBRs2FbLlXbiPdECMCHadj/AaDXZNFqedCIvnRcS4UpRo7+hC5zUmw8Ope9wUFinvpmZ7NKt2RTmB4hKZo6n8qP4Oq1HBkKlVYAQBrUlziB0XQSif4YmQhksgNIJk9iaLhPaV9b/Um+uJSCdzyDbGZQRSkvjo+n4JNxubGUSsCj+ZCpODYjkGMAND2k7exUsfhkCd+29yguB88Wl7FW/o6tT7/gcXqAgGv7hhx1LWBireHVn79YP6ChQ3njb/eFlfWqGqT3H3ZlGIhGI2i2UO/U/wkwAAmoalcxlNA1AAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-ai { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAk5JREFUeNpsU01vElEUPTPzZqBAQaSFQiJYUmlKYhoTF41L3Tbu/Q/+AvsX3Bp/gPsuWLrqyqQ7TUxMtAvF1tYGoXwNw7wv7zwYgtKX3Lw379575p5z77O01ohW+/DVh8zj7aYKhflGdG9ZsGwLNydffgVfr19YHvsEa+Zu/nxndob5StQK+dyzvZzyw/gKlmMj7IygFM+xvNcanp4/t5dAomXHBy2UUBOO2MAl/B9/cPb6PULuoHx0WM0e3GvpUOxD3wZAJWutZqYUYmqpSg5OMgH3YQObL59W0/ullpryR3HegkKEqiWBSGV4R3vQ7sIhScTZFTpHx3A215B5sluVY/WWMg7+ATB/lcLsKpTonHzD+OMFEuTz8ikkt9Kwt9YJZB38cpBdoQAZJdLvCGByfoPB6Xdk90pYy6Xg3c/DaWwArg09DaG5lCsUFN0pckZAojdC8m4auBqaALuSgez7VB1RtDSUWOQvUaBLFUzJBMJ2DwmPgd1Jwm0WoSgJfjDvrTKxtwAIyEkAOQ5hU//Zdg5uowDlUNMnwZLW0sSuUuACYhwQRwFvJxupCjEYUUccOkoaKmdOlZnY1TkgAcXAhxhOwLsDsHoN3u4O5JTDfVCH6I9nfjId3gIgSUATFJk/hVevGtOMwS0XwQ3AzB/FrlKg8Q27I2javVoZrFgwD4qVipAEyMlnaFArzaj/D0DiMXlJAFQyK2r8fnMMRZp4lQ1MaSL5tU/1kqAkMCh2tYI+7+kh70cjPbr4bEZ51jZr8TJnB9PJXpz3V4ABAPOQVJn2Q60GAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-aiff { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAohJREFUeNpkU9tqE1EUXZmZpE3aTBLbJFPTtFURtSCthr7UCyKKFJ/9An3og6Ag/oXfoUj7og9asCBYKT6UIPHaWtpq7NU2aZK5z5wZ9xxMpMwZDuewz9prr32ZiO/7CNaDx3OLt6fOjBqGg/aKRCIInp8+KzfKH7fudnVF58nE16el+/yU2mBFSWZKpWJKVc0OgUBo02K4NDmU6o75Mx+Wdu9IUXFeiOA/pn1xHeYaugVDdzpbp91qGlAKGTx8dC19/Wpxhjnsxj/RRwk85hGJC9d1O6fneWAuoztDYSSLe9OT6SuXB2ccx73Z9uukwDwfls1g0xZIY/Ad/Gnyt/XVfbyYrSDRE8PExHB6/8B6QuaxIwRBFMt0iIAiMx+LCys8jfGJEUik2WpZOD2SQf9oDtVqQwopCAiY66FS/om3b75CVS2MlU7AJ2WiJBCZjZ2dJuRkDJZFwFAR7UCBja3fNfxY2YEoCtRCj9em3Tpds6FpJseGCBxS0GgYGBzqw62p84gnYnAI2CSbSbPhEpFAaE2zODaUAlWWwDoS5DheGqbWpVE/0CmqCY9qkEyINBceb2uADRNQ8bSWAVVzIFKomCQim+0luS4yKYlsHlRyZo7EsSEC23K5vAsXh/H92zZkuRvxeBS5nEx2yp2KqhxPoV5TYS/8CtdApylM9sZQKKSQzyeRTseRV2QoAzIYY8jme5DN9fI0dQoUIjANGydP9VM7PZw9p/AiBpNYrdbw/t0yTJqRtdU9UrfJCUMpSJIgbWzsYe51BcViHzLHeqCRqhZ1YX1tFwNfZBxS9O3NWkAcHqR606k/n/3coKAoV/Y7vQ/OYCZevlrmv3c0GsFh06u3/f4KMABvSWfDHmbK2gAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-avi { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAm1JREFUeNpsU8tu00AUPXZcN0nzTpq2KQ3pAwkIAnWHqCoeexBb+AQ+ABZ8A2s+AIkdm266QUJIFWKBkHg1KpRHi5omJGkbJ3bGHj+4M1EQrTvSyGPPueeec++1EgQBxHp+/9mbyuriRZdxjJaiKBD3W+u1+p9a856max+gDO8ebT+WT20Ezi9NZi/crqadvn2MQBAGfpCOpqNru2937vxPIpY6Onjccx3Twck9MBiSU0ncfHirXFmZX3Md9wqCUwiEVN/zaQfHt0vfbBe5uQyuPVgpl5Zn11ybL4/i/lkICOw5niQRGQShoiqI6Bo43W2ub8n3hRtLZT7gTynk6gkCX9gAOxpAnxhHZDwC1/aI1EViJolu/QhKRMHZ1UX0Gr1USIEn5FPWHy+/wTokkrQOq2vBaHZBN4hmY9Jwfr4An/teiEB45ZZDwDiMhoExT0N+sYDCuUkkplLIlXP4/XEXdo+RUhdhBSSfUwtVTUG8MIHK9QVqI7D/uY6vr2pwmCPrkz+Tk9gwARWQ9WxppbXZhNnpw+ya4A5HZi6L4lIR8WyCcL6sTZiAWjWgAmpxkn5+kqTamK6WkCwmERmLDLvjB0ML9ikWXPLFuozYOap3L8HYN6DHdbS/d5CeTVBndBz87FCBLYkNTyIjBQemnIEsSY5lYrK1+UoWcToLMjEHAyIQ2BCBSx/NVh+ZUhrqmEqBebS3WyhdLg0zt/ugAaIklsSGLHCLa6zDMGhZ2HjyGsnpFPqNHnY2fmHv3R5SMymYbROszSQ2ROAY9qHiofvlxSc5xsKKqqnY3diRE9h4X5d/pzg7lnM4ivsrwADe9Wg/CQJgFAAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-bmp { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmZJREFUeNp0U+1rUlEY/13v9YV0vq2wttI5CdpL9aEGBZUDv0df668I6n+or0UQ/RuuD0EgVDAZrsKF4AR1a6COKW5qXvXec27PuVeda3bgcF6e8/ye5/d7niMZhgExnK9fbTrm5pbBGMZDkgCyq+VyhTUaT6Eo2ZHJePPWXJXRhez3B1yxmM/QdctXUSCgtV4Py4CvY3cky4e1x5DlLCaGbbzjXDcousG5OQe5HPRSCQPK4PpsEM/XH4WvhS4noeu3JwHGGRiULhsMoKZS4I0GtEIB9mgULJGA0+9DPBpBT7sffvf1W/Lg6OgJufw8C0CRGEXWazUwiiyFQjA8bsjVKjaJzovMD/Q5gxyJhG2cvyeXe2cAuADQNGBmBvLaGuTFRaDfh31lBTWi9pumjbK0B4JQul3vOQpM8JdskOLrdCvDcDjAsjtg5TIkoiKLaokMNR2cnZbqNAMycqG7XbHKR2fMzwO/dsxSwu0BiBJsNsv2LwAJAJCI5ux2gXYbqNetcz5PoORI1cDS0n8AxGW7A+zvEYBKZ2ZlcsEtJLbedMjePBaCTQMghx45ulyWkzxMVUQ2RMQhLfFO16YAqCrixPnm6iqKrRb2W23EfF4cUNSrHg90cr7hDyB33MTnSmUKALVs4uIlROjxg+AsPhGVl3fuIl2tIOB0Ya91gkOi9mxhAal0ekork1ic/kGLBORMxy2K1qS9V1ZQbNThIj2EGh+2tsyOnSai8r1UxMNIBB+LRTTULr4Uds0K1tU/uOLxIrmbNz8XXSrnASSpubG9fbKRyVh1n/zSw29t9oC1b47MfwUYAAUsLiWr4QUJAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-c { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAcxJREFUeNqEUk1rE0EYfmZnkgoJCaGNCehuJTalhJZSUZB66a0HwXsP/Qn+FM+9+hty0LNYCr2I7UVLIW0Fc0hpQpSS7O7MrO9MspuvVV8YMnk/nn2e5x0WRRFMvP/w6WSz5jbi/9NxfP693Wp3DrJCnMW5d28P7a+IE15lufR8o1ZEStwPhkWHsWbrZ+eNEPxsuubEF6m0TBv2Q4liPofXuzveulttSqW2UwH+GjqC0horpSL2njU89+FyMwjlTlxOJMTa9ZQHzDQIjgwdom9zLzfXPc75kbnOAswBJTlC2XrqQRMLxhi442DgB4UFBhgPpm3B5pgBHNUUxQKAHs8pHf3TEuFMetM9IKr/i2mWMwC0SnuSFTG2YKyppwKYVdGO7TFhzBqGIenVeLCUtfURgErucx5ECKREKBU4d3B718PHz6cICGT/1Qs8qpQtGOdyhtGEARWDQFqQJSeDL98u4VbLaKw9IRAJPwjtoJGlVAoDQ800+fRFTTYXcjlcXN2g++s36p5Lzzlve1iEROa8BGH1EbrSAeqrjxEqicHQt8/YSDHMpaNs7wJAp9vvfb287idboAVkRAa5fBYXP9rxO4Mgf0xvPPdHgAEA8OoGd40i1j0AAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-cpp { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAfJJREFUeNqEUs9PE0EU/mZ2WgqpXX+QIDFdalVslh8NlAOQaOKFAwfvHvwT/FM8e/U/MOnBmwcj8WD0ACEGghIkbU0baaEthe3OTJ0ZWV26q37JZt68ee/b9733yGAwgMbL12/fz+azbnAPY2Nrt7Zfqz9JMrYZ+J4/e2pOFjiciRvXlgp5GzHonXk2o6S8V6k/TjBrM/xGA4MLyeOSPZ8jkx7D+uqCU3Amy1yIYizB36AlCSkwfjWDR4uu40yMl/s+XwjeWThQQ4Z6QNSnSkYykcDXasP4lmfvOZTSF9q8TDBEFPbN5bOqCglCCCxK0TvvZyIV4CIxbgpC+4gm/PUmFCIE8iJPyME/e8Lon9j4HvyHYLjKSwRCSEUgf9+15mFbx8QS6CZJMzJ9SlBCwX3fJDLG4PX7ykcwkmQmJtpEhWa7g1dvNlSwjwelebz7tAXLolh0p/Fxe9fErK2WDFGEgKjxfNjegX0lDTc/heNuF99/HGEslcKXwyoazWNDdlCr6+DoJgrBzdI0T9rYO6yg2zszMlaKM3Dv5OBzbuyZuzm1B16U4Nzz2f3cFOx0Gq12F9cztpExncsqYoaHpSIKtx0zJdVIFpHQ6py29muNk1uTN829o/6SHEnh80HFaE6NjmLnWxUJy1LyTltB3k8BBgBeEeQTiWRskAAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-css { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAk1JREFUeNpsUktvUlEQ/u5DoCLl/RAKKKUvWmIxjYntQtcu3LvwJ/hTXLt16coFC2PsojEaMKZtCqFaTdGmjbS0CG3By+vei3OOBSGXSU7uzNyZ78z3zRF6vR6YvXzzPrMUCyf68bB9zO+VfpROn5hkOdfPPX/2lH/lfiLidztX5mN2jLGG0rKLENIE8liWpdzwP7HvqJqujmvudFU4bFY8Wk1FZsOBtKppd8YCDNu77CZevd3gflfTUFcUhP0ePLibiIR9rjSBpgwAfe4dVcV6dhtep4PH5msylGYLrzeybErcT85FYiH/CyPAf74gObC2vMhzsiRhPhpC6eQUM+EA1pJzILEnjRSuJsju7MJqsUCSRei6Dp3yXqcdGlHZ/rLPazQWGCn8+6YW4pAkEW0SjzUzanWlCa/LgcR0lNfovTEi6lcIkzesnM/R8RlN0INGp3h4DHoDsE5YRvQyiKiRSMzikRAOS2WoqoZWu41K7RwzlOOAVDMMMHhIGvFlRxJFrKYW0ep0IYgC3SDh4b1lTJjNfENsrazOAMAw680mPuW+8lFno1P4XDigRhOiwQAyJK7TbsNS/PaA7giAIAhYz2yRgBIfsVA8wIetPG6FAqhdNrC5u0f+TUyHgyMTDDToEt/ftQsEvW4EPG5OZcrvw0mlimarTXkPfpXPcNlQoGtjACgpryQXsPNtH/nvRXqBJpoKHMzGNkNB0Odls7LNyAYKpUq1dt1iuvB7fRDp9kr9D1xOFwkpoksXusmXaZWFn0coV89r/b6/AgwAkUENaQaRxswAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-dat { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAfVJREFUeNqMU01PE1EUPe/Na0uptmlASg3MoiZgCA3hQ8PHAjbqwsS9C3+CP8W1W/+BSReyYUPwI4QAVkAgUEgIbVIg1FZb2pl5b3zv2cHBjsaTTOa+e989OffcGeK6LhTevFv+OJoZHPHOfrz/sl86KpWfhxnLe7lXL1/oN/MSZqonOXU/k0AA6lfNhEFIrlAsP2PMyPtr1AscLpyg5pbtIHErhqez4+awmc45nI8FEvwNaiQuBHqTcSxMjJhmX0/Osp1xr878FxWEzwMinxAzEA4xFIpnOjedHTKpYbxW4U2CP4j8uWxmUKsghMCgFI2mFe9QgHZj0Ba4yhFF+KvGJToIRLuPC/efnjD6+26wB1Lq/xgbSCBXKeWJG/OTdky8cWTdT3C9RmWSGk2XCLlWo4xTNbfN5qh7PpXM72GjZeHt0gpq9QbmH4whGb+NpU/reDQ7hcWVVXxvXOHxzCQopQEKXKEbL6o1ZIcy+LC5g62DY2zsHeC0fA4zndIrHOjvg2XbAQRSfsuy9XxC2qzi/H5B6/68W0AsGkW0KyJPBLbDO0fg3JX/CUM81i0bD6WKe6j9qOPJ3EMcF0tSNsFA6g6alqW+VtZBUL78Vtk+Oqne7U9rs5qOQCjSheJFBeFIFOfVujSUYu3rIc4uqxWv76cAAwCwbvRb3SgYxQAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-dmg { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAn9JREFUeNpsU01rE1EUPe9lkk47yWTStCmtNhFSWxos2EXVhSsRcasuxYV05V8Qf4DgD/AvCK5EV1oFI7iUBqmCNdDvppq2mWSSzEzy3vPOpFFq+uDNfR/3nnvueXeYUgrBWH1/9/NE7k5BKRnuRcfF2qdnmJq9DeF9tQ+2isuMsxXGWHh/a1mEVsPJSI5fSU3OPEj291IIlN49RXz0KqzEQjIeZS/L5Y/3wPGhDxIM/i/A7fZWgVG0t5EaG0ZUa0JGM8gvPrZmLt58QYwv91mfAqCIE0sAqgumBFITGQzpUYhuF0KfRa7waDyXXXolpVrsh/0tgSLDr5I+wUZo1UHCSkAficPzY6juFSmbRPrC/azjq+fkcO00gAqoU7B0ETKkfWbuCTjTYeq5oESAauexcTScX+ZACWFm0YQSLZKhHdr67+/wW0e0dgjYo3sCEXXybYtBDVSHLp2es3IpsILS24c42lkBg6DzRjgRzCDZ/xr0GNRJwwYiWgzt+hYMawleu0V3wbkT+kUirOc7IGJAz68R/Qak1BAlx3hqASPGBJRXpXOv58dkz3eAgQoOm4hyj57NgZm0MHvpBmK6QdUdg/DAg9cRkhicBSDaKJdeo1bdxmR2DtWDDUxl51HZ+QHTysD3XdQO95Gfv06aeGcAdBrY3Chi8lwO3768QWX7J5q1XWyVSxgajiOXLyBG2hzurRKV9lmt7ISNkkjo6HhNyjoK+2gXRsKE57ZIE2ot10Z1fz0Ue4ABVw3NMjnW14rInh8jTYywoTg3EOFpOM4mXNfH9PQUfGlrAwBOs3I8ljbtuMWhRWzIIPrkn+GcYcgIWEowbZ+0qB334/4IMADESjqbnHbH0gAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-doc { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAppJREFUeNpsU79PFEEU/mZ39vZu77g7DokcP04BBSUmiEKCSCxs7Ei00JAYO2NlTKyMrX+CJhaGwopSQ0dMtFEsbDRBgiZEQIF4IHcg+2t2Z8eZ5QDlnM1mZ9+8973vfe8NEUJArfSNhzPG0VIfeIiDRSDkw1cWVt3N8rhG6SdSO2Gvn8dfuueqZwuNZqk3Jxg7iNcIfBbgXD6ZC8u5qffzX8eoYeyDxC77uygKhcouovgVUQj1H4YB2ovNuD9+tTTU0zMVBmG/+C8AIYh8F361DL/yE5HnADKYlVdg6MDAmW7cuz5WGuw+PsWDYGAvbL8ECFUt4K7/AHd/I9c7BLaxinD2Ld5Zo7g78RLuRhlBS2cpWbGfStfhfwCEpK0nUjCbWuGsLciSOELPhkq/YgdY3l6HsLfRcLYf+pHNbH0JigEPkLAyMsiEJ7NrqQzM1i7wyhoMZqOhvQs6Z0ovXgdAJACRoulEg5HOwrOroKk0zOY2BDtVpTF0CU6kLkQJXa+BNEoG0lMSsBBKQXWNQktmoGcaYeSaQCIVWOvUYQAiWZFQtk5mSMoSzEILtBrTfEcviC5bwVwQmoh96wA0ic5dB57ngeoaTIPCdb34zDITYNLOOIeVSsW+dQC+7+NSWx6jJ4tY/rWNV7PfcGv0tBoPTM7M4eKJVgx2FTE9u4QPS6x+kHzfw/mOAjarW2hJG3hy8zIceweuY+PRtREMdzbjzcd5WBqPB6xeRGUMGRzHjWvMmxQ7tiOF1JBN6FiTd6Sy9RuFbHpX7MMMqOD088Ii+op5OUAO7jyeRGfBwrF8Cg8mXuDL4neMXzgFwhwZz+hf7a9d5yu3Z6DTPjVQIY9k7erO7Y63Lvc8ErEeyq6JaM6efjai4v4IMABI0DEPqPKkigAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-dotx { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAndJREFUeNpsU01rE1EUPTPzJk0y+WhMStW2qdVWxUVEQUF0I+4ELQiC7lz4N9z0T+hG9wrdZKUgLqulhrbSag1CKpT0g7RpYjqZmffle5NEKdMHlzfvvXvPPffcO4aUEno9f3Vt4dTp+BXOe+fB0u/NbVpv7h89NU1j1TCM8H7+xY9wJwPHZMbOjRadLAvE/2gToJTiTPx89k+OlVd/LT+0TPIPpO/SzyQk40xCMxBSZ9Z3CoAx5DOjeHT7SbE0XSpzwa8OWB9jINELolQg8AR0EgUKn1PIlIWpkUt4cPNxkTOU12trs8p95RiAXpqaztqou8q6SKQJJmZSqGwsodFsIJk1kcyLYv7IeafcLx4HUNkFF4jFTExMZ0B9DrfD4HUEusYhWs4GPEJg5wly/tBYRIOeDhpEwlS34xcyajdQr3UwOT2MlJOEBRuGNHWp9AQRVXDfQiFV/U5GBSiQ5p6ngBEa5z3fiIhC6g6IMDBwOdoHPkYnHPVyhN0tF7E4QSpr94CEOKELffq+y9Bq+DCJ7rWBoQQBVbPR2O6G4OlsLASJMtCZfQqm0NP5IVWnamdAkUxbyuIYtD7wWegb0YAzAVMkkI6NwPM9xEwHloyDGAmk7AKS9rAS0FKOdugbYeAHPu7OPEM+MY7q3hIKqTFQHmC3XcONc/fxdfMDrk/ew/edzyhvvTmBAddocVRqH3Frahau56qpZDho7+PnTgXffi/gbHYmLEvPSIQBp5JU62sYz13G609zKBXvoOMdYn2zgm7Xg2MVML/4Eu3uPgxhk2gXmNl8v/i2pcXTP8tKdTEcbWLZqDQXwu/l6pfwbEnSGsT9FWAA4mdHv2/9YJ4AAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-dwg { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAoFJREFUeNpsU0tPE2EUPfOg006hD4rQh8WgbCSwkKgbF2owujaCiQsXxpX+D6MmbtXEsHCLmIAbE6NLo8YlGIxREIshIqVl+mQ6j8/zFVCb4UtuZua795577rl3FCEE5Bl79vPd5LHYiOP7cH1AUWi85ytmvlas1bJ9E5ryBntH3BpuP/X9i7ovkluuiE8N9SDepaLpCcRCCqa/VDCaMuIjSWP25Upl6n+QDoCz6Yh7KKzh3sI2LuUimPtRRyaqodj0MDloYiITSTi+mH29Wu0AUf9CsZPJoW5czJl48LmCc5kIKo5Al67B9gUGYxrun+5NnMlFZ+GKiQADj2a7AquseLIvjMv5KMaSBu4sWVir+3i8VIVKYSby0UTdFU8Znu8AYBHQgVOJEN5uOXi4UsdawwU0FSf6TaSoyw6DRvukPkgGWpDKy4F8a3jImCrqFDFn6rhKPR4VGnhvOTAY3WLcjifcQAsqRfhUc/Gq1MKNbBh9nIAMDjEppocxs9HCMktfGTCwP/oOBkUKNk/qF3pDYC6Ktk8RfWzyaaoKrqdDaBDwya8W1m0/CPCR3kFy7CcnmWQRUJqcRJFUKtTnPCeR71LwoeYF92CYyVnCFZpCTrRtCv5to2St8SOrKxiPqEEA4fkYT+mI0rdoeUiH1XZVuQPpsIKqw2QmfifTsnOABiWySlH9uU0Hh2MqjsZV5LtpPSoGeN9rKnhBX7ehoOSLIIPfnGONXGMMWN7xUfVldYDbjM3mrh5HCDgS17DhHgDQcIU+XbBxnDTn1x1UuQcJ9iv7l5Q5e1zLGri92EDJFnoAgHtcfr6wbbVXUqq193+0z97n3UJt1+d51n7aHwEGAAHXJoAuZNlzAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-dxf { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAo5JREFUeNpsU0trE1EYPfNMmtdoH2kDNmJbaVFcaBVFpAsREQpFwY0bu3HjQnTj1mVd+ANcuC3qQixmry6E0kWFVIQ+bKy2tbFJm3emyXTujGca+4DkwsedfLnn3POd77uS67rw1vC79ek7fZEzpu3AYUqS9tKQGZPLpa3VXP0uFCmJ/8t9OLC3q/uJbcs5bkIybvdHoMsSbLKENRmvU2WcNnTjRFD7ML1WGSPJHI6sA4KRWMAWVDPxLYex3iCmfpuIh1QsFSyMxQO4GvXHHwOJ6XWSyIck8v6HQsnjAxFc7vTj2VwBg4aG78VdBHQFCk+dbVcxMdwev9gTSEC455sIBOu2KLsoJFzqasP9vjCeDBlYqzn4VXXwarGKZN7Crd5QfLDT/7KpBM84c9fFUFjFp2wdk6smflRsKKqMa7EgfJJ3Ac2OKlit2pEmBTQfngdpnupoU7BUtRGiiTe7fXiRqmK+KuDn6TpvYogmBRJcrOwIJLIWxmM+dOsyLKryQAaJpjJ1/AxrGO3SqdZt7kKZJrzJWBg5piHENuY8vV6e0UOye1TyftvC5l+gZB8SHJTwpSx4q4JeTUKaxhXoR57h7Rn+3iFolJ3xvPhab6HgJG/pJ7jsNP4sUX+jZiCgEsWd/DjH5IrSYpBUAr0yHpzSoXKOP25a6OBhndh0zcX1qIYM2RIbu6i0KiHD5B/GTMHG03kTGpEL7H80wHFOWwhqDZ+SpkBOtCDYJDhZE4gRcKNbYynAqbCMbXpwpVPFbEng0aKJGbYzK1p4wIegLlcEPmdt+DjXbzcsxFlCynRwwVAwW6hjqeg0Zt521SYCWCJvbe0Un29UDx7Hgrs3IEitHXkw3jOv2fl92D8BBgAJeyqBh90ENQAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-eps { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmlJREFUeNp0U01vElEUPfMFCEVArdoSqEA0KV246UJdUJM2Lo2JK/9FjXu3utJqTNz4D9worrsQExbFpAFT0TYp0CZ8pIAiyMfMvBnvm2Foa9uX3Lw7c98979x77hNM0wRf7ufPsq7Z2SQYw2QJAkDxQalUZa3WI8hy3gmZr15bu+z8kILBkCeRCJi6bufKMji0NhwiCQR6iitdatTvQ5LyOLLEiWcYukm3m4Zhmbq1BX13FyoxuH7xAlbvpqKRK1fT0PWbRwEmDEyiy1QVg/V1GO02tO1tKLEY2PIy3KEAlmJRDLXb0TeZL+n9g4MHlLJ5HIBuYnSzXq+DlcsQLk/D9Hoh1WrIUjlPcpsYGQzS3LWoaBhvKeXWMQCDA1D9pt8PaXERUjwOjEZQFhZQp9L2yERiqYRCkPt/z58ogTGqHQLE1BLgUmC6XGD5AlipBIFKkbhanKHGYLBDqQ4ZED0OAbfLlo8OIxwGvhVgyTHlA3xkomjH/gegBgDURMv6faDbBZpN+/tHkUApkdTA/PwZAPxntwdUyjYA/+ZMqJHjLgM9iv/6zRt2GgMaIE21aVIjnSm0DGPfmhzyde0UAE2Dj+p7urKCPvkZku9eJILOSMUnkvVhIo7GYIB3xSKYdhoA1erXGVKXpvFxZwdBonnD68PQ7YEwM4O4xwMPxc8RYE87g4FIcz+kvfmnA0YzIJIy77/m0OCqsTkkCTysKPjJG3viLei63Gm3kCO6UWqcMejjxecMPmxsoFKtYop6UNirYL9Wtc5OHqzznIXHq1na7OfMJROcK8a6O7MjW7nfzZdrd7jzT4ABACh3NGsh3GcdAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-exe { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAo1JREFUeNp0k8tPE1EUxr+ZzvRJO62lUAQaKIQ0FVJFjBBdoIkrDDHuXJi4NnHtX+HCjW408Q/QmHTRaCRRohIJifgiiBICTQu29mHfnc7MHc+MlECKdxZz595zf+c737nD6boOYzxJLC6Nhwej7e/24HkO779s7G6mMjcEwfKZ21+/d+em+RbagaFev28qEpZwzKg3ZckqCPH1nfS8hScIdyhBe6JqTG3PfyTTeLrwFhvbKdy9/xi5QglXL0yGJsKDccZY7LDIAwWHpSferWBh+RN8ni4UylVER8MY6PHj0uSpUK0hxzfTmWsUtnoEwO3rer64jEyxim6/Hy67DXaHExvJX3jw7CX8XjfORUdDlOohhU4fAVjILCPbm9V1yIqK2FgYt+ZmsZcv4lH8Nb5upXD7+hVMjIRQa8qeDg8UTYPU5cTcxSk4nS709XTD53ZhpD+IYMAPj+TBz93fZiz5oHV4AP1fGdlyHZIkIZkrI7GyhnK9CZXy+Aig6p1+HQAY003AcF8AVtGGfLWG9XTO4MLZ5cL0WAixoT4zVmPHADSiMo3hzHA/xgeDWFjbNg8H3A7kKnX0koEcPdTu/ylgRGZgOjNv38zoSXC8BZJDRKOlwGEV0VJVGM0y4joAPO1spXbx6sNHeD1uRIYGUCxVSRlDt1fC8rfvcDnsmJ+dOaLgoAs6AVLZPJJ7WdhEkUyT8GJpBflSBcVKDTvpDBw2GzQqQT1OgaZqUOhtFQUTUKnVTVWNpgy51YLVKph7sqKYkA4A1ScEfT66vm5kC3+ofh6Xz59FQ5bpkvE4QW3M5Apoyorhl9ABIKnFgNdTOh2NkJG6WSf9eRBJtmFwLDJmriUzeaOkYvvcXwEGAIVNH6cDA1DkAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-flv { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmtJREFUeNpsUl1PE0EUPbssLYUCXdpaC9gWoSTgAyFigiRGY+KjvuuTr/4A44MP/gx/gMYfwIsan0RjIjGiJIZgSIGFIoXSD0t3Z3dnd70zpITazuZmJzP3nnvumaMEQQCx3jx69SV3a3KWMxetpSgKxP3m242Do43SQy2k/YRydvds67n8a63k+FRSn7l/bdg5tdsAuM3he/5weDC8vLdqPLgIIpba2niux52mg//DqlsYSg3iztO7mczN3DJ3+ByCLgCBH4hOFEF7cDpzPCRyOpaeLGXSc2PL3HbnW3XaRQCPEgWI2MsRVAVqrwbX9bHxbhOKpiJ/bzpDOr2k68V2BtRNzMtqDEqPejY/4zSGjb54BM0mQ8k4xsDoIMauXxnqYOD7PmwScP31d0SS/eAuh1lrolFpIBQNQw2pqJdqsAlIceB1AJCIkkE/FZskXDQVRXw6IYHiE0nBEcaPXSSvJnGwWkQXAE4acAhbxPMJpOdHweoMhc9b2F8zwKizbdlyPLVH7QLg+JKBYzoorxzjz3oRzUoToaEw9KyO8XQW5AE5jrFT6AbAYVVNxCZ0Ka3So+DSTAoDiej5ywTySbls1OEDobhFlMcXxrHw+AbINEjNXgb7y6BndLhk8cRkHHbD7g4gEhiJFxsdhrDqaamBaDKKerGGSKwPI9kR9EZCaNA5ubE7A5s8IFhsrxQkgJhZoa/06xC5xRz2v+3BOjFlbqcGlquxsondT9vY+2pAJdeZR6fI355CgQCN2A4O1w7gkQ7cdLUOAKdhV6uFSv3kd/n8mT68eC8dKWLnY4FsfeZQh7nVVt0/AQYAsf5g+SvepeQAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-gif { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmVJREFUeNp0U0tPE1EU/trplAqlL0laiw40xASByEJIZFGVnSvj1j+gWxNXJq7VrbrwF7h10cSNhMRHojEuACVBKmH6SJQyJeXRxzzv9dyZPiCtN5lMe8853znf953xcc4hztDzZ1+C6fQMHAfd4/MBFG+p6h/n4OAeAoGNToi/eOm+A50LKRaLh6amoty2vVpZdotNXccMEK3LwZxa2bsDSdrAqePv/mLM5tSdMwYBYqyvw9zdhUn/L59P4OGtG8qlZCoH254/DdCdQBCxqZu+ugqnWoW9swN5ehp2NotgIo6bGQWGtaS8+vQ5V9a0u5S+1gfABEilAqdUgm98HDwUQkDT8JXoPPq+BoM5kCYmFT9jryn1+hkAt7heBx8dhbSwACmTAUwTgdlZ/CVKJaLnI1GD8TikZiPSR8Gxib8chH95mZTxgwWHwH7+gFMswqcokIRbjMO2HDCnZ1VvArpjEmnKZc8+cZJJYGsLsMiZ8AgwEqaY6Mb6RQR33JFhGECzCRyfAFXNu9v+RVNRZWIMuDJNuYMAaDycUFGhCOgtuAtFVDA83G5A8TrFDw+F5QMAxAKJJxz2xnW3RPJGbm+rCyjotZetH4DGzaSSeDA3h4Zl4R0JOEZWTpIzF4n/m995bNdqZwB6m0gFft3Ak6vz+KYWwFsGlqIxXItEcDt1ARMEtKdVgZb+fwA0G2C2hXM0ZTZNRcSf0b1pmXi7uYnjI+Lfanm5fRQsK8BIxKcrK7i/uIgP+Tw+FlREqHN5fx/vyU4uHBE6UO4gDWqk/JFaLuMxcXeFk6TuJ90V0HOk1in7J8AAjmgkPfjU+isAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-h { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAbRJREFUeNqMUk1Lw0AQnf0woK0ttVqp0hwqVCl+UBERT94F7x78Cf4Uz179DT14F8WbYHtRkBYRLNqDtdaPZLObuLs1NGlXcWDJZGbey+x7QUEQgIqT07PL5WKhHL5H46J+22q22vsWpbWwdnR4oJ80LNiz2czGUjENhvj4ctIE4Wrj8XmPUlKL9nCYcOFzE9j1OKSTCdjdrtiLdr7KhVgzEvwW6krC92E6k4Kd9bJt57JV5vFK2KfRQRV+RAMkzxglYI1RaDy2dW1rpWRjQo5VGicYIorWVooFvQVCCAjG8Omw1MgG8AM0uSBUDSnCfk/IGCHwf3DCD/7UhOLBrFkDuep/hDUSSCv1iYo4rIfqGwmUSNJjfYbBcQKhZw0aBMA4B48LwBhBt/cON80HmM9NQ6fXg/Wlku4TwmNWDzaQqzHG+0PSKod5cH5Vh2RiAhYKc8DlV1UPSyuFMGygVlMg1/P6BC6DqXQK8jNZDXAYA1f21V34wMXYFaiyVw0rJyzLgs3VMkxOjGtix/V0XWChZ0cI2i/dzvXdfTd0Qf91BMPrhyNzgKfOmxaWypqaDXHfAgwAtCL8XOfF47gAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-hpp { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAehJREFUeNqEUk1v00AUHK/XKf1yZdESVRBXjRSRFqMQVBA5Ic5I3DnwE/gpnLnyG3LgXglx4UDDLZS0RWkDLiRxSusk9u6GXSembmLgWZbX7+2bnZl92mg0goo3b3ffO/ncdvyfjHef6q2Dlvs8Q2ktzr16+SL60jhhZ69bO8X8ClLC7w9XdKJVG8fuM0r1WrJG4gXjgqU1D0MGc2kBTytl+7a9XmWcl1IB/hZKEhccq5aJJ/e3bTu7Wg1CVo7rNLlRhUh4oMnXoDoyhoHGyWmUe+QUbELIa7W8CjAFlMzdzeckCwFN06ATAn8QmDMMMGlMuwWucpoCHNe4jBkAMenjYvRPTyi53JvuwX8AplleAeBcRFrH6rXIxLim9I/pi3QA1RhKaYxdjkN8IwalCMIwWs9ljMkh0wzk+9M7w179C3LZNXxve2h+c3Hu91HeKmD/6zHOLnw83ilB1/V0CeqU3Q81LC/O41b2Btx2N2JVP2riR8eTUxmi0TzBwrKZMsqMoz8MsDh/DWuWhUBKURLKxQIeOMWoptYPnS1c+INZBkwISomOSsmBZS7B+3WOzZvrKGzkMAiGqNy7g+LmRkRfekBnANy2163PZXrSbrQ6vch19Xz8fPDHyL39QzkHBKedXjfu+y3AAGU37INBJto1AAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-html { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmBJREFUeNqEUktPE1EU/mY605a+hhZTBNKRDApNrWIRA4nEBUZdmCgLNi4MK5f+FNdu3bFv1J1EXODCR1JJSMTwpqUP6NiCpe10Zjz3hj5Mm3iSybl37jnf+c53jmDbNpi9eb+6Ftcisea909bWNzNb6dwzSXKkhIt/r14+515qBqmDA8HpqKagh53XaopblpIbe+knDpFAhPab2Dw0TKvRK7lmNODzePBgZlK9oUWSpmVNdpIU8T+jaMsyMaD4MDcZVa+NhJMN00w0n6V2nN3yQgdHWZag+LzYPTomIAtT0THVtPGanmb/BbjwLFkvn2IttYGYplKyDzsHh7gdmyAWfh5zVq0Guhg4RAHFUhmfvq3j134aXo8bd+ITnMFOOovU5jbGRoZwNxFn1cxuAIcDW/sZDjA/c4u+BNxOJyxqaenpI3z88gMfPn9Hv98HQZS6RazW6kjExvFi8TGdDSy/W0Emf4LS6R8sv11BmfzSwkPcm74Jo9Ei0GZgmkw8QCOao8OXcaz/5vSZnPdnp3ApqBBLkWJE0Ci7ASzbIhCLLQ1E0iOkBDh9NpUgiUejo8oNuJwyn0YPABtn51UYFFivG3yBGCNZkuDtc/MW+ZQI3OrYpBaARCKufk3B5XIiWyhiL5ODp8+FfFHH+KiKSqWKUL8fC/NznGlPBmz+24dZjKnD0CJDcMoyW0SqXuMtHBFw7rhIAD1ErNUNafxKBNevapwu65NpEQ4FqXIA+RMd6VwBP3cPSERb6gLIFIq61+UqGWaFdcrVt/lmAuWjAi2aiMFwmOYuIJ/N6M28vwIMAMoNDyg4rcU9AAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-ics { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAhRJREFUeNqEUkFPE0EU/mZ2dra7bLNpi2AxQFKalkJrohICiYkXPagXrx78Df4K48GDBzmQePLMhUODNxQ5ciEkJVqDtJGmMWrCATRbd2ecoS5u3aovmezsvu9973vfPiKlhI4XL7c2r5YL81LIELEghLA3u/udxmHnPmfGW/Wuv+LpwwdneRYBx7PeWK0wOYYhcXxyckGV1fdbnbuMsXcklqPRJQxFMKz4RxDCtVO4s3xlRjWoB0FYjlQPEEBieChwKCRGMx5uLtaKs1P5ei8IKlGa/YkXMXYtlTEDlsnw/mMXhBJcqxSK6vlcpa4PEpCooUyIqs5M6hG1o2CUwqA091cFcYLf/sjzcX75EiQIojI9779CTYR4jwTBf+r7GAwh0AxCiL6JMT/04vQ79u8aI2O/7Jzg69o6Go8ewycUahtBpADhHKLnK/eVbkMdtROWIv80NQ2sPhncA9Htwn+9hZG0rY6DzFwJl+7dhs0ZstUy8rduwPS/wd/ehmi3kwq4zTHiWUgXp+EuL8FvNvFl5Rn4xAS86iyI2kY3n0Mv48ByrOQmancdi8I0Kcj3U5iuA29xAelKCUHrEIayzltagG2E4IwkFaQgSC6lYI09iN0d8It5uNV5nG5sgJdKYC0G8WoTOZvBISFNEBxnsuzD3GX4vfDsszzqAu0jkJQDedCGbB6AWg54pYbPo+NGVPdTgAEAqQq70PytIL0AAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-iso { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAjlJREFUeNp0kstrU0EUxr/k5qbJzdPYpGkpsUJoA2q1oLjTdiGiIC5cuXHlxv9BEOrStTvBnQvRrSAIsejCrlqpsURq2hCJNQ+TNLm5uc/x3MmzJh34mDNnvvnNzOE4GGOwx8+t9XQkfn0VE0Y5/7Z+kHm+dvOhtd3P9c/xwNZh7nWaMYtNUmX/Fct/vlN7/8J5aRRgyzm8xzpRDjGE2aVH4VTqdnoUYg/XkEhmy+Cx3DhA5tMzdFolvg5Mx3Fx9SmH0JIg79Zo3j4GADMIokJTKtjbfAKXU4Y/2NvSfyH75TFOxa9Cmr0XnlPFl5ReOQ6wNMDsoFX6AElqQlNV1KsOuNwS/AGFjEUIDhmn5+/DMM16/9igBowAzFKIswPJr6MjlxFP3sV04gaP7RzMPe6xvWM1gNUBM2UKYlBau3QghGphg29J3gDlLLilWNdD3gkvIIDRhD9yGe2mCV0V4HFXuCxT5Dlv8Dz3sIkAs03FalDxBMQSt9BRBMhNncuO7dyU28c9tnf8C/Q0ZtR4GImeQSj8APLRH772BWcgiFODffCv/t8H9tO0v3RjV7VqkeeXLlzDfvYjj88uXhl4JwIsrYxmLY/M1gYclIvGE9jZfNPrSCD3/QgLyeWTADV6wW9AryIcCkB0u1Aq/oCPumlufoF72vIheaLDr4wCLIOqrYnULA14PSoqpSJEAUilZrD77Sv3LK+cI0+Be8cAbbmAOrob0agtD491LYfkoqvnyZLsWRkA/gkwABL4S3L78XYyAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-java { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAjxJREFUeNp8U01v00AUnNiOEyepQyhQobRBSlVIoRCBEPTAjQsSEneE+An8FM5cuXLNoQduIAE3qopKNJAIIppA2jrOR93aa6/N8yZuUxyxkrXr3ffmzczbTQRBgHC83nj3ca28dD36nx6fvnzrNNrdp4oibyUmey9fPBezEgWVFuYLdyvlPGaMY4fl1aRS+9pqP5ElAkmcnknRwuO+Nyt5u/ETYfyj9WrpZnmpxn2/Ok1Swn/GvtnH5k4TLue4kNfxoFoprRQv1TzOb8cAIu3+ZD7oD/Hm7XuxzqRUNDtdkuLiTmW5tFxceBXlnXgQTAORSMt2oGezUJJJrK9dFWdEH7Ik4dB29LiESeUEJXd7/dAT3L+1ivlCHr8NEzutXTBvbJPPSdO/AH5wysChwM/1HzCGlmAzOrKxu2eCud6Z2Jke2MwThpUXL6Nn2ZAVFTlNw70bK0iRnGAq9qwHtOmTRpsx1NsHyKRVnNPnoMoK9kc2BjbD4vk5JGV5NkBoEPM4FFnCteJFWOS4ntHEfphQyKaFTWFLw704AJ26ZFx/ZEEi3YyY0O1Dmr4EKTUHA8hUnS6siI0DEHLYog+b28RCRuNXR/iQUpPUEQ+NVht6Lodnjx+GXYgDSFRnq97Ed2pXSlXhUSeGhxYc5sKlNXM5DGLR2TMwfZVPAIi+otGNWy1fEZUKeo4qc4ysI+F8VksLIJfYcD9QYgB/DNPMptWBlsnBIS86xmDMTBo/PWd0LB6VZfdEbJT3V4ABAA5HIzlv9dtdAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-jpeg, +.ipfs-jpg { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmlJREFUeNpsU8luUlEY/s4dmMpkWxRopGJNNbiwhk1tItbGtXHr0hcwmvgOdWld6Bu4coXumtREE3ZKu8FgOlC1kIoXtC3jPfdc/8PUIpzkBM7wf+f/hsts24YczuerGUc0moBlYTAYA+i8sbdXtAzjITRtq39kr73s/Gr9DTUYPOeamwvYnHdrdR0SnDebuCbswJGqpX+Uf92Hqm7hzFAG/4TgNr1uCwEJ0trcBC8U0Kb1/PQkHt9JxSLnL6TB+Y2zAIMOJBGLXmtsbEAYBsx8HnqCGKVScAX8uHf5EpqmGXv18VO6VDEe0PXsKABN8+AAgiabmYFNNJTDQ2RUFc8+Z9G0OPR4PKYwvKari0MAgiY/OQGCAajhMNR4nDZMaInrKBGl70SPMScck1NQG3X/CAWLE3/dAWV5hRRVIJxOWNksrP19sFgMqqAebUGYHMI6teq0A9oTVAhqu2sfbYYjsL7lCZ3683gA70T3TK7/B4BNoO020GwB9TpwfAz8LgMtWn/NkV8EHgoB81c7nYwCyBZlEVkHcqMTKFnkmehJTOPvEfCnKi0fAyADJKfXC/h83TaZTJjaa5lANLpOFqAXtlEAorAwO9u5syT5UxLfU0e3o1FMu1x4u7ODYq02BKAMAVSrSNLrK1MhLPj8mNF0vFm+C1ZvwKBwXXE4AGn1WAASazESwUW3BzUSMeJ2o1Aq4sPurvQYSRLwlhRR6mSaYyi0WlpAJrFRx3ouh5/lMt5lv8BLwXp0M4lSpYL17e2uK5wP6lj/c2ZPn2RI+YT8fDvqoyegVLyfG5kBKaQQOfvF2pLc+ifAABiQH3PEc1i/AAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-js { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoV2luZG93cykiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RUQ5ODY5Q0NGMTE4MTFFMTlDRjlDN0VBQTY3QTk0MTEiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RUQ5ODY5Q0RGMTE4MTFFMTlDRjlDN0VBQTY3QTk0MTEiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDpFRDk4NjlDQUYxMTgxMUUxOUNGOUM3RUFBNjdBOTQxMSIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDpFRDk4NjlDQkYxMTgxMUUxOUNGOUM3RUFBNjdBOTQxMSIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PoT8zQ8AAAJdSURBVHjadFNbTxNREP52t7S0bktbKFAvTUVaw60YqkExUTD6oD74qC/yD/wp/gh885XEEI0RAyYQUiMpIBGMkYR6o23abi+73e2uc04v1LROMtnZPTPffvPNHMGyLDB7sbJ2ciUSli3U35smkK9t7x9v7n2dD/g8KUkUwWqeP3vKz23NxJGzgwOx0RC6mSgIo+WKuvP56MeUzy2nJEk8PWsGJVVTuhWbpgmHw47FB7d98Wg4mVWK52o1sxOg3Va3PmFp+Q2PdUquaFUM9/vw+O6cP3bxwm46Xwh1ALR3/vL1e+hGjcc9koScUsTSq3coVDQsXJ3wzo5HEs3clgZNMTVdx1T0Ep7cn6//QRQwMhzA6uZHLD5cIFEFSKIU+G8LK+tb0KsGZKcTJoEyP08AbpcLy6sbPKdQrigdAGaDwWxsDH1uGbliCYIgcM8WFPg8Mq5Pjzdyu4jYbCE44EepXMHuwXe+A8x3KKYxYsjvbUzmlPGpBmYdgI1oYjSMbL4Ao1YXMkcM2Dd2xnbAamPQAqg1GORLZdycmYTdJqFKk2DPR3fmwI4zBDrg9RADqxPAbPBif2WTSB584/3/TGegEOit+DRcvQ4OZJi1LgwIQKVCg2i6nb1I7H3Br3QWqT9pBAP9uDY5xjdSM3RqxeoUkfVnEOW8UkLykERTNXjkM7h3Iw6NNvHw6JjuhAhVrba0+QeALozcI9nQR0VvNxJc/ZmxCNGvIBQcpDG6udA22kyW29HC72wu8yG579ZoiSYuR/ly2+y9CA4NceWLmo717T1i5ULqJNtapL8CDACskxPFZRxLwQAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-key { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAlZJREFUeNpsU11PE0EUPbM7u/2AtJUWU6qiiSYYo5EmmPDCD9AH46sx8cEnja/+CB989z+Y+MKPgMiDsYQACcbaWBBogYD92t2Zud7ZlQZsbzKZ3bl3zj3n3IwgItjYeDO3MlWme0bjUth8e8/fO2tHzx3XqUEk50uft+Ndnhdmc3SlfNPkVZT8Cy600DoIISvVfKYtlvfX1p66XmoIYsMZdjJQWvEFbbsC/S5g2QhSkKUK7rx6OzvzqLpsovAhaAxA3DUBQn2TUFsl7KwTfm4Z9DoO5LW7uPXi9Wxpfn7ZKF09vyPxX2iWcNRkKGZz0mQWKoNs8AVB6x1yRY2pYnc2LLofuXTxMgAlmlXIfngCxNxEzM+DPv6NQa2BygLgZyX6JT83ngHTN5GAL0WSoUQkSQnXkyBh/k0GegTAaldM20sTKvet+yyhIZApECamL0jUSe3oFChx3TopM4TeEQP2gc6BgGIwb4KGNXRhCkMGxgg2kJeybRiZM45D8W61qEAknSmpHStBhywu0nFVupSCTAcM4ECwqapv+NQ6LS9JGALoMIIoPYDjZiEL1xHtbyO39AQUDaA7R1AH23DSeSA4hv5RG/VAhxomPYP8sw9A4TaC9iHkjUWmrtGvbyC18BLe3GP0m3WW4I5hEBEnPIStXzyuFIxb4EkMEJ79Qa/xHbKxCdM7xeCwzUZOjgEwnuzt7qLz6T3cySmQP43uzjeIiTJM6io6W19B/NLCKMVGCzkCoLR/0lrfOI2fNy/huKC1FTsK/rbGNeMRC8dHpHByfu+vAAMAL/0jvAVZQl0AAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-less { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoV2luZG93cykiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RjZERjZENTJGMTE4MTFFMUIwOEVERjQ5MTZEMkVBREUiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RjZERjZENTNGMTE4MTFFMUIwOEVERjQ5MTZEMkVBREUiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDpGNkRGNkQ1MEYxMTgxMUUxQjA4RURGNDkxNkQyRUFERSIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDpGNkRGNkQ1MUYxMTgxMUUxQjA4RURGNDkxNkQyRUFERSIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pl1w97IAAAJhSURBVHjahJNLbxJRFMf/wPAIMIxMkUI7tS0VYqlGDLGhjdKkqyZ24cJFN925de+XcONHaHRj4k7TND6SGo1VWwmp2kSLhlqMDbQ87gzPYcY7k4GgoJ6bmdw598zvnvM/95pUVYVma+svcovx8yMnFZHAMJPJBJfDzq5vpX6+/vD5qo/z7DOMBdo/d26t6jFMJ3iY51jBz4M+LP6wxEw40Gy23qYzB3HO7fpmpZCOmfEfa7Xb4NxOrC4lvbPToe2yKE3K1PdPwNOtHdx79ESfq4qKkijB5/XgevIyHxEC24USmewDqD2ABxubaLRkfW6zMqjWGlh7/ByyAtxYnOPnL0Q2+gGGmKRaw8zUBJaTiS5QOO1FJnuIAM8hciaIWHgi8NcSNt+loVDY8JBXh2ojJAR1HbTSNFMUpV8Dxcjg0nSYBrtBxdLbqI1iheCUh9XXNGurAwCdEkb9QyBSFam9TDfoPZ1LUg1BH28IiwEARTVAQOzcFKRaHZpLoa9avY6L1Gfs0c32t4PU6W2lWsV8LAorw0Cs1nXftYWE3qZGqwWHzYp2zzlgetuolVFvtiDLbRRKFTAWCxx2G/KlMtXFhWPqOzsWHJwBx7rxKv2R7mwFz3lw9/5DLC/M4Us2RwV0g3U58XJnF7dvrsBOoX0Abbej/DFKRMKI30fTVGC32WA2m5H9cQQvhYi0vE/7Wdgczn6ARA9QPBrBszcp/XvpyqxebzQ0Tlsq6llxLhe9bD4cFMr9XdjLHpLv+SLGBYHAYiVu1kNOpAaRTWbCejgiw0zGhFGSK1aw+zXbvfK/BBgAPwADAs5GpGsAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-logo { + background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 553 235.3'%3E%3Cdefs%3E%3C/defs%3E%3Cpath fill='%23ffffff' d='M239 63h17.8v105H239V63zm35.6 0h36.3c7.9 0 14.5.9 19.6 2.6s9.2 4.1 12.1 7.1a24.45 24.45 0 0 1 6.2 10.2 40.75 40.75 0 0 1 1.8 12.1 45.69 45.69 0 0 1-1.8 12.9 26.58 26.58 0 0 1-6.2 10.8 30.59 30.59 0 0 1-12.1 7.3c-5.1 1.8-11.5 2.7-19.3 2.7h-19.1V168h-17.5V63zm36.2 51a38.37 38.37 0 0 0 11.1-1.3 16.3 16.3 0 0 0 6.8-3.7 13.34 13.34 0 0 0 3.5-5.8 29.75 29.75 0 0 0 1-7.6 25.68 25.68 0 0 0-1-7.7 12 12 0 0 0-3.6-5.5 17.15 17.15 0 0 0-6.9-3.4 41.58 41.58 0 0 0-10.9-1.2h-18.5V114h18.5zm119.9-51v15.3h-49.2V108h46.3v15.4h-46.3V168h-17.8V63h67zm26.2 72.9c.8 6.9 3.3 11.9 7.4 15s10.4 4.7 18.6 4.7a32.61 32.61 0 0 0 10.1-1.3 20.52 20.52 0 0 0 6.6-3.5 12 12 0 0 0 3.5-5.2 19.08 19.08 0 0 0 1-6.4 16.14 16.14 0 0 0-.7-4.9 12.87 12.87 0 0 0-2.6-4.5 16.59 16.59 0 0 0-5.1-3.6 35 35 0 0 0-8.2-2.4l-13.4-2.5a89.76 89.76 0 0 1-14.1-3.7 33.51 33.51 0 0 1-10.4-5.8 22.28 22.28 0 0 1-6.3-8.8 34.1 34.1 0 0 1-2.1-12.7 26 26 0 0 1 11.3-22.4 36.35 36.35 0 0 1 12.6-5.6 65.89 65.89 0 0 1 15.8-1.8c7.2 0 13.3.8 18.2 2.5a34.46 34.46 0 0 1 11.9 6.5 28.21 28.21 0 0 1 6.9 9.3 42.1 42.1 0 0 1 3.2 11l-16.8 2.6c-1.4-5.9-3.7-10.2-7.1-13.1s-8.7-4.3-16.1-4.3a43.9 43.9 0 0 0-10.5 1.1 19.47 19.47 0 0 0-6.8 3.1 11.63 11.63 0 0 0-3.7 4.6 14.08 14.08 0 0 0-1.1 5.4c0 4.6 1.2 8 3.7 
10.3s6.9 4 13.2 5.3l14.5 2.8c11.1 2.1 19.2 5.6 24.4 10.5s7.8 12.1 7.8 21.4a31.37 31.37 0 0 1-2.4 12.3 25.27 25.27 0 0 1-7.4 9.8 36.58 36.58 0 0 1-12.4 6.6 56 56 0 0 1-17.3 2.4c-13.4 0-24-2.8-31.6-8.5s-11.9-14.4-12.6-26.2h18z'/%3E%3Cpath fill='%23469ea2' d='M30.3 164l84 48.5 84-48.5V67l-84-48.5-84 48.5v97z'/%3E%3Cpath fill='%236acad1' d='M105.7 30.1l-61 35.2a18.19 18.19 0 0 1 0 3.3l60.9 35.2a14.55 14.55 0 0 1 17.3 0l60.9-35.2a18.19 18.19 0 0 1 0-3.3L123 30.1a14.55 14.55 0 0 1-17.3 0zm84 48.2l-61 35.6a14.73 14.73 0 0 1-8.6 15l.1 70a15.57 15.57 0 0 1 2.8 1.6l60.9-35.2a14.73 14.73 0 0 1 8.6-15V79.9a20 20 0 0 1-2.8-1.6zm-150.8.4a15.57 15.57 0 0 1-2.8 1.6v70.4a14.38 14.38 0 0 1 8.6 15l60.9 35.2a15.57 15.57 0 0 1 2.8-1.6v-70.4a14.38 14.38 0 0 1-8.6-15L38.9 78.7z'/%3E%3Cpath fill='%23469ea2' d='M114.3 29l75.1 43.4v86.7l-75.1 43.4-75.1-43.4V72.3L114.3 29m0-10.3l-84 48.5v97l84 48.5 84-48.5v-97l-84-48.5z'/%3E%3Cpath fill='%23469ea2' d='M114.9 132h-1.2A15.66 15.66 0 0 1 98 116.3v-1.2a15.66 15.66 0 0 1 15.7-15.7h1.2a15.66 15.66 0 0 1 15.7 15.7v1.2a15.66 15.66 0 0 1-15.7 15.7zm0 64.5h-1.2a15.65 15.65 0 0 0-13.7 8l14.3 8.2 14.3-8.2a15.65 15.65 0 0 0-13.7-8zm83.5-48.5h-.6a15.66 15.66 0 0 0-15.7 15.7v1.2a15.13 15.13 0 0 0 2 7.6l14.3-8.3V148zm-14.3-89a15.4 15.4 0 0 0-2 7.6v1.2a15.66 15.66 0 0 0 15.7 15.7h.6V67.2L184.1 59zm-69.8-40.3L100 26.9a15.73 15.73 0 0 0 13.7 8.1h1.2a15.65 15.65 0 0 0 13.7-8l-14.3-8.3zM44.6 58.9l-14.3 8.3v16.3h.6a15.66 15.66 0 0 0 15.7-15.7v-1.2a16.63 16.63 0 0 0-2-7.7zM30.9 148h-.6v16.2l14.3 8.3a15.4 15.4 0 0 0 2-7.6v-1.2A15.66 15.66 0 0 0 30.9 148z'/%3E%3Cpath fill='%23083b54' fill-opacity='0.15' d='M114.3 213.2v-97.1l-84-48.5v97.1z'/%3E%3Cpath fill='%23083b54' fill-opacity='0.05' d='M198.4 163.8v-97l-84 48.5v97.1z'/%3E%3C/svg%3E%0A"); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-mid { + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-mkv { + background-image:url("data:image/svg+xml;charset=utf8,%3Csvg id='Layer_2' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 72 100'%3E%3Cstyle/%3E%3ClinearGradient id='SVGID_1_' gradientUnits='userSpaceOnUse' x1='36.2' y1='101' x2='36.2' y2='3.005' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='0' stop-color='%23e2cde4'/%3E%3Cstop offset='.17' stop-color='%23e0cae2'/%3E%3Cstop offset='.313' stop-color='%23dbc0dd'/%3E%3Cstop offset='.447' stop-color='%23d2b1d4'/%3E%3Cstop offset='.575' stop-color='%23c79dc7'/%3E%3Cstop offset='.698' stop-color='%23ba84b9'/%3E%3Cstop offset='.819' stop-color='%23ab68a9'/%3E%3Cstop offset='.934' stop-color='%239c4598'/%3E%3Cstop offset='1' stop-color='%23932a8e'/%3E%3C/linearGradient%3E%3Cpath d='M45.2 1l27 26.7V99H.2V1h45z' fill='url(%23SVGID_1_)'/%3E%3Cpath d='M45.2 1l27 26.7V99H.2V1h45z' fill-opacity='0' stroke='%23882383' stroke-width='2'/%3E%3Cpath d='M7.5 91.1V71.2h6.1l3.6 13.5 3.6-13.5h6.1V91h-3.8V75.4l-4 15.6h-3.9l-4-15.6V91H7.5zm23.5 0V71.2h4V80l8.2-8.8h5.4L41.1 79l8 12.1h-5.2l-5.5-9.3-3.4 3.3v6h-4zm25.2 0L49 71.3h4.4L58.5 86l4.9-14.7h4.3l-7.2 19.8h-4.3z' fill='%23fff'/%3E%3ClinearGradient id='SVGID_2_' gradientUnits='userSpaceOnUse' x1='18.2' y1='50.023' x2='18.2' y2='50.023' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='.005' stop-color='%23963491'/%3E%3Cstop offset='1' stop-color='%2370136b'/%3E%3C/linearGradient%3E%3ClinearGradient id='SVGID_3_' gradientUnits='userSpaceOnUse' x1='11.511' y1='51.716' x2='65.211' y2='51.716' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='.005' stop-color='%23963491'/%3E%3Cstop offset='1' 
stop-color='%2370136b'/%3E%3C/linearGradient%3E%3Cpath d='M64.3 55.5c-1.7-.2-3.4-.3-5.1-.3-7.3-.1-13.3 1.6-18.8 3.7S29.6 63.6 23.3 64c-3.4.2-7.3-.6-8.5-2.4-.8-1.3-.8-3.5-1-5.7-.6-5.7-1.6-11.7-2.4-17.3.8-.9 2.1-1.3 3.4-1.7.4 1.1.2 2.7.6 3.8 7.1.7 13.6-.4 20-1.5 6.3-1.1 12.4-2.2 19.4-2.6 3.4-.2 6.9-.2 10.3 0m-9.9 15.3c.5-.2 1.1-.3 1.9-.2.2-3.7.3-7.3.3-11.2-6.2.2-11.9.9-17 2.2.2 4 .4 7.8.3 12 4-1.1 7.7-2.5 12.6-2.7m2-12.1h1.1c.4-.4.2-1.2.2-1.9-1.5-.6-1.8 1-1.3 1.9zm3.9-.2h1.5V38h-1.3c0 .7-.4.9-.2 1.7zm4 0c.5-.1.8 0 1.1.2.4-.3.2-1.2.2-1.9h-1.3v1.7zm-11.5.3h.9c.4-.3.2-1.2.2-1.9-1.4-.4-1.6 1.2-1.1 1.9zm-4 .4c.7.2.8-.3 1.5-.2v-1.7c-1.5-.4-1.7.6-1.5 1.9zm-3.6-1.1c0 .6-.1 1.4.2 1.7.5.1.5-.4 1.1-.2-.2-.6.5-2-.4-1.9-.1.4-.8.1-.9.4zm-31.5.8c.4-.1 1.1.6 1.3 0-.5 0-.1-.8-.2-1.1-.7.2-1.3.3-1.1 1.1zm28.3-.4c-.3.3.2 1.1 0 1.9.6.2.6-.3 1.1-.2-.2-.6.5-2-.4-1.9-.1.3-.4.2-.7.2zm-3.5 2.8c.5-.1.9-.2 1.3-.4.2-.8-.4-.9-.2-1.7h-.9c-.3.3-.1 1.3-.2 2.1zm26.9-1.8c-2.1-.1-3.3-.2-5.5-.2-.5 3.4 0 7.8-.5 11.2 2.4 0 3.6.1 5.8.3M33.4 41.6c.5.2.1 1.2.2 1.7.5-.1 1.1-.2 1.5-.4.6-1.9-.9-2.4-1.7-1.3zm-4.7.6v1.9c.9.2 1.2-.2 1.9-.2-.1-.7.2-1.7-.2-2.1-.5.2-1.3.1-1.7.4zm-5.3.6c.3.5 0 1.6.4 2.1.7.1.8-.4 1.5-.2-.1-.7-.3-1.2-.2-2.1-.8-.2-.9.3-1.7.2zm-7.5 2H17c.2-.9-.4-1.2-.2-2.1-.4.1-1.2-.3-1.3.2.6.2-.1 1.7.4 1.9zm3.4 1c.1 4.1.9 9.3 1.4 13.7 8 .1 13.1-2.7 19.2-4.5-.5-3.9.1-8.7-.7-12.2-6.2 1.6-12.1 3.2-19.9 3zm.5-.8h1.1c.4-.5-.2-1.2 0-2.1h-1.5c.1.7.1 1.6.4 2.1zm-5.4 7.8c.2 0 .3.2.4.4-.4-.7-.7.5-.2.6.1-.2 0-.4.2-.4.3.5-.8.7-.2.8.7-.5 1.3-1.2 2.4-1.5-.1 1.5.4 2.4.4 3.8-.7.5-1.7.7-1.9 1.7 1.2.7 2.5 1.2 4.2 1.3-.7-4.9-1.1-8.8-1.6-13.7-2.2.3-4-.8-5.1-.9.9.8.6 2.5.8 3.6 0-.2 0-.4.2-.4-.1.7.1 1.7-.2 2.1.7.3.5-.2.4.9m44.6 3.2h1.1c.3-.3.2-1.1.2-1.7h-1.3v1.7zm-4-1.4v1.3c.4.4.7-.2 1.5 0v-1.5c-.6 0-1.2 0-1.5.2zm7.6 1.4h1.3v-1.5h-1.3c.1.5 0 1 0 1.5zm-11-1v1.3h1.1c.3-.3.4-1.7-.2-1.7-.1.4-.8.1-.9.4zm-3.6.4c.1.6-.3 1.7.4 1.7 0-.3.5-.2.9-.2-.2-.5.4-1.8-.4-1.7-.1.3-.6.2-.9.2zm-3.4 1v1.5c.7.2.6-.4 1.3-.2-.2-.5.4-1.8-.4-1.7-.1.3-.8.2-.9.4zM15 57c.7-.5 1.3-1.7.2-2.3-.7.4-.8 1.6-.2 2.3zm26.1-1.3c-.1.7.4.8.2 1.5.9 0 1.2-.6 1.1-1.7-.4-.5-.8.1-1.3.2zm-3 2.7c1 0 1.2-.8 1.1-1.9h-.9c-.3.4-.1 1.3-.2 1.9zm-3.6-.4v1.7c.6-.1 1.3-.2 1.5-.8-.6 0 .3-1.6-.6-1.3 0 .4-.7.1-.9.4zM16 60.8c-.4-.7-.2-2-1.3-1.9.2.7.2 2.7 1.3 1.9zm13.8-.9c.5 0 .1.9.2 1.3.8.1 1.2-.2 1.7-.4v-1.7c-.9-.1-1.6.1-1.9.8zm-4.7.6c0 .8-.1 1.7.4 1.9 0-.5.8-.1 1.1-.2.3-.3-.2-1.1 0-1.9-.7-.2-1 .1-1.5.2zM19 62.3v-1.7c-.5 0-.6-.4-1.3-.2-.1 1.1 0 2.1 1.3 1.9zm2.5.2h1.3c.2-.9-.3-1.1-.2-1.9h-1.3c-.1.9.2 1.2.2 1.9z' fill='url(%23SVGID_3_)'/%3E%3ClinearGradient id='SVGID_4_' gradientUnits='userSpaceOnUse' x1='45.269' y1='74.206' x2='58.769' y2='87.706' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='0' stop-color='%23f9eff6'/%3E%3Cstop offset='.378' stop-color='%23f8edf5'/%3E%3Cstop offset='.515' stop-color='%23f3e6f1'/%3E%3Cstop offset='.612' stop-color='%23ecdbeb'/%3E%3Cstop offset='.69' stop-color='%23e3cce2'/%3E%3Cstop offset='.757' stop-color='%23d7b8d7'/%3E%3Cstop offset='.817' stop-color='%23caa1c9'/%3E%3Cstop offset='.871' stop-color='%23bc88bb'/%3E%3Cstop offset='.921' stop-color='%23ae6cab'/%3E%3Cstop offset='.965' stop-color='%239f4d9b'/%3E%3Cstop offset='1' stop-color='%23932a8e'/%3E%3C/linearGradient%3E%3Cpath d='M45.2 1l27 26.7h-27V1z' fill='url(%23SVGID_4_)'/%3E%3Cpath d='M45.2 1l27 26.7h-27V1z' fill-opacity='0' stroke='%23882383' stroke-width='2' stroke-linejoin='bevel'/%3E%3C/svg%3E"); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-mov { + 
background-image:url("data:image/svg+xml;charset=utf8,%3Csvg id='Layer_2' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 72 100'%3E%3Cstyle/%3E%3ClinearGradient id='SVGID_1_' gradientUnits='userSpaceOnUse' x1='36.2' y1='101' x2='36.2' y2='3.005' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='0' stop-color='%23e2cde4'/%3E%3Cstop offset='.17' stop-color='%23e0cae2'/%3E%3Cstop offset='.313' stop-color='%23dbc0dd'/%3E%3Cstop offset='.447' stop-color='%23d2b1d4'/%3E%3Cstop offset='.575' stop-color='%23c79dc7'/%3E%3Cstop offset='.698' stop-color='%23ba84b9'/%3E%3Cstop offset='.819' stop-color='%23ab68a9'/%3E%3Cstop offset='.934' stop-color='%239c4598'/%3E%3Cstop offset='1' stop-color='%23932a8e'/%3E%3C/linearGradient%3E%3Cpath d='M45.2 1l27 26.7V99H.2V1h45z' fill='url(%23SVGID_1_)'/%3E%3Cpath d='M45.2 1l27 26.7V99H.2V1h45z' fill-opacity='0' stroke='%23882383' stroke-width='2'/%3E%3Cpath d='M6.1 91.1V71.2h6.1l3.6 13.5 3.6-13.5h6.1V91h-3.8V75.4l-4 15.6h-3.9l-4-15.6V91H6.1zm22.6-9.8c0-2 .3-3.7.9-5.1.5-1 1.1-1.9 1.9-2.7.8-.8 1.7-1.4 2.6-1.8 1.2-.5 2.7-.8 4.3-.8 3 0 5.3.9 7.1 2.7 1.8 1.8 2.7 4.3 2.7 7.6 0 3.2-.9 5.7-2.6 7.5-1.8 1.8-4.1 2.7-7.1 2.7s-5.4-.9-7.1-2.7c-1.8-1.8-2.7-4.3-2.7-7.4zm4.1-.2c0 2.2.5 4 1.6 5.1 1 1.2 2.4 1.7 4 1.7s2.9-.6 4-1.7c1-1.2 1.6-2.9 1.6-5.2 0-2.3-.5-4-1.5-5.1-1-1.1-2.3-1.7-4-1.7s-3 .6-4 1.7c-1.1 1.2-1.7 3-1.7 5.2zm23.6 10l-7.2-19.8h4.4L58.7 86l4.9-14.7h4.3l-7.2 19.8h-4.3z' fill='%23fff'/%3E%3ClinearGradient id='SVGID_2_' gradientUnits='userSpaceOnUse' x1='18.2' y1='50.023' x2='18.2' y2='50.023' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='.005' stop-color='%23963491'/%3E%3Cstop offset='1' stop-color='%2370136b'/%3E%3C/linearGradient%3E%3ClinearGradient id='SVGID_3_' gradientUnits='userSpaceOnUse' x1='11.511' y1='51.716' x2='65.211' y2='51.716' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='.005' stop-color='%23963491'/%3E%3Cstop offset='1' stop-color='%2370136b'/%3E%3C/linearGradient%3E%3Cpath d='M64.3 55.5c-1.7-.2-3.4-.3-5.1-.3-7.3-.1-13.3 1.6-18.8 3.7S29.6 63.6 23.3 64c-3.4.2-7.3-.6-8.5-2.4-.8-1.3-.8-3.5-1-5.7-.6-5.7-1.6-11.7-2.4-17.3.8-.9 2.1-1.3 3.4-1.7.4 1.1.2 2.7.6 3.8 7.1.7 13.6-.4 20-1.5 6.3-1.1 12.4-2.2 19.4-2.6 3.4-.2 6.9-.2 10.3 0m-9.9 15.3c.5-.2 1.1-.3 1.9-.2.2-3.7.3-7.3.3-11.2-6.2.2-11.9.9-17 2.2.2 4 .4 7.8.3 12 4-1.1 7.7-2.5 12.6-2.7m2-12.1h1.1c.4-.4.2-1.2.2-1.9-1.5-.6-1.8 1-1.3 1.9zm3.9-.2h1.5V38h-1.3c0 .7-.4.9-.2 1.7zm4 0c.5-.1.8 0 1.1.2.4-.3.2-1.2.2-1.9h-1.3v1.7zm-11.5.3h.9c.4-.3.2-1.2.2-1.9-1.4-.4-1.6 1.2-1.1 1.9zm-4 .4c.7.2.8-.3 1.5-.2v-1.7c-1.5-.4-1.7.6-1.5 1.9zm-3.6-1.1c0 .6-.1 1.4.2 1.7.5.1.5-.4 1.1-.2-.2-.6.5-2-.4-1.9-.1.4-.8.1-.9.4zm-31.5.8c.4-.1 1.1.6 1.3 0-.5 0-.1-.8-.2-1.1-.7.2-1.3.3-1.1 1.1zm28.3-.4c-.3.3.2 1.1 0 1.9.6.2.6-.3 1.1-.2-.2-.6.5-2-.4-1.9-.1.3-.4.2-.7.2zm-3.5 2.8c.5-.1.9-.2 1.3-.4.2-.8-.4-.9-.2-1.7h-.9c-.3.3-.1 1.3-.2 2.1zm26.9-1.8c-2.1-.1-3.3-.2-5.5-.2-.5 3.4 0 7.8-.5 11.2 2.4 0 3.6.1 5.8.3M33.4 41.6c.5.2.1 1.2.2 1.7.5-.1 1.1-.2 1.5-.4.6-1.9-.9-2.4-1.7-1.3zm-4.7.6v1.9c.9.2 1.2-.2 1.9-.2-.1-.7.2-1.7-.2-2.1-.5.2-1.3.1-1.7.4zm-5.3.6c.3.5 0 1.6.4 2.1.7.1.8-.4 1.5-.2-.1-.7-.3-1.2-.2-2.1-.8-.2-.9.3-1.7.2zm-7.5 2H17c.2-.9-.4-1.2-.2-2.1-.4.1-1.2-.3-1.3.2.6.2-.1 1.7.4 1.9zm3.4 1c.1 4.1.9 9.3 1.4 13.7 8 .1 13.1-2.7 19.2-4.5-.5-3.9.1-8.7-.7-12.2-6.2 1.6-12.1 3.2-19.9 3zm.5-.8h1.1c.4-.5-.2-1.2 0-2.1h-1.5c.1.7.1 1.6.4 2.1zm-5.4 7.8c.2 0 .3.2.4.4-.4-.7-.7.5-.2.6.1-.2 0-.4.2-.4.3.5-.8.7-.2.8.7-.5 1.3-1.2 2.4-1.5-.1 1.5.4 2.4.4 3.8-.7.5-1.7.7-1.9 1.7 1.2.7 2.5 1.2 4.2 
1.3-.7-4.9-1.1-8.8-1.6-13.7-2.2.3-4-.8-5.1-.9.9.8.6 2.5.8 3.6 0-.2 0-.4.2-.4-.1.7.1 1.7-.2 2.1.7.3.5-.2.4.9m44.6 3.2h1.1c.3-.3.2-1.1.2-1.7h-1.3v1.7zm-4-1.4v1.3c.4.4.7-.2 1.5 0v-1.5c-.6 0-1.2 0-1.5.2zm7.6 1.4h1.3v-1.5h-1.3c.1.5 0 1 0 1.5zm-11-1v1.3h1.1c.3-.3.4-1.7-.2-1.7-.1.4-.8.1-.9.4zm-3.6.4c.1.6-.3 1.7.4 1.7 0-.3.5-.2.9-.2-.2-.5.4-1.8-.4-1.7-.1.3-.6.2-.9.2zm-3.4 1v1.5c.7.2.6-.4 1.3-.2-.2-.5.4-1.8-.4-1.7-.1.3-.8.2-.9.4zM15 57c.7-.5 1.3-1.7.2-2.3-.7.4-.8 1.6-.2 2.3zm26.1-1.3c-.1.7.4.8.2 1.5.9 0 1.2-.6 1.1-1.7-.4-.5-.8.1-1.3.2zm-3 2.7c1 0 1.2-.8 1.1-1.9h-.9c-.3.4-.1 1.3-.2 1.9zm-3.6-.4v1.7c.6-.1 1.3-.2 1.5-.8-.6 0 .3-1.6-.6-1.3 0 .4-.7.1-.9.4zM16 60.8c-.4-.7-.2-2-1.3-1.9.2.7.2 2.7 1.3 1.9zm13.8-.9c.5 0 .1.9.2 1.3.8.1 1.2-.2 1.7-.4v-1.7c-.9-.1-1.6.1-1.9.8zm-4.7.6c0 .8-.1 1.7.4 1.9 0-.5.8-.1 1.1-.2.3-.3-.2-1.1 0-1.9-.7-.2-1 .1-1.5.2zM19 62.3v-1.7c-.5 0-.6-.4-1.3-.2-.1 1.1 0 2.1 1.3 1.9zm2.5.2h1.3c.2-.9-.3-1.1-.2-1.9h-1.3c-.1.9.2 1.2.2 1.9z' fill='url(%23SVGID_3_)'/%3E%3ClinearGradient id='SVGID_4_' gradientUnits='userSpaceOnUse' x1='45.269' y1='74.206' x2='58.769' y2='87.706' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='0' stop-color='%23f9eff6'/%3E%3Cstop offset='.378' stop-color='%23f8edf5'/%3E%3Cstop offset='.515' stop-color='%23f3e6f1'/%3E%3Cstop offset='.612' stop-color='%23ecdbeb'/%3E%3Cstop offset='.69' stop-color='%23e3cce2'/%3E%3Cstop offset='.757' stop-color='%23d7b8d7'/%3E%3Cstop offset='.817' stop-color='%23caa1c9'/%3E%3Cstop offset='.871' stop-color='%23bc88bb'/%3E%3Cstop offset='.921' stop-color='%23ae6cab'/%3E%3Cstop offset='.965' stop-color='%239f4d9b'/%3E%3Cstop offset='1' stop-color='%23932a8e'/%3E%3C/linearGradient%3E%3Cpath d='M45.2 1l27 26.7h-27V1z' fill='url(%23SVGID_4_)'/%3E%3Cpath d='M45.2 1l27 26.7h-27V1z' fill-opacity='0' stroke='%23882383' stroke-width='2' stroke-linejoin='bevel'/%3E%3C/svg%3E"); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-mp3 { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnxJREFUeNp0U89PE0EU/ra7XWxpSsFYIbVQf9REFBHkYBRIPJh4wrN3DsZ4MPGP8b/wUCIHEw5EY0w04o9ILcREGmwVgaXbbXdnd2bXNxPahGyczebtzrz3ve99740WRRHkWn5cebu4cH6SMY7e0jRAHr9c3WxsVvcemmbys9yT6+uHJ8oaPefypdPDD5Ymh5w26wMkEho8JtDtuEOZFCrvN/4uJZNGH0T59D58X/C27aFNAL3Xthmsww5GCyN4+uzu+OLtQsUPxPQx6ZMAoQjBAw7O+bEVCMMQgqygs+LFs1h+dGd8bna0QmXO9OL6JYgwAvOFZKKoy3V44CgNfv7Yx8oLH+lUEgvzF8Ydhz+n41snAGRG5gUEwClzhHdvttFxfNyYK0EnJozKK5eGcf1qHo1GOxtjwI+pfvm4g/W1qtJgerYE2SXJSIL9+W0jk0mCShAxDXgQKgbNXxZq35vQKCiKQkSUXdc1+gcch1FHGPmKuIgBCdc66qJQHMG9+1NIpUylxxHtuW6gEiTIu+N4yjdWgty0yTmdNjFzcwKjY0MU7MLt+IjoSad16FoIx3b/A0DZ7FYXnsdpAjUMDOjI5zPgfoBsRodhhGhZHfBBU/nGAGRtxWIOg5lT2NtrI5dL0SB5KJzLodloqXaOEatPGztKq5gG3S5DNjuAK5NjKJfPYKI0okBkSdemCiSgS/rkQNLSePtxBj4LSCwfFtE0krqqX7ZVMnu9XlMXy2l7ME0dzA3iANQyY6vWxC61UY41zTyNcYh6/QCNXQvzi5dR39nHVq1BUyuMGAARsF6tbbe4iKD1r7Om5iFBdmW1SsDflLiuB6sX90+AAQDHAW7dW0YnzgAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-mp4 { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnBJREFUeNpsk99r01AUx79psrTrujVtbceabnZs4DYRHSoMh6Dgq77rn+AfoA/+If4Bok+C0CfxVRDBh+I2NqZzrpS1DVvbtU3SJPcm8SSlsJlecsn9dT73nO85V/B9H0H78OLdt/LDlQ1uMYybIAgI9n99OWxoe83nkiz9hDDae330JvxL48O51Xxm/enNtKPbVwAh0Ec6kYpXat9Pnl2GBC02HrjM5Y7h4P8+7FtIFVJ49OrxUnl7ucIdfhv+BIDv+fBcj7p/tXMPrs2RXVTw4OX2UnFTrXCbbY7tpMsA13FDSDAOQ4gJEGUJLs0PPh9CkESsPrmxxEz2lra3rnpAt3G6adgdQhBpmeLkFodNmsjpOPoXBrQTDcmFFNS7i3MRDzzPCw/vva8ikU+COQxm14BBhvJcHLGpGPTOAJxxeLbrRgAkYujBdH4G5oWJWXUW19YL4XqunAMFhnq1BqWYgaY1MAHASQOiU96zKzkU76mwehaOvx6h9uMv7KFN3RopL4oTAI4HRh4wSl399xla+00YbR3yrIzM9SzSqgJJnoKcklGrH08CcJjnBtLLCsSEGGpSWJvHtDKNoFippsJ0ulIsDDUCCATMlBQkNuahEyiZTcLsmFBKaQxaOk53TlHeKkM70AjAooCghBOk9sKtIvqtPqS4FBaRnJSRX8tj2DOh3lFB5Qw2ZNFK5LRo6w4sKt2ggAzywidAMN/9uIPSZglBLDO5FF3mRD3wHE9qVRvoHrUpfn+UEQK0/7ShtwboHJ6jdH8RZxSC57hSVETb7e5/2u0FxqPHJow+8iZ4lYY2QGu3idhIxO7Y7p8AAwALCGZKEPBGCgAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-mpg { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnxJREFUeNpsU0tPE1EU/ubRdlqmnUBboa0UeUQDiUGCC1+JmrhxoXt/gBvXJi74If4AV0Y3sNKF0YUaICqoIfjgVShEiGF4tDOdO/fOeOaSKtie5GZu7pzz3e/c7ztKGIaI4vn9p+/P3h4e4a6Pv6EoQBDiy7P5rc1P1Xt6XP8M5ejXo6UJ+dWbuemeTGdpvNdiNe9YvQLe4Bi4PmTpRmyq8m71rp74BxKF2twIHvAo+f/l1T2Yp0zceHizfOZa/xRnfBRhG4CQqAYioBWeXDyA8Di6ei1ceXC1XBwrTXHPH2vW6ccBBBMI6BsSUEQzakGL6xB0tvjyBxRNxdCtc2Xf8R9TyaWTDOg2TjfVdw6hqIoE9B2GxkEDWlLH7s4ette2kSp0oDRezrQwCIIA3oGHr0/mKMmE53qo23W4+w5S+Q5ohob9X3tgHgO8ULQACC7gMx9mKQP30EW6mEHpYi8xcJEdzMucjfkKcrTfmqmiFYBxCF/Id+gayKJwoQjHdrA5v4HK7Cq44KjZNWpagaqp7QACks0H9znW365ia24DzoEDozOJbH8eVtGShXHTwNracnsG7q6LzsEuaAlNPm9h7DSSVjLyCMkppDI+GS2StQWA1RlKo0X56n2X+6QHkmkDakxF9WMVqWyK+s/BrthYfvWz1Ug+zUDcjMPMm0h3pxEjFma3CbIuCud7oMc0LL1ZgmElpGJtW3B+15HIGNITrMYIlOH7i0U41NrInREylYbu4R5qQbQBaAh95fVKZCnpQCnb9DrWZyrRERS6NDeUw+yHaXh7rt4C4B8y+9vkwn7kwKNRpDoa9aiFKBYnF+RcREqQ2e1m3R8BBgAy9kz9ysCE6QAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-odf { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAi5JREFUeNp0UktrU0EU/mbu3FfE1KRRUpWYheALNBURUVy7cy9UkO6KW/+Lbt0IPsFui4gLBbUqFaUuXETUKCYa0jS5yZ2ZO557b5MmTXpgmDPnfOc7jznMGINYPi0de5UvmpORxpjE/kbNqW005DVu8TWw1H758ZfkFgNgJmtyxSPRjJIj0QTW/RDiYGXGb7Dl32/eXrVsd0gSCx9miqC0ooCdp69g5Q/h6OLN0ty5ynIkwzMwUwh2FwMdcbDiCZQXlkqFCpEoPT/wih1YjLInANcD+/Ua9bu3wJlGvrBZCmet2+S6ME5g4oGlZ9A/I70XCDhhDexPNTFmswJBwcnuXkF86VSNZxVu0ukLSGnBcqlnN4HoCQIaIuIv7LUooMOgQ7q75LAAb59B9gCBHSKgqemRr94mMKmD24CfM8nb7THYGQNLpAkUkcb66JyGBFFEWRVL57gFEH5qj8Lxwca2qS3EZaugmzAw24dR/XQgwtsCSBjPIdWbUoE2UJLBnV8Ac/ciWHsK9/glWLnD6K2vgPszsOdOQdfeQ1c/ThKoTgDn9A3KUED/52d45xchZsvorD6Bf/Z60riV3Q9Z/0bbGU1uopYGkfERSQ3VbsMwl0qlqoIARmSoPYXWy0dor79LfBMEEd8jGs/uQ3Yl7PJFNFbuEXiV2riCf88fovXhBbo/vqP3t02/ZYmJFqTkzY160Go9uEMbFK8hR/NrdXtFuUVmnmySVGgO4v4LMAAjRgmO+SJJiQAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-ods { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAetJREFUeNqMUj1IHEEU/i7u7Z23e8tGgneGQPw3hZDkkhQiSuwMQREba4uUgpVlCrvEQhurkCoWqcQQ0oTAaYKNqJygGEwgHCSB6Knn7eXcdX/GmdHVPWYFP3gw78173/vmvYkQQsAwNvckq96UnyIEh7/d4t7uUd/8y+85P+bXSX4grkhI6nJYPW7LrXpBK2YxiSoShhu4Buq1NPofDeqdrZ3Z4cl7D4J3UtA5VyVAlmJoru9Af2ZAp1lcCQ3nqgiuKmbY3l/BH+MnHM9GVLP0Ww3KNA33CQoQQnL834Fj74PUGkANEIkCSSsa8gQqgYTIcB0PVsXB318GInRiCVWCkpRFAs+j5gKlA4t29Ggh4d0t04FKt9PQqF4UFgumSEA8ApeaElilWbYRVy/lsns/N1QBkxtENF4jxPxcgcB1CZVOrvMteK5IQDtJJIGh++PcX9iYwWjXK37+vP0WdYk0Ht99jtX8JywWFkQChw4tc+cZcvlF7rMze+ubbxN40fMalRMDP/6twaiUeK7wlZ0TD0a5hLTWxo2d45KKprqHKJslTsy209s2wnMFBTYNZjc/oLt9gPvLOx+hxVJIKS2YW5pCbSyJTGMK775O8VyBwDJd2LTDl/X5i8v3S7NVw9vJb51tITDEUwEGANCx2/rXEEFFAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-odt { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAepJREFUeNqMkz1II1EQx/+7Ca6JkqyYiJ8cKEpAQbBQFDm0sVOsFBS9wt5KOTgEG5twxVlZ+XEnKNiIghYKxx5nwEpIIXaiSAgKGmMi0d23u8+3T7OaZJEMLG9mmPnN/w1vBUopLPNNhRWXHOyDg0nx82TiJtZPlPVoNpftc2cTotcHtxx06kdXpSQ/BvzKESZzIDmAz6y+NojOjpDMZiqRPIgNoFyWM8DrKUV7axO+gcp4g7AzmquAdVNqOgL2z2I4id1B0wgeygOyt/rLL5buLwAIDgA9dY+L+DkuDQOCrkMgBsRglcMOqAGwIstMg8AkGsuZMNUMRMkLqE+QGloglvlA7uIOAKvZajR0qJkUj/XHe0BTIclVKKlrfKsj9qA8gA6wqSJzPaXlr7ky//tdLEUfawsBjExUFGVWbT7AxSa42H2LMfODmvd3wKb7RAMLYwM8nts8xJ/pEe7/3PmP2eGv3D+9usb35W0bINoA7RmjXSHsH0f5Z/mUSZ0Ir2JmsBtD80s8/rGyzWsLFTD5yUQCbfUBHl9d38LvkdDTXIuHVBo0k+bbt06qO+yAPGXwe/cA4wO9PN44jKDG70GougIzi2tQ00ms7/3lpwnBBgjZ37Kkd1Shht5XzBIFl/ufFtniT/lFgAEAU//g6kvdGBMAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-otp { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAcJJREFUeNqMkssvA1EUxr+ZjkdbrfFKVD12ErYSRELY2fkH+BMsLcQaSwsrSzZi47EjJEQkEhYkFlhYSVtFpdqOqpk717l3jKZmiC+5mZlzv/s795wzCuccQncz3YeRBj4KHz0/RrOZe2NsZPP20o255zQ3EAxzEAC+6uzTw13G4TFQAakA/CWtIYbY0KBOrx7IvwDQqlHV1o3YxKTOvyAUvfQCfqmA3e4ikyS/zRAKvOot7eoSHEgZIHrCfQAfBqBaKQQDKScQAExd8emBANg+2U2CvNMkkgSqBmrCxFB8mujeoJBWwEqARcssKTAJEGrmaGrjqK1zvNknH4BtyxKl2VUpRxmj5W+x73q9AEaZrR/ND1EJluIpS3i9JQiA+a+hSq8HwJjTsLrRaWitPTCOlhEZn5N75sM1qigmlN+dB3u++Qao5W4TtbEXXIsiszGL4PA00itTsu6XnQWo0TjMTAJqfMDx/ryBJcaVzSNSH4fW0Q+rkIf5rsjRiid7yyN7uoXS3Zn0egE0NiORAN9bQ017D1Lri7CLlP2EDr3Rf7C/itzV2bfXA/igLDaRixfngFhSCooH2xVPCWBlwKcAAwBX1suA6te+hAAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-ots { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAfZJREFUeNqMUk1rE1EUPS8zmabJdDKB2glEwY9ExJYiBUEQpV25qgtBXfgbpEtXuujKf+AfEKRddOdOGHClbYVCvyKWaijT2mhjphk7Sd7Me76ZONp0EsiBYWbOvfe88+69hHOOAE9f3zTVnDKNHvhlsfqPw/rM0ovyWsRFdXJEpDIyRnSlVz0KSkmvabaJeXSJBEhgAJzTDNybmtUnS5Pmg/lrN07H5NM/f13FoMgpXDSuhiIiK3Qi6LUugX7FAbaPPsJqfIHHKCStqRsXVFPQuZgD9BBxjikSiRq41AAkgCQBzVf0+BWEBX7GBm0xgHHUqk1UbBuEcIydzyCZlOI9YEGuDxwduCCitS3Xh3viCZ4jrcq4PJ6DLHd67tjtuAAXib54dCPVEfQ5XIcik/0/2iDeOYz3ceCxrisMi904y0XiMQFfkB7lg6xFHwFxEqUMV0anUNBLWKm8xd3i4zBWOzmASx0UsiW831mA59Xjm+h7HCOygduXHqJatzA7Poey9QnXjTuoVD/j/sRcmDOWLgqnLC5A2wwST+Pn8T629lahSCo291bwu9XA7vcy3m2+gTaUR14thrk9BXasbdiOjSe3nmPpwys0xSi/HpbDd3bIQC6dx/q3ZbRb/j8BEi3Po5cTJpHI9CBNDEa++GyDBN9/BBgAwfDlCVUQaNAAAAAASUVORK5CYII=); + background-repeat:no-repeat; + 
background-size:contain +} + +.ipfs-ott { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAdFJREFUeNqMU89r02AYfpJ0iVm7EqhVOxw7dDBEdpiCE1RoEZRddvUgbIex/Rs7eehppyF4LOzQu4MxwYp0HgShIuwwUVSCVtl0s13afl+SzzcpyZYmyF74eN583/s+PO+PSEIIeJZdrtQVI19Cgmk/Ph39bpllXq82g7sgLxVcyKNZpIx8Uj5u5zSjc9Gov8ZihCRC8D+7On4JczevGeTGSEIC4ctKJtB1DTPXi1iCCEkIm1EFlC2Em0iwtWfinXkIzjiO0jljtDC5TtflGIGUQMB+mfja/oPv2Rx9MMjpMdJxOXyXTwkcwIkewfqQ1QtQNB385zcI14FrtQexsSb6SRysZ4Fbf+F6eHwATc9gJGNAm5iCTL5n/LCVRGADNoeaGoHqyaXj5gqQlTODovcwNk5Aj6wXqV8eCo7EDhMonEHpW+dZC7gUG98D3geo7vkb01h9cAvPdt76OGy1xntUd3bjUxAk3+l2sHJ/FgtrT0MUJNfDSm0bjQ/72Hzxxo+NK+h3B7XRNO4UrwymQtMIkdTBU0m+sBOayLsn8Ka78mQDjx/e87HXPkb1+UsfP37+AmZ1fP/suknBb6nefVQXjl06TxMlJfWKNWr+Kv8TYAAkUueexJF47QAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-pdf { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmhJREFUeNp0U0trU0EYPTP35qYxaW6TlDapNKWGbgo2FkF8rARB6rboXusf0F/hyq2U4krFqugqSBeuAyL4SERBstHa0iR9JKZJ7mvu+M0tqZGkH3x8987jzDnnm2FSSqh4ns0VU1ybFzj674Wa3uWiWbfsFQb+jrGj8Xvbm0HlvYVRxhJprpmTlGmum+OMm5uNPZNbtjk3l82ey8++8oW4Jv/H/wdA456g2kvH99FyHNiuAz2dwflbN8YW8zMK5Go/CMfQkAhpGsyQgRCtlpE4jIULyC9fHzu7MPPEl/5ib6WOE0JJNRiHHg6j86mMjw/2gG4bkbY4PW4Yj2j64skA5FTHdaEMPiAJszt1sK0d4suJmY4k0+IDDGRfqmh0u5gejQc+fG8eYCIahRQCEfgQnIuhEkgtONE+dGxYxEDj1DhiEycZ+1YXdUpHCqTMJIYyEES5aXXQsi2kYlGEia5GtHVKn+amPBeCutPgfLALPuVu+xDVPw2EQyFEjHDghbpYNm1yKVVnYjTOerepn4E6XQmLGSPkPkOXWATMSDcjQEkAaqOu6+i/rccALtFL53LI3r0Nq1ZD4/MXZJaWYFer+PXiJc6s3IEgY3+uPYZHTAcAHM+DTE8gnM1CSyaCulv+GrRy8uYyElcu4XfhLVpkpNtn/DGA5Uu0abFH36WnzzCayWAkmYJvWeCkfb9SwY+NDbSoOx4bYqJF8rZqVRRXV/HhzWtUSmWwmWl0RmN4v76OUqGASrmMOkntSHF8MOs954dT08W248wzYsJDOujRBAaqqikTpRo/qqd0/dv97c3Lat9fAQYA4z8bX9nTsb8AAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-php { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAhNJREFUeNqMkltrE0EUx//ZbDaXNrvZzdIkbYOXGgxYQlCK2IIY6EufxGdB8Av44AdR8AP44JOPBR+Ego0PClUKTTXQSmkTYtOkmubSJrQ1e3H2yJSEJNIDs3PmP+f89pyZcdm2DcdWvn7LzkxFHmCIra7nm9ulg8yLZ09yXON55Dgjt1PM2iPs0+aW/frdh8bzV2/SvQBnCLiEqcFxLKSSodlrU9leiGPihWePBkgeEZO6ShC2dCAZNuf6ADb+ldQ5PUPx4BCFcgXfdwq4Ph1Dtd5CZi4Nw7SQiMdCXkl6yVIy/QBWgcU+yx/XsLK2cdHndqlK/lZxH/OpJO7fnsWY3z/YAq+g0TmHpoUH2vB5PXi8RD9Fo10aAmDJTgWyIuOupmK38rsPcOvqJO33XWEvwLJsmKxHRVEwf/MKWl/yUMf8mIloWN8rw+sP0D6PHQmYuzGNgCRiMZVA17IQV4OIaTI8buH/AJMFd02Tkp05PO4jnWvc57EDAINt7u1X8Pb9KgI+Lxbv3cFR8xjx6AQ+b+Txs/qL9KePlih2CMBCq92hg2qzt1AoV7H5YxdhdqhHzRbgcpFeqdUplpvQW4FhmAixZ/sws4BoWCM/qmsE5XqE3dDQCrqGAYWdejqZgK6GUD8+IV9VghBFN1RZJv3sT5diBwC15gncggCPJKF0WCPN8dun55jQdVpz3Ynl9leAAQAJhiGatD9AOgAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-png { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmtJREFUeNpsU9tOE1EUXXPp0CAUWmJbC04xBANNTF+kKhG8fID6aqL/gPEj9E0lIf6Dj30HL03wxQtVIC0QKrWxNG1Dk9Z2Oj1zxn1m0oIZTnIyZ8/ee+211z5Hsm0bYg29fLGpxWIJWBYGS5IA8ncKhT9Wvf4Yqprtu+w3q85X7f9QxseD/pmZMZsxN9fnc5JNw0ACGGv6tPSvyvEDKEoWZ5Y8OHHObKpucw4B0t3agnl4CJPs2YkQVu4s61ORaBqMJc8CDBiIRhhVM9bXYdVqYAcH8M3NgS0tQQsFcfdKHEbvlr6WyaR/V6uPKPy7B4DT7lUq4MUipMlJ2MPDUKtVfKZ2nn/5BoNbkONxXeb8LYXe/A9AJLNWCxgdhZJagDI9DZg9qIkEytRSkdqTSFQtGILSbgc8LViM+tc0yPfukzIyOJ359k9YR0eQdB2KmBbpwXoM3Dod1SkD+scpEapCI5DdpsJhIJcjajQZagcjI+5oLe4VkeQnyiZgdIH2X6BJ7dSqQLfrggjw0AQwP+/GegCIHppNoFAgEMO1RZKo7BQgRi3yN05cnwdA0BQMAgF3C6pnbuNg92M9AFT1diSCh6kb+FGvo2MxnBB9ocZxp4Mns1cde213B81e7xwAcl4jkaa0IUSjUdLJwkL0Ej6VSvArCt7l81iku6GrKnYEU89VJlSJRmR0Dax+fI9suYxSo4HlWIw6M3FBlnD9YhiXabyOsOeIqG7TzDeIYo6EDGp+ZPb2kKKqH8h+mkxiI5/D1/19J3bwYPvPWXq2skkiJVxesqt0XzghpKM8nRVV2Lv2q9eLIvSfAAMAaacnllcFBmYAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-ppt { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAkhJREFUeNpsU11rE0EUPTM7ySZpmzT9DNamWAtFfSiCigr+AxF9zKtv/hvf/Aki+FEi6ov4ItWHPGiwiBUKoUqqTUJImmR3M7Mz3t0kNe1m4LIwc+65595zlxljEJzdR5uf5nLmsvZx6gSvtd9W9bjhF7jg5dH9nRc/wq8YXaTSJptb0xklx7IZoKUEz1zJ2DUU69/37vFYrDxegJ9U0lC+AoIIVGg9CL+vIObP48KDQn7x0sWiVnJrnEDg7KGk+i/Ac4iUM/R7BsmrSSxtXMfa3X7el8+Kjf3KfUJ+iRJQw4w0Tc8BRyWGRAZY3rBR/VlC+XED2ayDhZyXl03+hNA3TxNQshlGLAnE44zCIL1goXZwiMNvB1i6zbC0KuAsxNITWwgNMYPeLVJiFEO9ArjHAivrAjNzBr4f4vwIgdGD4YUACsZCE8AtYGWT5jCsGQw5wEYJzP/pj5RwYTA1b07eQmfZ8P0sgdaM2FlYwWkMgMpl6NQAO33GKM0wsQWflkh1uqGVmVWblsiDkQyqxwfag35SqcktaEWTUTHYNx4iGU/C29+BvX4Lpu/C7zYgFjegSY63WySsHyXwpYHU00ieu0bAOuJbBTArBkiXKiaAmTzcvRJUV9E8rOgqBwqlY8ASs/AadbRLb8CzeTjVClqft6FdB17tL7yeCbFRBYoLr6vR/PiSEl5BZJaBD0/R2nkOZqfQ2fsKt+0SEQ+GLSIEUvJm+6jbah2+pS2aon+4g/afd4SYJVuA7vvXdC/IHQtSoTnK+yfAAIEaId1m+vudAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-psd { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAqxJREFUeNpsU01ME0EYfbtdKKWGtoItRWgJHApCBE2I0YuoiSaaeDJeOJh41YN3TfTixcRwMfEk8eDJGA+Eg0YTTRRMg02KKFooCBbTlkJLS7f7P+u3K9Xo8iWT3Zn55s173/uGM00TVlwZfzJztD92iKO5ouvQGQPHcQDN380vlDPr65fdLj4Oa41i9sFt+ytgN7o7woGOrqgvvpLBaF8vWj1NUAwGTVNRM3mf5vU/zaU+XySQuTqIFXz9hxmGLkoS7r+YxvVnrzGzlgXPDOzUZPT4m3Dt/KlIuH9oUjXYEHZZ/wOgGQZi4TZcGI5hLb+FO++TSOSKcLtcMA0dI0EPrp4+HtnfG5skiUecDGwQE2MjAwiGWlFVNDz+tIyCokJhPKYSX7Gdz2I01hOJdnY9rJ/7UwPGTEiqjtbmJtw4MYx78S/4Wa3h5UoOYwPdIOp2Xi/t18rlFgcDw6o+ydiWVRwOBnCpL0oOAMmNEhLZIgSeoxwGSWcERon/M9DoBknTIdNQNAMnO4PIVGpIFXcwndlA2OtGc4MAxml27p4AIulWSIa9QVadiYSoJxhqBJivKgh5ad3k9gaw6JdlDaqq7q5wINY4F22HaLHSDZQkBW72O9cBYFEviBIURQH7a7MN0uDisUW12ZZcaGlmdq4DwCqeTo1zNtZuW7hUqGIw7MNqSUS2ImNsKEpSdEwt5lGhfQdAkQBEoub3NNrDJfAIeBuRrcrY5xGQ2RFJAjl00I8PCckJUCB9q1URBnk38XEJEuk41tmGwZAf66s1VOh2keqwoUnYpFxHH4iKIixkN3HzVQKP3iQR/5GDKMuYmE3h+fx3MHqh1sMafztHLuiCg0FAk0uFdLqcpGY5QEXbTC/j7mIaVjc18DxufUtBJ/vcggs+3ijVz/0SYABsJHPUtu/OYwAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-py { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAlVJREFUeNpsUktvEmEUPTPzTUFmgJK2UqXQFG3pA6OBLrQxamJcaYwuu3Dp0l9iXLvVtRuDpgt3JIYaTVSaxtRHsJq2xEJBHgXmifebMhECXzKZme+ee+65516h2+2Cn2cb2VwyHl12//vP2/zOQaF4uD7GWN69e/LogfNm7kUsPBFaXYwHMeK0OlpQEJApHJTuykzK98dE98O0bLM/UNgr4v32Dj1fwSQRt9dSsfmZcMa0rIv9ODaqYrPVxuPnL1Cu1aEbJu7fvIZUIo4bqeVYRzcyv/8c3SPYpwECt/dmu4ON3Ed4TymI+hQc1ZqoE+F+uQLDsnHlwkKMscJTgl4eJOi9fxZLePNhGx6ZQRRFqH4VjZaGSv0Y6cQcJLpra0ZguIWegqDiw7lYBBZV6xiGk9DQDLzK5bEyF4Hi9VLMsoYI7J6Es5PjeHjnOl5ubqHaaJGBEkzbxplQAKIgDmBHekDTgI+qKKqKLvNApgmEgyquLs1CoFn2Y4cIeLJpkjoCLkWnUSIF3JxISIUsCjAoxhWNJLBIJs3YeXj/08oYZkOKY65HllE/bkMmY504YUd40HUq2JSSyW6iVPmLiXE/ZMYQCU+hXK3h1toqdNN0sEObyKtqtDQ6kXDwcadDS2TBryp4nX2HxXjsJK6bDnZIAZem6Tp5YMMmicn5OC4lztNWtvB9cg+hQABtWjKL2jH/T3GgBcYDXEE6mcDM6SlaJAGMWkivLBC54ZgniZaDHSI4rNSqn7/t1vgkGJPwZXffSeCjk2iUWz9+nSTQN8e6ef8EGAClUi/qoiOc3wAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-qt { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnVJREFUeNpsU8tu00AUPU5sp41NkzRxpfSZqi0VIIQqEEJUZYXECvbwCWxYsuBD+ABUFrDrCnWBQEJdIWigBSr6pqRJ1ebhxrE9M7aZmSrQ4o505fHMnXPPPWdGiaIIYrx89GKpNDdxmXkU3aEoCsT+z8W1Sm21+jCpJctQTvaerj+TX7WbnJ+0cpfuX8mQtn8GgJ4AZtIFY2Hz3foDVRcgyt+cRHcS0IARh+D/8G0PpmVi7smd0dLs+AIjwTVEiANEYYQwCHlEZyJgIQKfoX84g9uPZ0cHZ4YWmE9nuufU0wABCSSImMsWEgqSuoqA/39/swZFTWLy7vQo7dDnfPvWWQa8GuOV3IYLJXmyzDzG2/ChZ3pwbHdQ267BKJoYuj7SF2MQhiF8LuDK/Gf0DKTBKINz1IbTbEMzU1ANDW7LAfEIQKIgBsBFlAx6LYOz6MAcvoDCtAVGGPKlAiIu/F55F33FDA6W93EOAOMaMOl7biKPwRtD8Foetj5sYPfTDtxjl1f3Ubo5jkQieQ4ACSUD2iE4XDpAdbUiW9D7UsiN9WNkZgxajwbd0LGzt3keAJPUc1N5SVeENT0Ao2BKV6QzwlZeRBSKAYhe3aYHcZWn7l1EfjyPypcK9LQGa8qCvW9j9+MvaasQOHaRhGWdhsNLR8hwodYWf6B4tYjDjSOovRqq32rSYq/lytw4A77o1V2ERiAtzY5kkUrrsH+3QF2KY87ArTtQuQ6nAf4x6FCV1D001+vYersBM2vA4y1Rm2D7/Rac/TZIw4d/6MrcGAPf9htN0miJh7Lyuoyvr8rQeP9iVJcrSKgJ+TrFcyYebXTP/RFgAFQobmIOBxbsAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-rar { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnpJREFUeNpsUktPE1EU/u68OgylZXi0hZACQU1LEKKCMcat7jTRnQsXxsQtv4E/4M74P1iriUaNCw1FgxpjCJQKKAU60+m8mJnrmSll4XCTc8+959zz3e88GOcc8aq9evChOHl/lvMoubvWX/z4+BwTlbvw7bXdg8b7h6LE1gGW+O88CRMt4XTlR6/rYxce5Xv3jlHH19fPkBu+gWy5mlcFb3Wn/umeKOEMJF5C7xCFbtA9dRXjFoYKGiTRAlPGUV1aKU9O3VwNQ74A8DQAIZxqAuAhBPIMFYpQVAVB4CPSZjEzv1weH5tbDQN+JQ2Abu488mnzIbAAA3o/VK2PwDJo7r5Fy7ZRuvi4PFS6+qIXdVYD8Jg6BUcuOD8BozSLlRWyicgVKkTMQWwUlFF0Ooe5FIPk57BD7G0SiywyjD8bCDyHsOkeeeR3SUxEkROmU6BfQYFJMHfhWXV8efkUrb13VPMTsrcTQSzxZ/+n0GVA6EGbSGdgG9vo15fg2nFgbO8k70SRdd+mahDT81vUxTZRlJBRMsjq89C0EXCvSf7TIBZ136YZUJEiE7LgJ2dN01BZuE0dkIhxE7KcQTK1QUj+cwAEyrPZ+IydzRoyah+mLy2isbWBweESJEnB9q+1RM9Ub9GQOWkABg8HjRr2d9Yh0hTlBlRsfn+D4vg0BvUC9rZqECUJuk7Tzr1zahCYlB6HJAREPwfbbMBzLBzsbUKVI0qBgQkc+SxgWUYaIAqOpKwKXJ6bgGlaaDV/YvHaFNrtDsKTfVSrJeqIg/bRNwjclFIALeP3saybhu8SC4VBHwnhBXXIKocYRXD9QzBi4Xgchmkd9+L+CTAAMqwy+ZzluBgAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-rb { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAixJREFUeNqEUktvElEU/mag5f2yJhXLwxIt0kiqsVEXujP+A925cu1Pce3WtXVtYuJCF7KtTY0NrVQIpRVKeXTkMcO9F8+9ZVooJJ5kcmbmfOe733fO1YbDIWS8+/g1dycVX7W/xyO3vdsuVKqvnE7HZ230783rlyo7bVBicSGyfjsVwozomVbIPe/c+FmsPHfoRKJd1HT7hXHBZjVbA4aA14NnD9bC2VR8gwuxPi5Sx39Cp+M0XUP0ahhP1jLhW7HFD4zze3b93ILtXYyyVKlR8/5hFbnvO9gtlrGSjOF+OpXkYviWyo8mCS4R6bqO4p86vm3v4fC4DrPfw4unj1XN6JvBaQtjChzUXK43sVU4wNFJA43Tv/B73edQwTmfIhAjCVL6UdPAj1IVFSKhCdAcAI9rnjBiAjtBYEu3GEeh1sKJ0YXR68sVIujzIhzwY8DEBHZqiLRKkicQDfvABxaiQTc4Y/C65pCOXwcjcmlvJgHtlwi4epYifiQWgmoLZwPW6HQG07LgcOgKO0UglAKOTt/E+09fwAiUWU7QAE9xUK3jbvomsispZVHMVEDSZdHo9rCZ/4VIMKAu0XGjpU7d2S8hk0pCELHEzrjKnCQOYJoD+Dxu1RyiwUm5LaMDo9NFt2cqDLvY4oQFp/QpfT/MrmI5FkWebt+NpWto0j2QmQkOjZ9hpwhqjXZzM/+7LU+cc7lRrjXh8/lVLRK5ovLWXglOsiOxdt8/AQYAzv8qbmu6vgEAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-rtf { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAe5JREFUeNqEU01PE0EYfnZmd5FSvgLYFuwWt9EgHyEaox68eDJevHvwJ/hTPHv1N/QgZ2NC4g3kUAQKFKGhjVKqRrvbnRlnht262FHfy+y8877PPM8z71pCCKh4/ebt+rJfXEz26Vjf2mnsN5rPKKWbVpx7+eK5Xu2kyMtNTd5d8MdhiJ9BOO7atFI9ajy1UyAqSPIRMR6ZmoNehNHMMB7fX/UWvEKFMbYKE8DfQnAhwRmmJkbx6M6S5+WmK2Evup2c9yUk2nnKA0XVcSiGXAe1k5beP1i+4RFCXqnPywB/AKVzK34RjHNYlgVKCH50w7EBBogbTa/AVM5SgBdn0gc2AMDjPsbFPz2xye9asweS6n+NTbG8BCCfUtLjff2WoVnVpAH6z6hMUtJE3EykYfpF4vUiL3QNS7FMeSAQRBHW3r1Hq91B+VoBQRji4+ExFsvz6Hz7jm7Yw5OH92AcJKW9G4SoHhzhy/lXbB98Qmm2oCXN5WawsV2TACEoJXqwTKOsb3BtR2ucmZxANpPB8JUhyPnHWDaDpfJ1eZFALzJJ4MKO5MEtv4TSXB7V/br8iQLMz+almRZWbvoo5q9qRlxwewCgeXbe3qrVO5ZkUD/9jJGRLPaOm6COi92TU1DbxYe9umRD0DrrtJO+XwIMABWp9nS+FgaoAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-sass { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoV2luZG93cykiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6MDNDMTBBM0JGMTE5MTFFMTg3N0NFOTIyMTQ2QzhBNkQiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6MDNDMTBBM0NGMTE5MTFFMTg3N0NFOTIyMTQ2QzhBNkQiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDowM0MxMEEzOUYxMTkxMUUxODc3Q0U5MjIxNDZDOEE2RCIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDowM0MxMEEzQUYxMTkxMUUxODc3Q0U5MjIxNDZDOEE2RCIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Po72XUcAAAJcSURBVHjahFJdTxNBFD1bykc/ttvdtttWGgI0bYrUgDZoNYqRJ014kMRXHvwB/hQTH/wFhMREJfFBQxBjhMRIFEQSCAlQxKYGggiU3e3HbnfX2bFt1EU9k9m9mblz5p4zlzFNExYmpue/jmTSZw5PZAl1MAwDT0c7O72wvPdudeNakPNtOZ0tsM7cvzdOc5yN5LDAsTFRAJks/kC2PxFRVe39Si6f4byez62EpAEH/gNN18F53Ri/Ocxf7OtdLMpKT42s/ZPg1cISJp/P0tg0TBzLCoK8D7eHh4RkLLJ4cCz12AjMXwgez8yhqtVo3NbqRKlcxcSL16gZwJ2Ry8KVc8kZO0HdTKlURn+8G6PD2SZhLMQj96WAiMAh2RXFYKI78lcJcx9WYBCycICnpNbojUWpD5Y0C4Zh2D0w6hWc70uQZC+IWfQZrXF0IsHvY+meBd08haAhoVMMQFJKWF7PNZM+klhRyogGhbqxOIXAMOtEwGAqDqVcgbVkkE+5UsEAWavf0az2t0ZqvK2qabh6IU3joizDwTgwej1LdVfJXkdbK8mt2QkayO99A0/0trQ46I1lVcX+UREhnsP34yLp1AD1xibBMuntpzU8mJyi3Tc1O4+l9U06n7x8Q/8PHz1DrrALt8tlr0CrkbJMHTop9Sk5sLa1g8L+ARJdnShKClY3tunN69t5iGLYTlCtakjFY7gxNABdN3B37BaqqoYT8pyX0in4ORbRkIA46YlDRbUTbBZ2Jb/Pw4qiKFnapcpPo9pdbrg8DjAOBsFgELJmsGs7eWkkc5bu/xBgAHkWC6UPADTOAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-scss { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoV2luZG93cykiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RkM4QjYyNDVGMTE4MTFFMTlBREZCNDNEM0ExMTk0MUIiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RkM4QjYyNDZGMTE4MTFFMTlBREZCNDNEM0ExMTk0MUIiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDpGQzhCNjI0M0YxMTgxMUUxOUFERkI0M0QzQTExOTQxQiIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDpGQzhCNjI0NEYxMTgxMUUxOUFERkI0M0QzQTExOTQxQiIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pkf1yeMAAAJbSURBVHjahFNdTxNBFD0tLULpB91uodVWPmorUIxo0VSiNSExMYYHE33l0Ud/in+C+OSjYgjRGDBRCKJIUkIEWi0WKlja0ul22+5219lJ26gLeiezuXvn7rnnnrlrUFUVms3Mvd2bjIyezRVLBA0zGAzo6jhjm1te+7EU37rFO+w7JlMbtG+ePJ5mOaZmci/nsPl6ONBtw18WDQc9tZq0sp7YjTisXV/NFKRpRvzHpHodDqsF03djzuvDg6vHJWFAprF/Arxe/oins6+YryoqCiUBvNOO+7FrXMjnWc0WyIAOQP0N4Nn8IqqSzPx2swllsYqZl28gK8DDyRvcxKXQvB6gISYpiwgH+jEVi7YAfW4nEqk0PJwDofNejAX7Pae2sPhhHQoF63U5Gai2Bn1epoPWmmaKoug1UBoMrgwHabIVVCx2jdrKFwm67TZ2plldPQGg2cK5HheIUMbaZqKV9In6giDCy3MNYXECgKI2gICxoQAEsQItpNCHWKngMo01arTY/jFIzbutShJuXh1Fm9FImYiM7tTtKOtbO+toN9Nc+fQ5SGUOIVYl7HzPIH2YRZ0y2KZ+sVzBHn2v1mpMGx0DTaR3nzfwfGEJdybGkdo/wEigDyvxLzg4yiESvojZhfd49OAeLJ2degaSLIPOO6vwgiYaaRErTRREEdn8MeJbSVZ5M7nLdNExqFLaQwEfFfACQn1+HBWKSKb3MT4Sgstuh9vVDa+bQ4DORE6o6RlspzMk9TOPfr+fiLJCLFYr3TZSKNcI7+aJwWQmPM+TkqRg49tu65f/JcAAMwMas6WUKd8AAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-sql { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAh5JREFUeNp8kctrE1EUxr+ZyXMkoa1NBROaSkpTBE23PhZ25cql2y5duvAPUdGFS1FxIRRBXZlFQ9GVdDENIhGJxkDsw2mneZnM83ruNZlOmNoDhzlzz3d/9zv3Sowx8Ch/qlYK2XM3cEJsbH0+qjV/rd6/u6aN18b7RMFT+9aosP/Ex+0ae/puw7j36PlKEMAzctKJ3aGFamMHjV0d+wcGitkMrpWWp6hVIciEk2MAOwbUWjosx0UiFoWqJpGMx5DNzODq5aIPoa82AWBg/lyKLMH1PMp/a9XvLXLzG1cuFlBaWpiKxaIPSLY6CaC93ggQjyiQZRkeQSzLRovGaPciWLt5faSWEBoh6KBvOhiaNga0+Y9pwaFxvu7rfp8F5pWDt+qNMp2IijHGwddWCvN+33/CoAOP5nVdT9SdoQ1JkggiQ6Yvr7V60+9z7akA2gfH9cRF8hO5F5Ve4lQAF9uuK+qFsylkzsQxrcaQm04hdWkR83Mzfp9rQ3fAFzu9Ph6+WMfjl6/pGBdb2jbKmx8QlRjWy5vkyhUZBPgOeGNHN9AbDLGUz6He2hVj3Ll9C8/evsdgaMK0HV8bcmDTU0UUBYXcedR+NLGnH0I3jvDk1Rsy46FP4C/1BtrdntCGHNiOAzWZgEKQ5Qt5lIqLojbaXSQTcRy2OwT4SZqk0IYAOgkVWUE+lxX/zb0DpFNpkTzmZmfFtzewhHYcfwUYAMZmVaZQlLFHAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-tga { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnxJREFUeNp0U89PE0EU/ra725K22ILRGipb22pMG6JcSEQTbUIwnozxpBcvepeEP0KPogcT/wlNT17kIKbEmChFUYKGVtL0R2gLtNCl3Z1Z3+zSAlonmezOe/O+973vvZEsy4JYnqdPMu6RkSQYQ29JEkB+PZcrslrtPhQl23VZc8/tr9I1yMHg0EA8HrBM04lVFAhoY38fSSDQVN3pfKV8G7KcxZHl6v1xblqU3eLc3p2VFZjr6+gQgwsnhzGTuq6Nhs6kYZqXjwL0GFhEl3U60OfnwWs1GGtrUKNRsKkpeIIBpKIRtI1J7cX7hXRhc/MOhXw5DkCZGG2zXAajzFIoBMvng1ypIKOqmP30GW3OIEcimovzlxRy5RgAFwDEAIODkCcmIMdiQLsNdWwMZdJlg8pzEUt1aBhKq3XinxKYqF9yQbqRIqsMy+0Gyy47bKgUWXSLtDENE5wdtuqQATm50F1VnPbRGeEw8HXZbiV8fsDvI9ldju9vADAyihLEbrWAZhOoVp3z6iqBUiB1A4nEfwCEsbkL/M4TgE5n5jDx+oTEzp1d8m9tC8H6MaAB0imzx0NU/WKUYE+loEyawDBo2ui6TGfT6ANAxrvx87gYCGCxXEKVJvCWFsG3eh1vN/J4OD6Od4UC8o0G3TX7TGLHwI9iEQmvF9X6Fh7F4/iYy+GcLOMSlfEgGsP0qdNOmX0BiGKpVkV1bw/1nW2b/gCpf1PTcI+Y7eg6ps+G4bG4PR99SjAVo9HE4q+fKNE0vl5awuSohjeijbRefVjAtUgEQRK7Yhi9OKn7nKWZxxlSPWl3QwgnaIrW8QMhD542vUbx/W49m7sq4v4IMABOqi3Ej7bAEAAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-tgz { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnhJREFUeNpsU1trE0EYPbMzSTfdtInFtkkpiaXVWou2FRUEn/so6JugL/oH/Af+B1988if40jcFERQURNBSQdDWlLQN2lsue8neZsZvc7FoOrDszM75znfOmVmmtUYyvry++36yfOeS1qqzDtvH2P76ApPlW3Drb2sHex/uccHWAdbZX30kO2+B3siN3zhTnHuQ66+95i423jzFzOVljBdKOZNHazvVT7e5wF+SZBj9iZJ+3J11mbW2kR8T4LwFli5i4fqTUvnczTUp9RLtDhKgJx0q4dEwWAxrREKICHEsoYYXMXvlcWmquLgmY71yCkG/c0AkARgLMZpnMDMpGNzEYe0dGp6HwvmHpbHC1Wf9MnFCkHQOyYEPzSJwQ2B65Tm5NZG3Fshim6wbMNJn4bpHowMKtIqo2COgR2IcAptwjvcgo6i77igjEmVDqbY8xQJ1VwRULhiBI6+G9Zf3cbTziuzIDkmHSNqECTFgQScEcYuc2NA8TcdYwXD+GkK/TYVN+u72WrIudiAD8o6oAR2RRCmQMjis3CIy1iSpPySCXhFTXeyAgh4BR+JVw8pauLi0Cp4yCX9A90FQhnSBYtnF/k+Q+HYam9itfIZB3QvT8zj8XSW5EhNTs9ivbSLwPUzPLNPJBIMEKnaQYg6aB9+RGR5F5VsNgnNKXMI1NdJGG5WfHzFVLJ7k8c8xUngpVodlDSGbFYj8Y4yMpOG09lHf3yIFPzA3fwHZTAQVtU4JUTeFDrdgDdlI8wAz5Qy2KxswReI7QODZcOr0ZH3q2hIDBI7zq16tuk3FNPxAI4wN+pkoccYoE4YJU5EdUtM4Qst26v26PwIMAKj3P/2YUKgYAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-tiff { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmRJREFUeNp0UktPE1EU/qYzHWstlrYJNcWUElyUJsaNGh9B0g1Lo0v9Ey78EbrVxBhXuHShm25YGBJRQpAYBDEWpaEPEhksdVpbyjzveO4MfZDCTWbauefc736PIziOA77OPH2yJCcSGdg2uksQAKofFou/7VrtASRpvVNynj13f6XOhjg8HAlMTIQdy/LO+v3uYUPTkAHCTb+cK+0pdyGK6+hbvu4/xiyHbncYAwfR19ZgbG/DoO9LsSgeTd9JXoxfyMG2rvQDdBlwIZauQ5ufh12twioU4E+nYU1NIRCNIDs+Bt28mXzx8VNuZ796j9q/DgAwomwqClilAmF0FE4wCInAlkjO4y+r0JgNX2os6XPYS2q/cQyAcQatFjA0BPH6NYipccAwIGUy2CVJFZInkKlyJAqx3T4/IMGmJkeWIWSz5KgI5pdhb3yDXS5DSCYh8rTID8s0wexeVD0GtMd85KkkefFxUfE47M1NokbJkByEQl6tL+ouAI+MUwbFhnYbaJKc/Sqg0x4H4eDRGDA56fUOABA9/GsCpaIHwr8FOhQ823O5RfW66tUGADhNy3RNRDjcN41HLxdQ8J6jYTsOQLfOJBK4f+s2/uoathoNGKT1MtFeVHZxdWTEZfEq/wMKl3rCJOIzTV6ADs2R5ulYDDNkYjp0DhrF+zCVgkw31+v1UxjQZkNV0SADd2o1MIuc9gmY+/kLxb0/UFoHePd9A1qzeUoKpilx9xcLWzgg+u/zeVfuQqkM9bCN1ysrWKXxdtPgvScwUAm58XZ52W16QyPtifRUzi588GbEi1ztHPsvwAC4uC9qhnsZvwAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-txt { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAeJJREFUeNp8UrtOG1EQPfsyXiyzBguIJSyChZBBEFCKpKHLo6egpErNn8CHgH8gkZIiTSIXLhJAWCgkoMgRMSiRBSK29z4y9+I1d/HCrFb3MTPnnjkzlpQSynY+fP70fGF2gQuByCz6lfdd9Uurfvrrjes6762eb3tzQ69uFJwPsqOPC+MBEmxxphi4tlU5OGmsOzaBWLc+O9oIIVhScidkyGZ8vH62nHtSKlaI4cse6TjAfSaFBBcco0EWqyvzubmpyQrj/FXk75cQaSEMeMXU8xykPA/Hjd/6/LRcyjEpt2i7HAe4A2TeLZWKUOJaVLxj27j813EHGKCXaAJExu/4BOdiAED08riQD2riOrexyRoYc3CvsAbLGAAjZga7vgZG23WMCdBvoxKJc36TRBlMiaa2JByjNqqD8qkYc1pjDK7abey+/YhrWlfKswhpiCR96aEU9o5+QE3g2ovVWDm2Sc22bBQm8vrVpbkS9r+doPr1EOWZaQ0yFoxg2PcREosEAI4uvZhJpzFMP+cSXRbq+043RManez+tNWKMI6GN0g0Z04HFR+NoNC/0yx717efZOSbzY3AcR4Op2AGA5p/W31r9e0vNgSrh9OwCrpeCkqvZuqTybnpRqx/r2CjvvwADAJC/7lzAzQmwAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-wav { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAApFJREFUeNpsU1tPE0EYPXtpKbX0wqUQKVQMFdIXQBNCQBs06KP+B8ODGh+Mf4b/4IsGE54kxhcMBrkp7YOQgBRvSKG73fvsrt8Otoask0xmd+b7zpxzvm8E3/cRjPkniyulW0NFy2JoDkEAguOlpXJ9p3L8MBqVl4O9YHxae8pXuRlcGO7KPLhfTDVUqwUgigJMy4Whm6lEXHjxYf3XnByRN0QB/2KaH7btMlUxoRJAcyqKhdOaht7+DJ49n+2cvTnwynXcsb+kLwJ4rgfmMDDGWqvneXCZS9ND7mov5h9ND85M9y86Dpto5rUkuJ4Py3YDJpy6QGJPayqB+Njf+43XL220t0cwOZkfrNXsBUqZugDA6CbLdAiAwaek1ZU9LmP8Rh6S78GsGxjOp9FdzKJaVZIhBgGASzK21w/wbrnCk8euX+EMAjaaZuPHdwUdHVFYluuGPGCORwwYjg5rqOwccRk+3Ux0IEvntmsNG4ZmUayL/wAwKHUNfZfTKN0ZRaw9Cof8qJ/pMAyHy5KkAMTksSEJtnMenM7EMVMawbejMzJRh67bXEYiIXEAVTW50SEAhzqwfqrBcXx4VOhYm4RsNgHbsJFOyZTsQ1MN+hcohoUlkFiMT+TQFpMwXOjGpXgE+XwGk1N5pFJtKNCequgYGupCRBbCDOp0KBJc4VoP3dyBONW8uydBgBHUThqQKCk3mEZ/LoUG+RBioJO7VarAwEAntjYPiUUW9Hh4b2R7k9j98hN37xWx8fGAt3eIAdVMLn+uUv+b2KReSCZjZJiB9bV9jIz2ofr1BKvvd7G9dRC80lae0HzOt+cWVnrSKDrMJykifwNBpCgE/UAllEXufmDu8Zlffvvm8XSQ90eAAQA0pF7c08o4PAAAAABJRU5ErkJggg==); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-wmv { + background-image:url("data:image/svg+xml;charset=utf8,%3Csvg id='Layer_2' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 72 100'%3E%3Cstyle/%3E%3ClinearGradient id='SVGID_1_' gradientUnits='userSpaceOnUse' x1='36.2' y1='101' x2='36.2' y2='3.005' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='0' stop-color='%23e2cde4'/%3E%3Cstop offset='.17' stop-color='%23e0cae2'/%3E%3Cstop offset='.313' stop-color='%23dbc0dd'/%3E%3Cstop offset='.447' stop-color='%23d2b1d4'/%3E%3Cstop offset='.575' stop-color='%23c79dc7'/%3E%3Cstop offset='.698' stop-color='%23ba84b9'/%3E%3Cstop offset='.819' stop-color='%23ab68a9'/%3E%3Cstop offset='.934' stop-color='%239c4598'/%3E%3Cstop offset='1' stop-color='%23932a8e'/%3E%3C/linearGradient%3E%3Cpath d='M45.2 1l27 26.7V99H.2V1h45z' fill='url(%23SVGID_1_)'/%3E%3Cpath d='M45.2 1l27 26.7V99H.2V1h45z' fill-opacity='0' stroke='%23882383' stroke-width='2'/%3E%3Cpath d='M9.1 91.1L4.7 72.5h3.9l2.8 12.8 3.4-12.8h4.5l3.3 13 2.9-13h3.8l-4.6 18.6h-4L17 77.2l-3.7 13.9H9.1zm22.1 0V72.5h5.7l3.4 12.7 3.4-12.7h5.7v18.6h-3.5V76.4l-3.7 14.7h-3.7l-3.7-14.7v14.7h-3.6zm26.7 0l-6.7-18.6h4.1l4.8 13.8 4.6-13.8h4L62 91.1h-4.1z' fill='%23fff'/%3E%3ClinearGradient id='SVGID_2_' gradientUnits='userSpaceOnUse' x1='18.2' y1='50.023' x2='18.2' y2='50.023' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='.005' stop-color='%23963491'/%3E%3Cstop offset='1' stop-color='%2370136b'/%3E%3C/linearGradient%3E%3ClinearGradient id='SVGID_3_' gradientUnits='userSpaceOnUse' x1='11.511' y1='51.716' 
x2='65.211' y2='51.716' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='.005' stop-color='%23963491'/%3E%3Cstop offset='1' stop-color='%2370136b'/%3E%3C/linearGradient%3E%3Cpath d='M64.3 55.5c-1.7-.2-3.4-.3-5.1-.3-7.3-.1-13.3 1.6-18.8 3.7S29.6 63.6 23.3 64c-3.4.2-7.3-.6-8.5-2.4-.8-1.3-.8-3.5-1-5.7-.6-5.7-1.6-11.7-2.4-17.3.8-.9 2.1-1.3 3.4-1.7.4 1.1.2 2.7.6 3.8 7.1.7 13.6-.4 20-1.5 6.3-1.1 12.4-2.2 19.4-2.6 3.4-.2 6.9-.2 10.3 0m-9.9 15.3c.5-.2 1.1-.3 1.9-.2.2-3.7.3-7.3.3-11.2-6.2.2-11.9.9-17 2.2.2 4 .4 7.8.3 12 4-1.1 7.7-2.5 12.6-2.7m2-12.1h1.1c.4-.4.2-1.2.2-1.9-1.5-.6-1.8 1-1.3 1.9zm3.9-.2h1.5V38h-1.3c0 .7-.4.9-.2 1.7zm4 0c.5-.1.8 0 1.1.2.4-.3.2-1.2.2-1.9h-1.3v1.7zm-11.5.3h.9c.4-.3.2-1.2.2-1.9-1.4-.4-1.6 1.2-1.1 1.9zm-4 .4c.7.2.8-.3 1.5-.2v-1.7c-1.5-.4-1.7.6-1.5 1.9zm-3.6-1.1c0 .6-.1 1.4.2 1.7.5.1.5-.4 1.1-.2-.2-.6.5-2-.4-1.9-.1.4-.8.1-.9.4zm-31.5.8c.4-.1 1.1.6 1.3 0-.5 0-.1-.8-.2-1.1-.7.2-1.3.3-1.1 1.1zm28.3-.4c-.3.3.2 1.1 0 1.9.6.2.6-.3 1.1-.2-.2-.6.5-2-.4-1.9-.1.3-.4.2-.7.2zm-3.5 2.8c.5-.1.9-.2 1.3-.4.2-.8-.4-.9-.2-1.7h-.9c-.3.3-.1 1.3-.2 2.1zm26.9-1.8c-2.1-.1-3.3-.2-5.5-.2-.5 3.4 0 7.8-.5 11.2 2.4 0 3.6.1 5.8.3M33.4 41.6c.5.2.1 1.2.2 1.7.5-.1 1.1-.2 1.5-.4.6-1.9-.9-2.4-1.7-1.3zm-4.7.6v1.9c.9.2 1.2-.2 1.9-.2-.1-.7.2-1.7-.2-2.1-.5.2-1.3.1-1.7.4zm-5.3.6c.3.5 0 1.6.4 2.1.7.1.8-.4 1.5-.2-.1-.7-.3-1.2-.2-2.1-.8-.2-.9.3-1.7.2zm-7.5 2H17c.2-.9-.4-1.2-.2-2.1-.4.1-1.2-.3-1.3.2.6.2-.1 1.7.4 1.9zm3.4 1c.1 4.1.9 9.3 1.4 13.7 8 .1 13.1-2.7 19.2-4.5-.5-3.9.1-8.7-.7-12.2-6.2 1.6-12.1 3.2-19.9 3zm.5-.8h1.1c.4-.5-.2-1.2 0-2.1h-1.5c.1.7.1 1.6.4 2.1zm-5.4 7.8c.2 0 .3.2.4.4-.4-.7-.7.5-.2.6.1-.2 0-.4.2-.4.3.5-.8.7-.2.8.7-.5 1.3-1.2 2.4-1.5-.1 1.5.4 2.4.4 3.8-.7.5-1.7.7-1.9 1.7 1.2.7 2.5 1.2 4.2 1.3-.7-4.9-1.1-8.8-1.6-13.7-2.2.3-4-.8-5.1-.9.9.8.6 2.5.8 3.6 0-.2 0-.4.2-.4-.1.7.1 1.7-.2 2.1.7.3.5-.2.4.9m44.6 3.2h1.1c.3-.3.2-1.1.2-1.7h-1.3v1.7zm-4-1.4v1.3c.4.4.7-.2 1.5 0v-1.5c-.6 0-1.2 0-1.5.2zm7.6 1.4h1.3v-1.5h-1.3c.1.5 0 1 0 1.5zm-11-1v1.3h1.1c.3-.3.4-1.7-.2-1.7-.1.4-.8.1-.9.4zm-3.6.4c.1.6-.3 1.7.4 1.7 0-.3.5-.2.9-.2-.2-.5.4-1.8-.4-1.7-.1.3-.6.2-.9.2zm-3.4 1v1.5c.7.2.6-.4 1.3-.2-.2-.5.4-1.8-.4-1.7-.1.3-.8.2-.9.4zM15 57c.7-.5 1.3-1.7.2-2.3-.7.4-.8 1.6-.2 2.3zm26.1-1.3c-.1.7.4.8.2 1.5.9 0 1.2-.6 1.1-1.7-.4-.5-.8.1-1.3.2zm-3 2.7c1 0 1.2-.8 1.1-1.9h-.9c-.3.4-.1 1.3-.2 1.9zm-3.6-.4v1.7c.6-.1 1.3-.2 1.5-.8-.6 0 .3-1.6-.6-1.3 0 .4-.7.1-.9.4zM16 60.8c-.4-.7-.2-2-1.3-1.9.2.7.2 2.7 1.3 1.9zm13.8-.9c.5 0 .1.9.2 1.3.8.1 1.2-.2 1.7-.4v-1.7c-.9-.1-1.6.1-1.9.8zm-4.7.6c0 .8-.1 1.7.4 1.9 0-.5.8-.1 1.1-.2.3-.3-.2-1.1 0-1.9-.7-.2-1 .1-1.5.2zM19 62.3v-1.7c-.5 0-.6-.4-1.3-.2-.1 1.1 0 2.1 1.3 1.9zm2.5.2h1.3c.2-.9-.3-1.1-.2-1.9h-1.3c-.1.9.2 1.2.2 1.9z' fill='url(%23SVGID_3_)'/%3E%3ClinearGradient id='SVGID_4_' gradientUnits='userSpaceOnUse' x1='45.269' y1='74.206' x2='58.769' y2='87.706' gradientTransform='matrix(1 0 0 -1 0 102)'%3E%3Cstop offset='0' stop-color='%23f9eff6'/%3E%3Cstop offset='.378' stop-color='%23f8edf5'/%3E%3Cstop offset='.515' stop-color='%23f3e6f1'/%3E%3Cstop offset='.612' stop-color='%23ecdbeb'/%3E%3Cstop offset='.69' stop-color='%23e3cce2'/%3E%3Cstop offset='.757' stop-color='%23d7b8d7'/%3E%3Cstop offset='.817' stop-color='%23caa1c9'/%3E%3Cstop offset='.871' stop-color='%23bc88bb'/%3E%3Cstop offset='.921' stop-color='%23ae6cab'/%3E%3Cstop offset='.965' stop-color='%239f4d9b'/%3E%3Cstop offset='1' stop-color='%23932a8e'/%3E%3C/linearGradient%3E%3Cpath d='M45.2 1l27 26.7h-27V1z' fill='url(%23SVGID_4_)'/%3E%3Cpath d='M45.2 1l27 26.7h-27V1z' fill-opacity='0' stroke='%23882383' stroke-width='2' 
stroke-linejoin='bevel'/%3E%3C/svg%3E"); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-xls { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmxJREFUeNpsU0trFEEQ/mamZ3Y2+0zIC2MmITEkUYgERFQErx5E8KTi1b/h79A/4SW3nCNeYggBYZVEMU/y3N3Z7M7OTD/G6lk2ruw20zRdU/XV91VVG0mSQK/3n1a/jky6d6Xs3G8WXS+Pw5N6LXjLLGuna/78oZKerGsYKtrDE16uJGL1L9gEOOcYd2dL1fNwrbL//aXN7J1efPMmkUqEFAk0A0VZNbFEaQCBscIkXj975y3NLq9xye8PBkAniHOFph+j2eC4rsdoB4LsFubGl/Hq8RtvYWpxTQi52o1jvWiGYaRZL0/auDgOkC/Z8BYL2Pqxidp1FZkhoDxpeaXA/Ujuj/4HoOxKKjiOiek7RUShRNQWaNYFQuMafrYCxiw4ozZKfqbYJ0EvRdl1DQyyTs8XCNTA6UELMwvDyLpZWIZNNlNLlQOK2LMJRJ+5AkuZ1S7CFFzJzk56GnUjQWlYkqCoBWFbonEVYcLLA4dNnB624GQsDBWIgfZJEgxkoChzSFWvn4VpQemDm2VwXQsXJwF1h6c+gxlQ5jgSiEUEt0wdIe7tMES+nEG2aCLiJMOIIWIr9e0DEELAMUrwRuchVAyTKimUwO75Jm6VF3Bv7imOaj+xd7UFKVS/BPJF1b/E4tgTrE49J60O5kceoNqowiuuYKa8ghHXA48U9MT2AQgyRvTThE30bQiaSGa4yLMJNFo+Dq/2cHt4CYlwyFf2S6BHwwrMw/avDbR5C1k7h1YQ4KH3Amf+AcZyEbZPv9CItzQD1l9EbtYOjv74v/d3O9RMPTDrsEwGIWN8q2yk7XNYRs9JrRv3V4ABADSGR6eQ0/NQAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-xlsx { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAmlJREFUeNpsU8tqFEEUPVXdPY/ueWZIoiYZiSYKYhJc6EbduHOhgijo3t/wH1z6B0JAhOyMILhxo4kJGk1ASTAxwWF0Mpp5dHc9vFUzYwidaoqmq+8959xzbzGtNcx69PTS26ETmQtS9r4Hy/xv7MW7jV+th5yzVcaYPX/++It9u4NAv+CVR6tBUUTqMJsDcRzjZOZM8W9ZLKx+/XDb4e5/kH5In0lpIYWGUaC0YTZnBCAEKoVR3L36oDo7NbsglZwbqD6iQKOXFMcKUVfBkBAoQhlD5xxMDp/HrSv3q1JgYW3z0x0KXzkCYJaRZljru23aHWTzLiamAyytv0O9UYdf5PArqlppBfMUfu4oALErqZBKcUxMFRCHEp0DgW5Lo4N9NIN1dF0XXsVFOUyPJTzo+WBANDidjp8tgHGG3c0DnJ4uIRf4cOCBaW5KjY8xkZL72xpJ9QcFz5bVqHUJGHZL2YtNmKi06YCyiVFb4s/vEKMTAf1p4edOG6mMi1zR6wEpdUwX+vLDtkCzHoK7ptcM6ayLmGajvtex4PliyoIkFRjmUEASelB2rXQRSfjUCT9PlWpmW21iTGzCAyEkUixPRqXhe2V4zKczbdmybgkpJ0cGOuA6Y2MTCsKoi5HsNK7N3MN+uwYaWbxYfoLLkzdxcew6lrYWaZhm8PHHG3zffp1UwJSHz9vvkU8PodbcQYYYS5lxYkxTkGdVDQdV1Js1qPgYD6JIuIE7gsXVefIhIuM05k7dwMbeMmh87a18ufIMaVYyprrJLgje2Nr+1tzYXANnDnr3zRhHj37Vvy2wpXHtNAd5/wQYAD6WMuT2CwoVAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-xml { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAilJREFUeNqMks1PE0EYxh+g3W2t1G0sEqyISynUFJsSOShNwCamiYZED3LgIkcuxoN/iCZePZiYGD2aGD+i0F5KMChxlVaakAK2ykcAt+WzdLu7zkxo3WZL4pu8mXfmeeY3885ug67roPFh5nvc62m9hjoR+5LMp7MrkYf370qVtco+VtCUFpbj+jGR+JbWn76OyQ8ePwsZATQb8R/hanZgINgj9IqeuBFCw1Kt9OMBnNWCs24XwkG/QKYUEiGjVAPQof/rq0783pShET3ULQo8xz0iS5FaANmrHQH2DoqY+DSLSz6RzecWlnD9ymU47LYjd4O5BXqDTG4FM3NpTEkpdJ5rw0AowLRMbhUfp58gTOaD/UHmNQPI6YmvKWRX1zESHUJ/oBs2nmPa+Mgw0ZIM3tZyGoJwygzQNB2jNyJIZX7iB0lpPoM70UGmPX8zCU+rG8NDVxHwdiC5mKsPUFUN/gvtLLf39sFzVqaN3YrC6TjBauqhXhNA1TQoqloV7Da+pjZq1FsXUCamF29j6LvYhf3iISamZ3Fv9DZevouhRzzPfOG+3hpA9U9UyioOlTJ7pFeTCQS6RGzIebyf+oz5pSzWtmSW1EO9phvQ00slBRt/8qR3DoWdXbiczUiTzd52D+tdLmyTB14mx1rMAKVcRpEATjrsuElee/HXGmnFRyBOGD30C/nEDjNgs7CDpsYmnHG3YPegBCvHs9oYfm8nG9dJa5X4K8AAQzQX4KSN3wcAAAAASUVORK5CYII=); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-yml { + 
background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAdxJREFUeNqMUl1rE0EUPbM7m5Y0Zptu21AwWwhYpfSDFh+kvvRd8N0Hf4I/xWdf/Q158F0QoQ+CVsFKaLSQpt/dpmvztTOzzky6cetOpWcZZvbO3MO5514SxzEU3r57/3GpWllM/tP4sL3TarROXuSo/SWJvX71Uu80Cfhlr/T4UdWFAVfdnmsTUtvdP35OUyQKVnJgXDBTcj9icAsTeLax7j/052qM81UjwW1QJXEhMF0qYnN90fdnvdogYmvJPU0/VBApD4hcDrWRcyikfB17srzgW7b9Rh1vEvxDlI4tVytaBSEEtmWh0xsUMwpwnWjqAlcxogiHd1wiQyCu87iI/+sJtf6+NXsgpd7FWCMB50KvkYMGMbLdZgLlfj+K9K4+FnFQ2x7WntIs50AbmiGwLILt+k+EvzvSNIHzdigdJ/AmXQRhiHv5POSwYmG+cqPVo0HqDxj8uTK2vn1Hfa+JmdIkvtZ/4fOPXU3WPDpFeNWVyUKryCiIGMN4zsH98gym3CIcOTwT+XHdXrdQQHAZotE8kBPpSqPNHtBOr48HUmLOcXRJT9dWNMGYJFby91pHOAvaykSaITg+bwefdhrteDRTMSwyrFCgI88E056Hy+4Ah2cXQZL3R4ABALUe7fqXWFN6AAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} + +.ipfs-zip { + background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAm9JREFUeNpsk0tv00AUhc+MY6dOmgeFJg1FoVVpUWlFC0s2IFF1jxBbhKj4BSxYdscPYcEmQmIDq0gsERIViy4TpD7VFzF1Ho5je2a4thOqNhlp5Mz4zudzzp0wpRTC8fPrk0/TC6+fDtYicLH97T1Kc2vQDcs+rH3eUAxVznn0fn1DRM8E+iOdv5ct3XmZG6yVlNj6solUbgVTt0q5FGtX6vXqC6VklTE+KAO/OODHSIQPRQpsXC+kkEz2ELA0ystv84tLzyucsbWByisAGf+QAS2CCDRRLMJMmxC+i8C4jdLCm/zM7OOKFGptcO6/BTpJ0yeQB0Y+mfKQuZZG0jQgeRbW8Xdomobs9LN8scc+UPHNy4Dwq8IljotIIQEm59/RoSyM1CKkXKZNBm7kIVgyM6wgAnSgRK9vqQfHPiMFDHqyFVsLR9Cm0o4YzoAASrSjCelQfRPb1Vc4qn0EY5L2W9GEaBLcxQgFHpGbkMIDJ69e+wjJ8VXqRgKid0r7ftQdxkRs9SqA2kgAm14SSIQh9uhuLGPMnKJs/5KquL1x0N0RCsizigoDaLqBdHoMiyvrlBsHVx1wphD4BCewoqxGKKDwAgtOy8JufYuk+5golGGaGZwc1sIGoDz3AOPZSVLaHgVwydoJDM1H4DbQODughB3YpOD44HfoHgnu4e7So0uAi0stHLJ3Aud8B9bpHu6vPoSu9TtDl6tUuoFiIYOgu0+158MKmOxomtyD3Qi/3MTR7i8K0EDG1GHO5DE3X4DvNahZlJOwEkOATvdPc2//hx3mXJ5lFJaF8K8bStd0YGfnOJbMGex21x6c+yfAAOlIPDJzr7cLAAAAAElFTkSuQmCC); + background-repeat:no-repeat; + background-size:contain +} diff --git a/gateway/assets/src/style.css b/gateway/assets/src/style.css new file mode 100644 index 0000000000..3e7b8a734b --- /dev/null +++ b/gateway/assets/src/style.css @@ -0,0 +1,212 @@ +body { + color:#34373f; + font-family:"Helvetica Neue", Helvetica, Arial, sans-serif; + font-size:14px; + line-height:1.43; + margin:0; + word-break:break-all; + -webkit-text-size-adjust:100%; + -ms-text-size-adjust:100%; + -webkit-tap-highlight-color:transparent +} + +a { + color:#117eb3; + text-decoration:none +} + +a:hover { + color:#00b0e9; + text-decoration:underline +} + +a:active, +a:visited { + color:#00b0e9 +} + +strong { + font-weight:700 +} + +table { + border-collapse:collapse; + border-spacing:0; + max-width:100%; + width:100% +} + +table:last-child { + border-bottom-left-radius:3px; + border-bottom-right-radius:3px +} + +tr:first-child td { + border-top:0 +} + +tr:nth-of-type(even) { + background-color:#f7f8fa +} + +td { + border-top:1px solid #d9dbe2; + padding:.65em; + vertical-align:top +} + +#page-header { + align-items:center; + background:#0b3a53; + border-bottom:4px solid #69c4cd; + color:#fff; + display:flex; + font-size:1.12em; + font-weight:500; + justify-content:space-between; + padding:0 1em +} + +#page-header a { + color:#69c4cd +} + +#page-header a:active { + color:#9ad4db +} + +#page-header a:hover { + color:#fff +} + +#page-header-logo { + height:2.25em; + margin:.7em .7em .7em 0; + width:7.15em +} + +#page-header-menu { + align-items:center; + display:flex; + margin:.65em 0 +} + +#page-header-menu div { + margin:0 .6em +} + +#page-header-menu div:last-child { + 
margin:0 0 0 .6em +} + +#page-header-menu svg { + fill:#69c4cd; + height:1.8em; + margin-top:.125em +} + +#page-header-menu svg:hover { + fill:#fff +} + +.menu-item-narrow { + display:none +} + +#content { + border:1px solid #d9dbe2; + border-radius:4px; + margin:1em +} + +#content-header { + background-color:#edf0f4; + border-bottom:1px solid #d9dbe2; + border-top-left-radius:3px; + border-top-right-radius:3px; + padding:.7em 1em +} + +.type-icon, +.type-icon>* { + width:1.15em +} + +.no-linebreak { + white-space:nowrap +} + +.ipfs-hash { + color:#7f8491; + font-family:monospace +} + +@media only screen and (max-width:500px) { + .menu-item-narrow { + display:inline + } + .menu-item-wide { + display:none + } +} + +@media print { + #page-header { + display:none + } + #content-header, + .ipfs-hash, + body { + color:#000 + } + #content-header { + border-bottom:1px solid #000 + } + #content { + border:1px solid #000 + } + a, + a:visited { + color:#000; + text-decoration:underline + } + a[href]:after { + content:" (" attr(href) ")" + } + tr { + page-break-inside:avoid + } + tr:nth-of-type(even) { + background-color:transparent + } + td { + border-top:1px solid #000 + } +} + +@-ms-viewport { + width:device-width +} + +.d-flex { + display:flex +} + +.flex-wrap { + flex-flow:wrap +} + +.flex-shrink-1 { + flex-shrink:1 +} + +.ml-auto { + margin-left:auto +} + +.table-responsive { + display:block; + width:100%; + overflow-x:auto; + -webkit-overflow-scrolling:touch +} diff --git a/gateway/assets/test/go.mod b/gateway/assets/test/go.mod new file mode 100644 index 0000000000..8980d9a71e --- /dev/null +++ b/gateway/assets/test/go.mod @@ -0,0 +1,3 @@ +module gateway-test + +go 1.19 diff --git a/gateway/assets/test/main.go b/gateway/assets/test/main.go new file mode 100644 index 0000000000..96d940496e --- /dev/null +++ b/gateway/assets/test/main.go @@ -0,0 +1,156 @@ +package main + +import ( + "fmt" + "html/template" + "net/http" + "net/url" + "os" +) + +const ( + directoryTemplateFile = "../directory-index.html" + dagTemplateFile = "../dag-index.html" + + testPath = "/ipfs/QmFooBarQXB2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7/a/b/c" +) + +var directoryTestData = DirectoryTemplateData{ + GatewayURL: "//localhost:3000", + DNSLink: true, + Listing: []DirectoryItem{{ + Size: "25 MiB", + Name: "short-film.mov", + Path: testPath + "/short-film.mov", + Hash: "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR", + ShortHash: "QmbW\u2026sMnR", + }, { + Size: "23 KiB", + Name: "250pxيوسف_الوزاني_صورة_ملتقطة_بواسطة_مرصد_هابل_الفضائي_توضح_سديم_السرطان،_وهو_بقايا_مستعر_أعظم._.jpg", + Path: testPath + "/250pxيوسف_الوزاني_صورة_ملتقطة_بواسطة_مرصد_هابل_الفضائي_توضح_سديم_السرطان،_وهو_بقايا_مستعر_أعظم._.jpg", + Hash: "QmUwrKrMTrNv8QjWGKMMH5QV9FMPUtRCoQ6zxTdgxATQW6", + ShortHash: "QmUw\u2026TQW6", + }, { + Size: "1 KiB", + Name: "this-piece-of-papers-got-47-words-37-sentences-58-words-we-wanna-know.txt", + Path: testPath + "/this-piece-of-papers-got-47-words-37-sentences-58-words-we-wanna-know.txt", + Hash: "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + ShortHash: "bafy\u2026bzdi", + }}, + Size: "25 MiB", + Path: testPath, + Breadcrumbs: []Breadcrumb{{ + Name: "ipfs", + }, { + Name: "QmFooBarQXB2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7", + Path: testPath + "/../../..", + }, { + Name: "a", + Path: testPath + "/../..", + }, { + Name: "b", + Path: testPath + "/..", + }, { + Name: "c", + Path: testPath, + }}, + BackLink: testPath + "/..", + Hash: "QmFooBazBar2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7", +} + +var dagTestData = 
DagTemplateData{ + Path: "/ipfs/baguqeerabn4wonmz6icnk7dfckuizcsf4e4igua2ohdboecku225xxmujepa", + CID: "baguqeerabn4wonmz6icnk7dfckuizcsf4e4igua2ohdboecku225xxmujepa", + CodecName: "dag-json", + CodecHex: "0x129", +} + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/dag": + dagTemplate, err := template.New("dag-index.html").ParseFiles(dagTemplateFile) + if err != nil { + http.Error(w, fmt.Sprintf("failed to parse template file: %s", err), http.StatusInternalServerError) + return + } + err = dagTemplate.Execute(w, &dagTestData) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %s", err), http.StatusInternalServerError) + return + } + case "/directory": + directoryTemplate, err := template.New("directory-index.html").Funcs(template.FuncMap{ + "iconFromExt": func(name string) string { + return "ipfs-_blank" // place-holder + }, + "urlEscape": func(rawUrl string) string { + pathURL := url.URL{Path: rawUrl} + return pathURL.String() + }, + }).ParseFiles(directoryTemplateFile) + if err != nil { + http.Error(w, fmt.Sprintf("failed to parse template file: %s", err), http.StatusInternalServerError) + return + } + err = directoryTemplate.Execute(w, &directoryTestData) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %s", err), http.StatusInternalServerError) + return + } + case "/": + html := `

<p>Test paths: <a href="/dag">DAG</a>, <a href="/directory">Directory</a>.</p>`
+			_, _ = w.Write([]byte(html))
+		default:
+			http.Redirect(w, r, "/", http.StatusSeeOther)
+		}
+	})
+
+	if _, err := os.Stat(directoryTemplateFile); err != nil {
+		wd, _ := os.Getwd()
+		fmt.Printf("could not open template file %q, relative to %q: %s\n", directoryTemplateFile, wd, err)
+		os.Exit(1)
+	}
+
+	if _, err := os.Stat(dagTemplateFile); err != nil {
+		wd, _ := os.Getwd()
+		fmt.Printf("could not open template file %q, relative to %q: %s\n", dagTemplateFile, wd, err)
+		os.Exit(1)
+	}
+
+	fmt.Printf("listening on localhost:3000\n")
+	_ = http.ListenAndServe("localhost:3000", mux)
+}
+
+// Copied from ../assets.go
+type DagTemplateData struct {
+	Path      string
+	CID       string
+	CodecName string
+	CodecHex  string
+}
+
+type DirectoryTemplateData struct {
+	GatewayURL  string
+	DNSLink     bool
+	Listing     []DirectoryItem
+	Size        string
+	Path        string
+	Breadcrumbs []Breadcrumb
+	BackLink    string
+	Hash        string
+}
+
+type DirectoryItem struct {
+	Size      string
+	Name      string
+	Path      string
+	Hash      string
+	ShortHash string
+}
+
+type Breadcrumb struct {
+	Name string
+	Path string
+}
diff --git a/gateway/errors.go b/gateway/errors.go
new file mode 100644
index 0000000000..191426742d
--- /dev/null
+++ b/gateway/errors.go
@@ -0,0 +1,181 @@
+package gateway
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/ipfs/boxo/path/resolver"
+	"github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+var (
+	ErrInternalServerError = NewErrorResponseForCode(http.StatusInternalServerError)
+	ErrGatewayTimeout      = NewErrorResponseForCode(http.StatusGatewayTimeout)
+	ErrBadGateway          = NewErrorResponseForCode(http.StatusBadGateway)
+	ErrServiceUnavailable  = NewErrorResponseForCode(http.StatusServiceUnavailable)
+	ErrTooManyRequests     = NewErrorResponseForCode(http.StatusTooManyRequests)
+)
+
+type ErrorRetryAfter struct {
+	Err        error
+	RetryAfter time.Duration
+}
+
+// NewErrorRetryAfter wraps any error in a RetryAfter hint that
+// gets passed to HTTP clients in the Retry-After HTTP header.
+func NewErrorRetryAfter(err error, retryAfter time.Duration) *ErrorRetryAfter {
+	if err == nil {
+		err = ErrServiceUnavailable
+	}
+	if retryAfter < 0 {
+		retryAfter = 0
+	}
+	return &ErrorRetryAfter{
+		RetryAfter: retryAfter,
+		Err:        err,
+	}
+}
+
+func (e *ErrorRetryAfter) Error() string {
+	var text string
+	if e.Err != nil {
+		text = e.Err.Error()
+	}
+	if e.RetryAfter != 0 {
+		text += fmt.Sprintf(", retry after %s", e.Humanized())
+	}
+	return text
+}
+
+func (e *ErrorRetryAfter) Unwrap() error {
+	return e.Err
+}
+
+func (e *ErrorRetryAfter) Is(err error) bool {
+	switch err.(type) {
+	case *ErrorRetryAfter:
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *ErrorRetryAfter) RoundSeconds() time.Duration {
+	return e.RetryAfter.Round(time.Second)
+}
+
+func (e *ErrorRetryAfter) Humanized() string {
+	return e.RoundSeconds().String()
+}
+
+// HTTPHeaderValue returns the Retry-After header value as a string, representing the
+// number of seconds to wait before making a new request, rounded to the nearest second.
+// This function follows the Retry-After header definition as specified in RFC 9110.
+func (e *ErrorRetryAfter) HTTPHeaderValue() string {
+	return strconv.Itoa(int(e.RoundSeconds().Seconds()))
+}
+
+// ErrorResponse is a custom type for collecting error details to be handled by
+// `webError`. When an error of this type is returned to the gateway handler, the
+// StatusCode will be used for the response status.
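+//
+// A minimal, hypothetical usage sketch (the error text and `arg` below are
+// made up for illustration; NewErrorResponse is defined right after this type):
+//
+//	return NewErrorResponse(fmt.Errorf("%q is not a valid CID", arg), http.StatusBadRequest)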
+type ErrorResponse struct {
+	StatusCode int
+	Err        error
+}
+
+func NewErrorResponseForCode(statusCode int) *ErrorResponse {
+	return NewErrorResponse(errors.New(http.StatusText(statusCode)), statusCode)
+}
+
+func NewErrorResponse(err error, statusCode int) *ErrorResponse {
+	return &ErrorResponse{
+		Err:        err,
+		StatusCode: statusCode,
+	}
+}
+
+func (e *ErrorResponse) Is(err error) bool {
+	switch err.(type) {
+	case *ErrorResponse:
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *ErrorResponse) Error() string {
+	var text string
+	if e.Err != nil {
+		text = e.Err.Error()
+	}
+	return text
+}
+
+func (e *ErrorResponse) Unwrap() error {
+	return e.Err
+}
+
+func webError(w http.ResponseWriter, err error, defaultCode int) {
+	code := defaultCode
+
+	// Pass the Retry-After hint to the client.
+	var era *ErrorRetryAfter
+	if errors.As(err, &era) {
+		if era.RetryAfter > 0 {
+			w.Header().Set("Retry-After", era.HTTPHeaderValue())
+			// Adjust defaultCode if needed.
+			if code != http.StatusTooManyRequests && code != http.StatusServiceUnavailable {
+				code = http.StatusTooManyRequests
+			}
+		}
+		err = era.Unwrap()
+	}
+
+	// Handle the status code.
+	switch {
+	case errors.Is(err, &cid.ErrInvalidCid{}):
+		code = http.StatusBadRequest
+	case isErrNotFound(err):
+		code = http.StatusNotFound
+	case errors.Is(err, context.DeadlineExceeded):
+		code = http.StatusGatewayTimeout
+	}
+
+	// Handle an explicit code in ErrorResponse.
+	var gwErr *ErrorResponse
+	if errors.As(err, &gwErr) {
+		code = gwErr.StatusCode
+	}
+
+	http.Error(w, err.Error(), code)
+}
+
+func isErrNotFound(err error) bool {
+	if ipld.IsNotFound(err) {
+		return true
+	}
+
+	// Check if err is a resolver.ErrNoLink. resolver.ErrNoLink does not implement
+	// the Is interface and cannot be compared to directly, so errors.Is always
+	// returns false for it.
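+	// Walk the wrap chain manually instead: type-assert each layer, unwrapping
+	// until a resolver.ErrNoLink is found or the chain runs out.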
+	for {
+		_, ok := err.(resolver.ErrNoLink)
+		if ok {
+			return true
+		}
+
+		err = errors.Unwrap(err)
+		if err == nil {
+			return false
+		}
+	}
+}
+
+func webRequestError(w http.ResponseWriter, err *ErrorResponse) {
+	webError(w, err.Err, err.StatusCode)
+}
diff --git a/gateway/errors_test.go b/gateway/errors_test.go
new file mode 100644
index 0000000000..05e6ca887b
--- /dev/null
+++ b/gateway/errors_test.go
@@ -0,0 +1,65 @@
+package gateway
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestErrRetryAfterIs(t *testing.T) {
+	var err error
+
+	err = NewErrorRetryAfter(errors.New("test"), 10*time.Second)
+	assert.True(t, errors.Is(err, &ErrorRetryAfter{}), "pointer to error must be error")
+
+	err = fmt.Errorf("wrapped: %w", err)
+	assert.True(t, errors.Is(err, &ErrorRetryAfter{}), "wrapped pointer to error must be error")
+}
+
+func TestErrRetryAfterAs(t *testing.T) {
+	var (
+		err   error
+		errRA *ErrorRetryAfter
+	)
+
+	err = NewErrorRetryAfter(errors.New("test"), 25*time.Second)
+	assert.True(t, errors.As(err, &errRA), "pointer to error must be error")
+	assert.EqualValues(t, errRA.RetryAfter, 25*time.Second)
+
+	err = fmt.Errorf("wrapped: %w", err)
+	assert.True(t, errors.As(err, &errRA), "wrapped pointer to error must be error")
+	assert.EqualValues(t, errRA.RetryAfter, 25*time.Second)
+}
+
+func TestWebError(t *testing.T) {
+	t.Parallel()
+
+	t.Run("429 Too Many Requests", func(t *testing.T) {
+		err := fmt.Errorf("wrapped for testing: %w", NewErrorRetryAfter(ErrTooManyRequests, 0))
+		w := httptest.NewRecorder()
+		webError(w, err, http.StatusInternalServerError)
+		assert.Equal(t, http.StatusTooManyRequests, w.Result().StatusCode)
+		assert.Zero(t, len(w.Result().Header.Values("Retry-After")))
+	})
+
+	t.Run("429 Too Many Requests with Retry-After header", func(t *testing.T) {
+		err := NewErrorRetryAfter(ErrTooManyRequests, 25*time.Second)
+		w := httptest.NewRecorder()
+		webError(w, err, http.StatusInternalServerError)
+		assert.Equal(t, http.StatusTooManyRequests, w.Result().StatusCode)
+		assert.Equal(t, "25", w.Result().Header.Get("Retry-After"))
+	})
+
+	t.Run("503 Service Unavailable with Retry-After header", func(t *testing.T) {
+		err := NewErrorRetryAfter(ErrServiceUnavailable, 50*time.Second)
+		w := httptest.NewRecorder()
+		webError(w, err, http.StatusInternalServerError)
+		assert.Equal(t, http.StatusServiceUnavailable, w.Result().StatusCode)
+		assert.Equal(t, "50", w.Result().Header.Get("Retry-After"))
+	})
+}
diff --git a/gateway/gateway.go b/gateway/gateway.go
new file mode 100644
index 0000000000..f32e0646f9
--- /dev/null
+++ b/gateway/gateway.go
@@ -0,0 +1,120 @@
+package gateway
+
+import (
+	"context"
+	"net/http"
+	"sort"
+
+	"github.com/ipfs/boxo/blocks"
+	iface "github.com/ipfs/boxo/coreiface"
+	"github.com/ipfs/boxo/coreiface/path"
+	"github.com/ipfs/boxo/files"
+	cid "github.com/ipfs/go-cid"
+)
+
+// Config is the configuration used when creating a new gateway handler.
+type Config struct {
+	Headers map[string][]string
+}
+
+// API defines the minimal set of API services required for a gateway handler.
+type API interface {
+	// GetUnixFsNode returns a read-only handle to a file tree referenced by a path.
+	GetUnixFsNode(context.Context, path.Resolved) (files.Node, error)
+
+	// LsUnixFsDir returns the list of links in a directory.
+	LsUnixFsDir(context.Context, path.Resolved) (<-chan iface.DirEntry, error)
+
+	// GetBlock returns a block from a certain CID.
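+	// (Implementations will typically delegate to a block service; the test
+	// mock later in this diff, for instance, returns blockService.GetBlock.)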
+	GetBlock(context.Context, cid.Cid) (blocks.Block, error)
+
+	// GetIPNSRecord retrieves the best IPNS record for a given CID (libp2p-key)
+	// from the routing system.
+	GetIPNSRecord(context.Context, cid.Cid) ([]byte, error)
+
+	// GetDNSLinkRecord returns the DNSLink TXT record for the provided FQDN.
+	// Unlike ResolvePath, it does not perform recursive resolution. It only
+	// checks for the existence of a DNSLink TXT record with a path starting with
+	// /ipfs/ or /ipns/ and returns the path as-is.
+	GetDNSLinkRecord(context.Context, string) (path.Path, error)
+
+	// IsCached returns whether or not the path exists locally.
+	IsCached(context.Context, path.Path) bool
+
+	// ResolvePath resolves the path using the UnixFS resolver. If the path does
+	// not exist due to a missing link, it should return an error of type:
+	// https://pkg.go.dev/github.com/ipfs/go-path@v0.3.0/resolver#ErrNoLink
+	ResolvePath(context.Context, path.Path) (path.Resolved, error)
+}
+
+// cleanHeaderSet is a helper function that cleans up a set of headers:
+//  1. Canonicalizes.
+//  2. Deduplicates.
+//  3. Sorts.
+func cleanHeaderSet(headers []string) []string {
+	// Deduplicate and canonicalize.
+	m := make(map[string]struct{}, len(headers))
+	for _, h := range headers {
+		m[http.CanonicalHeaderKey(h)] = struct{}{}
+	}
+	result := make([]string, 0, len(m))
+	for k := range m {
+		result = append(result, k)
+	}
+
+	// Sort
+	sort.Strings(result)
+	return result
+}
+
+// AddAccessControlHeaders adds default headers used for controlling
+// cross-origin requests. This function adds several values to the
+// Access-Control-Allow-Headers and Access-Control-Expose-Headers entries.
+// If the Access-Control-Allow-Origin entry is missing, a value of '*' is
+// added, indicating that browsers should allow requesting code from any
+// origin to access the resource.
+// If the Access-Control-Allow-Methods entry is missing, a value of 'GET' is
+// added, indicating that browsers may use the GET method when issuing
+// cross-origin requests.
+func AddAccessControlHeaders(headers map[string][]string) {
+	// Hard-coded headers.
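+	// (Spelled out once as constants so the four header writes below all use
+	// the canonical names.)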
+ const ACAHeadersName = "Access-Control-Allow-Headers" + const ACEHeadersName = "Access-Control-Expose-Headers" + const ACAOriginName = "Access-Control-Allow-Origin" + const ACAMethodsName = "Access-Control-Allow-Methods" + + if _, ok := headers[ACAOriginName]; !ok { + // Default to *all* + headers[ACAOriginName] = []string{"*"} + } + if _, ok := headers[ACAMethodsName]; !ok { + // Default to GET + headers[ACAMethodsName] = []string{http.MethodGet} + } + + headers[ACAHeadersName] = cleanHeaderSet( + append([]string{ + "Content-Type", + "User-Agent", + "Range", + "X-Requested-With", + }, headers[ACAHeadersName]...)) + + headers[ACEHeadersName] = cleanHeaderSet( + append([]string{ + "Content-Length", + "Content-Range", + "X-Chunked-Output", + "X-Stream-Output", + "X-Ipfs-Path", + "X-Ipfs-Roots", + }, headers[ACEHeadersName]...)) +} + +type RequestContextKey string + +const ( + DNSLinkHostnameKey RequestContextKey = "dnslink-hostname" + GatewayHostnameKey RequestContextKey = "gw-hostname" + ContentPathKey RequestContextKey = "content-path" +) diff --git a/gateway/gateway_test.go b/gateway/gateway_test.go new file mode 100644 index 0000000000..b78f95e455 --- /dev/null +++ b/gateway/gateway_test.go @@ -0,0 +1,637 @@ +package gateway + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + gopath "path" + "regexp" + "strings" + "testing" + + "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/blockservice" + blockstore "github.com/ipfs/boxo/blockstore" + iface "github.com/ipfs/boxo/coreiface" + nsopts "github.com/ipfs/boxo/coreiface/options/namesys" + ipath "github.com/ipfs/boxo/coreiface/path" + offline "github.com/ipfs/boxo/exchange/offline" + bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice" + "github.com/ipfs/boxo/files" + carblockstore "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/boxo/namesys" + "github.com/ipfs/boxo/namesys/resolve" + path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path/resolver" + "github.com/ipfs/boxo/unixfs" + ufile "github.com/ipfs/boxo/unixfs/file" + uio "github.com/ipfs/boxo/unixfs/io" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-unixfsnode" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/schema" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/stretchr/testify/assert" +) + +type mockNamesys map[string]path.Path + +func (m mockNamesys) Resolve(ctx context.Context, name string, opts ...nsopts.ResolveOpt) (value path.Path, err error) { + cfg := nsopts.DefaultResolveOpts() + for _, o := range opts { + o(&cfg) + } + depth := cfg.Depth + if depth == nsopts.UnlimitedDepth { + // max uint + depth = ^uint(0) + } + for strings.HasPrefix(name, "/ipns/") { + if depth == 0 { + return value, namesys.ErrResolveRecursion + } + depth-- + + var ok bool + value, ok = m[name] + if !ok { + return "", namesys.ErrResolveFailed + } + name = value.String() + } + return value, nil +} + +func (m mockNamesys) ResolveAsync(ctx context.Context, name string, opts ...nsopts.ResolveOpt) <-chan namesys.Result { + out := make(chan namesys.Result, 1) + v, err := m.Resolve(ctx, name, opts...) 
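+	// Deliver the single result and close; the channel is buffered (size 1),
+	// so the send completes even if the caller never reads.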
+ out <- namesys.Result{Path: v, Err: err} + close(out) + return out +} + +func (m mockNamesys) Publish(ctx context.Context, name crypto.PrivKey, value path.Path, opts ...nsopts.PublishOption) error { + return errors.New("not implemented for mockNamesys") +} + +func (m mockNamesys) GetResolver(subs string) (namesys.Resolver, bool) { + return nil, false +} + +type mockAPI struct { + blockStore blockstore.Blockstore + blockService blockservice.BlockService + dagService format.DAGService + resolver resolver.Resolver + namesys mockNamesys +} + +func newMockAPI(t *testing.T) (*mockAPI, cid.Cid) { + r, err := os.Open("./testdata/fixtures.car") + assert.Nil(t, err) + + blockStore, err := carblockstore.NewReadOnly(r, nil) + assert.Nil(t, err) + + t.Cleanup(func() { + blockStore.Close() + r.Close() + }) + + cids, err := blockStore.Roots() + assert.Nil(t, err) + assert.Len(t, cids, 1) + + blockService := blockservice.New(blockStore, offline.Exchange(blockStore)) + dagService := merkledag.NewDAGService(blockService) + + fetcherConfig := bsfetcher.NewFetcherConfig(blockService) + fetcherConfig.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { + if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { + return tlnkNd.LinkTargetNodePrototype(), nil + } + return basicnode.Prototype.Any, nil + }) + fetcher := fetcherConfig.WithReifier(unixfsnode.Reify) + resolver := resolver.NewBasicResolver(fetcher) + + return &mockAPI{ + blockStore: blockService.Blockstore(), + blockService: blockService, + dagService: dagService, + resolver: resolver, + namesys: mockNamesys{}, + }, cids[0] +} + +func (api *mockAPI) GetUnixFsNode(ctx context.Context, p ipath.Resolved) (files.Node, error) { + nd, err := api.resolveNode(ctx, p) + if err != nil { + return nil, err + } + + return ufile.NewUnixfsFile(ctx, api.dagService, nd) +} + +func (api *mockAPI) LsUnixFsDir(ctx context.Context, p ipath.Resolved) (<-chan iface.DirEntry, error) { + node, err := api.resolveNode(ctx, p) + if err != nil { + return nil, err + } + + dir, err := uio.NewDirectoryFromNode(api.dagService, node) + if err != nil { + return nil, err + } + + out := make(chan iface.DirEntry, uio.DefaultShardWidth) + + go func() { + defer close(out) + for l := range dir.EnumLinksAsync(ctx) { + select { + case out <- api.processLink(ctx, l): + case <-ctx.Done(): + return + } + } + }() + + return out, nil +} + +func (api *mockAPI) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + return api.blockService.GetBlock(ctx, c) +} + +func (api *mockAPI) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, error) { + return nil, routing.ErrNotSupported +} + +func (api *mockAPI) GetDNSLinkRecord(ctx context.Context, hostname string) (ipath.Path, error) { + if api.namesys != nil { + p, err := api.namesys.Resolve(ctx, "/ipns/"+hostname, nsopts.Depth(1)) + if err == namesys.ErrResolveRecursion { + err = nil + } + return ipath.New(p.String()), err + } + + return nil, errors.New("not implemented") +} + +func (api *mockAPI) IsCached(ctx context.Context, p ipath.Path) bool { + rp, err := api.ResolvePath(ctx, p) + if err != nil { + return false + } + + has, _ := api.blockStore.Has(ctx, rp.Cid()) + return has +} + +func (api *mockAPI) ResolvePath(ctx context.Context, ip ipath.Path) (ipath.Resolved, error) { + if _, ok := ip.(ipath.Resolved); ok { + return ip.(ipath.Resolved), nil + } + + err := ip.IsValid() + if err != nil { + return nil, err + } + + p := path.Path(ip.String()) + if p.Segments()[0] 
== "ipns" { + p, err = resolve.ResolveIPNS(ctx, api.namesys, p) + if err != nil { + return nil, err + } + } + + if p.Segments()[0] != "ipfs" { + return nil, fmt.Errorf("unsupported path namespace: %s", ip.Namespace()) + } + + node, rest, err := api.resolver.ResolveToLastNode(ctx, p) + if err != nil { + return nil, err + } + + root, err := cid.Parse(p.Segments()[1]) + if err != nil { + return nil, err + } + + return ipath.NewResolvedPath(p, node, root, gopath.Join(rest...)), nil +} + +func (api *mockAPI) resolveNode(ctx context.Context, p ipath.Path) (format.Node, error) { + rp, err := api.ResolvePath(ctx, p) + if err != nil { + return nil, err + } + + node, err := api.dagService.Get(ctx, rp.Cid()) + if err != nil { + return nil, fmt.Errorf("get node: %w", err) + } + return node, nil +} + +func (api *mockAPI) processLink(ctx context.Context, result unixfs.LinkResult) iface.DirEntry { + if result.Err != nil { + return iface.DirEntry{Err: result.Err} + } + + link := iface.DirEntry{ + Name: result.Link.Name, + Cid: result.Link.Cid, + } + + switch link.Cid.Type() { + case cid.Raw: + link.Type = iface.TFile + link.Size = result.Link.Size + case cid.DagProtobuf: + link.Size = result.Link.Size + } + + return link +} + +func doWithoutRedirect(req *http.Request) (*http.Response, error) { + tag := "without-redirect" + c := &http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return errors.New(tag) + }, + } + res, err := c.Do(req) + if err != nil && !strings.Contains(err.Error(), tag) { + return nil, err + } + return res, nil +} + +func newTestServerAndNode(t *testing.T, ns mockNamesys) (*httptest.Server, *mockAPI, cid.Cid) { + api, root := newMockAPI(t) + ts := newTestServer(t, api) + return ts, api, root +} + +func newTestServer(t *testing.T, api API) *httptest.Server { + config := Config{Headers: map[string][]string{}} + AddAccessControlHeaders(config.Headers) + + handler := NewHandler(config, api) + mux := http.NewServeMux() + mux.Handle("/ipfs/", handler) + mux.Handle("/ipns/", handler) + handler = WithHostname(mux, api, map[string]*Specification{}, false) + + ts := httptest.NewServer(handler) + t.Cleanup(func() { ts.Close() }) + + return ts +} + +func matchPathOrBreadcrumbs(s string, expected string) bool { + matched, _ := regexp.MatchString("Index of(\n|\r\n)[\t ]*"+regexp.QuoteMeta(expected), s) + return matched +} + +func TestGatewayGet(t *testing.T) { + ts, api, root := newTestServerAndNode(t, nil) + t.Logf("test server url: %s", ts.URL) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + k, err := api.ResolvePath(ctx, ipath.Join(ipath.IpfsPath(root), t.Name(), "fnord")) + assert.Nil(t, err) + + api.namesys["/ipns/example.com"] = path.FromCid(k.Cid()) + api.namesys["/ipns/working.example.com"] = path.FromString(k.String()) + api.namesys["/ipns/double.example.com"] = path.FromString("/ipns/working.example.com") + api.namesys["/ipns/triple.example.com"] = path.FromString("/ipns/double.example.com") + api.namesys["/ipns/broken.example.com"] = path.FromString("/ipns/" + k.Cid().String()) + // We picked .man because: + // 1. It's a valid TLD. + // 2. Go treats it as the file extension for "man" files (even though + // nobody actually *uses* this extension, AFAIK). + // + // Unfortunately, this may not work on all platforms as file type + // detection is platform dependent. 
+ api.namesys["/ipns/example.man"] = path.FromString(k.String()) + + t.Log(ts.URL) + for _, test := range []struct { + host string + path string + status int + text string + }{ + {"127.0.0.1:8080", "/", http.StatusNotFound, "404 page not found\n"}, + {"127.0.0.1:8080", "/" + k.Cid().String(), http.StatusNotFound, "404 page not found\n"}, + {"127.0.0.1:8080", "/ipfs/this-is-not-a-cid", http.StatusBadRequest, "failed to resolve /ipfs/this-is-not-a-cid: invalid path \"/ipfs/this-is-not-a-cid\": invalid CID: invalid cid: illegal base32 data at input byte 3\n"}, + {"127.0.0.1:8080", k.String(), http.StatusOK, "fnord"}, + {"127.0.0.1:8080", "/ipns/nxdomain.example.com", http.StatusInternalServerError, "failed to resolve /ipns/nxdomain.example.com: " + namesys.ErrResolveFailed.Error() + "\n"}, + {"127.0.0.1:8080", "/ipns/%0D%0A%0D%0Ahello", http.StatusInternalServerError, "failed to resolve /ipns/\\r\\n\\r\\nhello: " + namesys.ErrResolveFailed.Error() + "\n"}, + {"127.0.0.1:8080", "/ipns/k51qzi5uqu5djucgtwlxrbfiyfez1nb0ct58q5s4owg6se02evza05dfgi6tw5", http.StatusInternalServerError, "failed to resolve /ipns/k51qzi5uqu5djucgtwlxrbfiyfez1nb0ct58q5s4owg6se02evza05dfgi6tw5: " + namesys.ErrResolveFailed.Error() + "\n"}, + {"127.0.0.1:8080", "/ipns/example.com", http.StatusOK, "fnord"}, + {"example.com", "/", http.StatusOK, "fnord"}, + + {"working.example.com", "/", http.StatusOK, "fnord"}, + {"double.example.com", "/", http.StatusOK, "fnord"}, + {"triple.example.com", "/", http.StatusOK, "fnord"}, + {"working.example.com", k.String(), http.StatusNotFound, "failed to resolve /ipns/working.example.com" + k.String() + ": no link named \"ipfs\" under " + k.Cid().String() + "\n"}, + {"broken.example.com", "/", http.StatusInternalServerError, "failed to resolve /ipns/broken.example.com/: " + namesys.ErrResolveFailed.Error() + "\n"}, + {"broken.example.com", k.String(), http.StatusInternalServerError, "failed to resolve /ipns/broken.example.com" + k.String() + ": " + namesys.ErrResolveFailed.Error() + "\n"}, + // This test case ensures we don't treat the TLD as a file extension. 
+ {"example.man", "/", http.StatusOK, "fnord"}, + } { + testName := "http://" + test.host + test.path + t.Run(testName, func(t *testing.T) { + var c http.Client + r, err := http.NewRequest(http.MethodGet, ts.URL+test.path, nil) + assert.Nil(t, err) + r.Host = test.host + resp, err := c.Do(r) + assert.Nil(t, err) + defer resp.Body.Close() + assert.Equal(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type")) + body, err := io.ReadAll(resp.Body) + assert.Nil(t, err) + assert.Equal(t, test.status, resp.StatusCode, "body", body) + assert.Equal(t, test.text, string(body)) + }) + } +} + +func TestUriQueryRedirect(t *testing.T) { + ts, _, _ := newTestServerAndNode(t, mockNamesys{}) + + cid := "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR" + for _, test := range []struct { + path string + status int + location string + }{ + // - Browsers will send original URI in URL-escaped form + // - We expect query parameters to be persisted + // - We drop fragments, as those should not be sent by a browser + {"/ipfs/?uri=ipfs%3A%2F%2FQmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco%2Fwiki%2FFoo_%C4%85%C4%99.html%3Ffilename%3Dtest-%C4%99.html%23header-%C4%85", http.StatusMovedPermanently, "/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/Foo_%c4%85%c4%99.html?filename=test-%c4%99.html"}, + {"/ipfs/?uri=ipns%3A%2F%2Fexample.com%2Fwiki%2FFoo_%C4%85%C4%99.html%3Ffilename%3Dtest-%C4%99.html", http.StatusMovedPermanently, "/ipns/example.com/wiki/Foo_%c4%85%c4%99.html?filename=test-%c4%99.html"}, + {"/ipfs/?uri=ipfs://" + cid, http.StatusMovedPermanently, "/ipfs/" + cid}, + {"/ipfs?uri=ipfs://" + cid, http.StatusMovedPermanently, "/ipfs/?uri=ipfs://" + cid}, + {"/ipfs/?uri=ipns://" + cid, http.StatusMovedPermanently, "/ipns/" + cid}, + {"/ipns/?uri=ipfs%3A%2F%2FQmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco%2Fwiki%2FFoo_%C4%85%C4%99.html%3Ffilename%3Dtest-%C4%99.html%23header-%C4%85", http.StatusMovedPermanently, "/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/Foo_%c4%85%c4%99.html?filename=test-%c4%99.html"}, + {"/ipns/?uri=ipns%3A%2F%2Fexample.com%2Fwiki%2FFoo_%C4%85%C4%99.html%3Ffilename%3Dtest-%C4%99.html", http.StatusMovedPermanently, "/ipns/example.com/wiki/Foo_%c4%85%c4%99.html?filename=test-%c4%99.html"}, + {"/ipns?uri=ipns://" + cid, http.StatusMovedPermanently, "/ipns/?uri=ipns://" + cid}, + {"/ipns/?uri=ipns://" + cid, http.StatusMovedPermanently, "/ipns/" + cid}, + {"/ipns/?uri=ipfs://" + cid, http.StatusMovedPermanently, "/ipfs/" + cid}, + {"/ipfs/?uri=unsupported://" + cid, http.StatusBadRequest, ""}, + {"/ipfs/?uri=invaliduri", http.StatusBadRequest, ""}, + {"/ipfs/?uri=" + cid, http.StatusBadRequest, ""}, + } { + testName := ts.URL + test.path + t.Run(testName, func(t *testing.T) { + r, err := http.NewRequest(http.MethodGet, ts.URL+test.path, nil) + assert.Nil(t, err) + resp, err := doWithoutRedirect(r) + assert.Nil(t, err) + defer resp.Body.Close() + assert.Equal(t, test.status, resp.StatusCode) + assert.Equal(t, test.location, resp.Header.Get("Location")) + }) + } +} + +func TestIPNSHostnameRedirect(t *testing.T) { + ts, api, root := newTestServerAndNode(t, nil) + t.Logf("test server url: %s", ts.URL) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + k, err := api.ResolvePath(ctx, ipath.Join(ipath.IpfsPath(root), t.Name())) + assert.Nil(t, err) + + t.Logf("k: %s\n", k) + api.namesys["/ipns/example.net"] = path.FromString(k.String()) + + // make request to directory containing index.html + req, err := http.NewRequest(http.MethodGet, 
ts.URL+"/foo", nil) + assert.Nil(t, err) + req.Host = "example.net" + + res, err := doWithoutRedirect(req) + assert.Nil(t, err) + + // expect 301 redirect to same path, but with trailing slash + assert.Equal(t, http.StatusMovedPermanently, res.StatusCode) + hdr := res.Header["Location"] + assert.Positive(t, len(hdr), "location header not present") + assert.Equal(t, hdr[0], "/foo/") + + // make request with prefix to directory containing index.html + req, err = http.NewRequest(http.MethodGet, ts.URL+"/foo", nil) + assert.Nil(t, err) + req.Host = "example.net" + + res, err = doWithoutRedirect(req) + assert.Nil(t, err) + // expect 301 redirect to same path, but with prefix and trailing slash + assert.Equal(t, http.StatusMovedPermanently, res.StatusCode) + + hdr = res.Header["Location"] + assert.Positive(t, len(hdr), "location header not present") + assert.Equal(t, hdr[0], "/foo/") + + // make sure /version isn't exposed + req, err = http.NewRequest(http.MethodGet, ts.URL+"/version", nil) + assert.Nil(t, err) + req.Host = "example.net" + + res, err = doWithoutRedirect(req) + assert.Nil(t, err) + assert.Equal(t, http.StatusNotFound, res.StatusCode) +} + +// Test directory listing on DNSLink website +// (scenario when Host header is the same as URL hostname) +// This is basic regression test: additional end-to-end tests +// can be found in test/sharness/t0115-gateway-dir-listing.sh +func TestIPNSHostnameBacklinks(t *testing.T) { + ts, api, root := newTestServerAndNode(t, nil) + t.Logf("test server url: %s", ts.URL) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + k, err := api.ResolvePath(ctx, ipath.Join(ipath.IpfsPath(root), t.Name())) + assert.Nil(t, err) + + // create /ipns/example.net/foo/ + k2, err := api.ResolvePath(ctx, ipath.Join(k, "foo? #<'")) + assert.Nil(t, err) + + k3, err := api.ResolvePath(ctx, ipath.Join(k, "foo? #<'/bar")) + assert.Nil(t, err) + + t.Logf("k: %s\n", k) + api.namesys["/ipns/example.net"] = path.FromString(k.String()) + + // make request to directory listing + req, err := http.NewRequest(http.MethodGet, ts.URL+"/foo%3F%20%23%3C%27/", nil) + assert.Nil(t, err) + req.Host = "example.net" + + res, err := doWithoutRedirect(req) + assert.Nil(t, err) + + // expect correct links + body, err := io.ReadAll(res.Body) + assert.Nil(t, err) + s := string(body) + t.Logf("body: %s\n", string(body)) + + assert.True(t, matchPathOrBreadcrumbs(s, "/ipns/example.net/foo? #<'"), "expected a path in directory listing") + // https://github.com/ipfs/dir-index-html/issues/42 + assert.Contains(t, s, "", "expected backlink in directory listing") + assert.Contains(t, s, "", "expected file in directory listing") + assert.Contains(t, s, s, k2.Cid().String(), "expected hash in directory listing") + + // make request to directory listing at root + req, err = http.NewRequest(http.MethodGet, ts.URL, nil) + assert.Nil(t, err) + req.Host = "example.net" + + res, err = doWithoutRedirect(req) + assert.Nil(t, err) + + // expect correct backlinks at root + body, err = io.ReadAll(res.Body) + assert.Nil(t, err) + + s = string(body) + t.Logf("body: %s\n", string(body)) + + assert.True(t, matchPathOrBreadcrumbs(s, "/"), "expected a path in directory listing") + assert.NotContains(t, s, "", "expected no backlink in directory listing of the root CID") + assert.Contains(t, s, "", "expected file in directory listing") + // https://github.com/ipfs/dir-index-html/issues/42 + assert.Contains(t, s, "example.net/foo? 
#<'/bar"), "expected a path in directory listing") + assert.Contains(t, s, "", "expected backlink in directory listing") + assert.Contains(t, s, "", "expected file in directory listing") + assert.Contains(t, s, k3.Cid().String(), "expected hash in directory listing") +} + +func TestPretty404(t *testing.T) { + ts, api, root := newTestServerAndNode(t, nil) + t.Logf("test server url: %s", ts.URL) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + k, err := api.ResolvePath(ctx, ipath.Join(ipath.IpfsPath(root), t.Name())) + assert.Nil(t, err) + + host := "example.net" + api.namesys["/ipns/"+host] = path.FromString(k.String()) + + for _, test := range []struct { + path string + accept string + status int + text string + }{ + {"/ipfs-404.html", "text/html", http.StatusOK, "Custom 404"}, + {"/nope", "text/html", http.StatusNotFound, "Custom 404"}, + {"/nope", "text/*", http.StatusNotFound, "Custom 404"}, + {"/nope", "*/*", http.StatusNotFound, "Custom 404"}, + {"/nope", "application/json", http.StatusNotFound, fmt.Sprintf("failed to resolve /ipns/example.net/nope: no link named \"nope\" under %s\n", k.Cid().String())}, + {"/deeper/nope", "text/html", http.StatusNotFound, "Deep custom 404"}, + {"/deeper/", "text/html", http.StatusOK, ""}, + {"/deeper", "text/html", http.StatusOK, ""}, + {"/nope/nope", "text/html", http.StatusNotFound, "Custom 404"}, + } { + testName := fmt.Sprintf("%s %s", test.path, test.accept) + t.Run(testName, func(t *testing.T) { + var c http.Client + req, err := http.NewRequest("GET", ts.URL+test.path, nil) + assert.Nil(t, err) + req.Header.Add("Accept", test.accept) + req.Host = host + resp, err := c.Do(req) + assert.Nil(t, err) + defer resp.Body.Close() + assert.Equal(t, test.status, resp.StatusCode) + body, err := io.ReadAll(resp.Body) + assert.Nil(t, err) + if test.text != "" { + assert.Equal(t, test.text, string(body)) + } + }) + } +} + +func TestCacheControlImmutable(t *testing.T) { + ts, _, root := newTestServerAndNode(t, nil) + t.Logf("test server url: %s", ts.URL) + + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipfs/"+root.String()+"/", nil) + assert.Nil(t, err) + + res, err := doWithoutRedirect(req) + assert.Nil(t, err) + + // check the immutable tag isn't set + hdrs, ok := res.Header["Cache-Control"] + if ok { + for _, hdr := range hdrs { + assert.NotContains(t, hdr, "immutable", "unexpected Cache-Control: immutable on directory listing") + } + } +} + +func TestGoGetSupport(t *testing.T) { + ts, _, root := newTestServerAndNode(t, nil) + t.Logf("test server url: %s", ts.URL) + + // mimic go-get + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipfs/"+root.String()+"?go-get=1", nil) + assert.Nil(t, err) + + res, err := doWithoutRedirect(req) + assert.Nil(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) +} diff --git a/gateway/handler.go b/gateway/handler.go new file mode 100644 index 0000000000..02b907f6f1 --- /dev/null +++ b/gateway/handler.go @@ -0,0 +1,880 @@ +package gateway + +import ( + "context" + "fmt" + "html/template" + "io" + "mime" + "net/http" + "net/textproto" + "net/url" + gopath "path" + "regexp" + "runtime/debug" + "strings" + "time" + + coreiface "github.com/ipfs/boxo/coreiface" + ipath "github.com/ipfs/boxo/coreiface/path" + cid "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + mc "github.com/multiformats/go-multicodec" + prometheus "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + 
"go.uber.org/zap" +) + +var log = logging.Logger("core/server") + +const ( + ipfsPathPrefix = "/ipfs/" + ipnsPathPrefix = "/ipns/" + immutableCacheControl = "public, max-age=29030400, immutable" +) + +var ( + onlyASCII = regexp.MustCompile("[[:^ascii:]]") + noModtime = time.Unix(0, 0) // disables Last-Modified header if passed as modtime +) + +// HTML-based redirect for errors which can be recovered from, but we want +// to provide hint to people that they should fix things on their end. +var redirectTemplate = template.Must(template.New("redirect").Parse(` + + + + + + + +

{{.ErrorMsg}}
(if a redirect does not happen in 10 seconds, use "{{.SuggestedPath}}" instead)
+ +`)) + +type redirectTemplateData struct { + RedirectURL string + SuggestedPath string + ErrorMsg string +} + +// handler is a HTTP handler that serves IPFS objects (accessible by default at /ipfs/) +// (it serves requests like GET /ipfs/QmVRzPKPzNtSrEzBFm2UZfxmPAgnaLke4DMcerbsGGSaFe/link) +type handler struct { + config Config + api API + + // generic metrics + firstContentBlockGetMetric *prometheus.HistogramVec + unixfsGetMetric *prometheus.SummaryVec // deprecated, use firstContentBlockGetMetric + + // response type metrics + getMetric *prometheus.HistogramVec + unixfsFileGetMetric *prometheus.HistogramVec + unixfsDirIndexGetMetric *prometheus.HistogramVec + unixfsGenDirListingGetMetric *prometheus.HistogramVec + carStreamGetMetric *prometheus.HistogramVec + rawBlockGetMetric *prometheus.HistogramVec + tarStreamGetMetric *prometheus.HistogramVec + jsoncborDocumentGetMetric *prometheus.HistogramVec + ipnsRecordGetMetric *prometheus.HistogramVec +} + +// StatusResponseWriter enables us to override HTTP Status Code passed to +// WriteHeader function inside of http.ServeContent. Decision is based on +// presence of HTTP Headers such as Location. +type statusResponseWriter struct { + http.ResponseWriter +} + +func (sw *statusResponseWriter) WriteHeader(code int) { + // Check if we need to adjust Status Code to account for scheduled redirect + // This enables us to return payload along with HTTP 301 + // for subdomain redirect in web browsers while also returning body for cli + // tools which do not follow redirects by default (curl, wget). + redirect := sw.ResponseWriter.Header().Get("Location") + if redirect != "" && code == http.StatusOK { + code = http.StatusMovedPermanently + log.Debugw("subdomain redirect", "location", redirect, "status", code) + } + sw.ResponseWriter.WriteHeader(code) +} + +// ServeContent replies to the request using the content in the provided ReadSeeker +// and returns the status code written and any error encountered during a write. +// It wraps http.ServeContent which takes care of If-None-Match+Etag, +// Content-Length and range requests. +func ServeContent(w http.ResponseWriter, req *http.Request, name string, modtime time.Time, content io.ReadSeeker) (int, bool, error) { + ew := &errRecordingResponseWriter{ResponseWriter: w} + http.ServeContent(ew, req, name, modtime, content) + + // When we calculate some metrics we want a flag that lets us to ignore + // errors and 304 Not Modified, and only care when requested data + // was sent in full. + dataSent := ew.code/100 == 2 && ew.err == nil + + return ew.code, dataSent, ew.err +} + +// errRecordingResponseWriter wraps a ResponseWriter to record the status code and any write error. +type errRecordingResponseWriter struct { + http.ResponseWriter + code int + err error +} + +func (w *errRecordingResponseWriter) WriteHeader(code int) { + if w.code == 0 { + w.code = code + } + w.ResponseWriter.WriteHeader(code) +} + +func (w *errRecordingResponseWriter) Write(p []byte) (int, error) { + n, err := w.ResponseWriter.Write(p) + if err != nil && w.err == nil { + w.err = err + } + return n, err +} + +// ReadFrom exposes errRecordingResponseWriter's underlying ResponseWriter to io.Copy +// to allow optimized methods to be taken advantage of. 
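+// (Without this method, io.Copy inside http.ServeContent would fall back to a
+// generic buffered copy; delegating to the wrapped ResponseWriter preserves
+// optimizations such as sendfile where the platform offers them.)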
+func (w *errRecordingResponseWriter) ReadFrom(r io.Reader) (n int64, err error) {
+	n, err = io.Copy(w.ResponseWriter, r)
+	if err != nil && w.err == nil {
+		w.err = err
+	}
+	return n, err
+}
+
+func newSummaryMetric(name string, help string) *prometheus.SummaryVec {
+	summaryMetric := prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Namespace: "ipfs",
+			Subsystem: "http",
+			Name:      name,
+			Help:      help,
+		},
+		[]string{"gateway"},
+	)
+	if err := prometheus.Register(summaryMetric); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			summaryMetric = are.ExistingCollector.(*prometheus.SummaryVec)
+		} else {
+			log.Errorf("failed to register ipfs_http_%s: %v", name, err)
+		}
+	}
+	return summaryMetric
+}
+
+func newHistogramMetric(name string, help string) *prometheus.HistogramVec {
+	// We can add buckets as a parameter in the future, but for now using static defaults
+	// suggested in https://github.com/ipfs/kubo/issues/8441
+	defaultBuckets := []float64{0.05, 0.1, 0.25, 0.5, 1, 2, 5, 10, 30, 60}
+	histogramMetric := prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: "ipfs",
+			Subsystem: "http",
+			Name:      name,
+			Help:      help,
+			Buckets:   defaultBuckets,
+		},
+		[]string{"gateway"},
+	)
+	if err := prometheus.Register(histogramMetric); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			histogramMetric = are.ExistingCollector.(*prometheus.HistogramVec)
+		} else {
+			log.Errorf("failed to register ipfs_http_%s: %v", name, err)
+		}
+	}
+	return histogramMetric
+}
+
+// NewHandler returns an http.Handler that can act as a gateway to IPFS content.
+func NewHandler(c Config, api API) http.Handler {
+	return newHandler(c, api)
+}
+
+func newHandler(c Config, api API) *handler {
+	i := &handler{
+		config: c,
+		api:    api,
+		// Improved Metrics
+		// ----------------------------
+		// Time till the first content block (bar in /ipfs/cid/foo/bar)
+		// (format-agnostic, across all response types)
+		firstContentBlockGetMetric: newHistogramMetric(
+			"gw_first_content_block_get_latency_seconds",
+			"The time till the first content block is received on GET from the gateway.",
+		),
+
+		// Response-type specific metrics
+		// ----------------------------
+		// Generic: time it takes to execute a successful gateway request (all request types)
+		getMetric: newHistogramMetric(
+			"gw_get_duration_seconds",
+			"The time to GET a successful response to a request (all content types).",
+		),
+		// UnixFS: time it takes to return a file
+		unixfsFileGetMetric: newHistogramMetric(
+			"gw_unixfs_file_get_duration_seconds",
+			"The time to serve an entire UnixFS file from the gateway.",
+		),
+		// UnixFS: time it takes to find and serve an index.html file on behalf of a directory.
+		unixfsDirIndexGetMetric: newHistogramMetric(
+			"gw_unixfs_dir_indexhtml_get_duration_seconds",
+			"The time to serve an index.html file on behalf of a directory from the gateway. 
This is a subset of gw_unixfs_file_get_duration_seconds.", + ), + // UnixFS: time it takes to generate static HTML with directory listing + unixfsGenDirListingGetMetric: newHistogramMetric( + "gw_unixfs_gen_dir_listing_get_duration_seconds", + "The time to serve a generated UnixFS HTML directory listing from the gateway.", + ), + // CAR: time it takes to return requested CAR stream + carStreamGetMetric: newHistogramMetric( + "gw_car_stream_get_duration_seconds", + "The time to GET an entire CAR stream from the gateway.", + ), + // Block: time it takes to return requested Block + rawBlockGetMetric: newHistogramMetric( + "gw_raw_block_get_duration_seconds", + "The time to GET an entire raw Block from the gateway.", + ), + // TAR: time it takes to return requested TAR stream + tarStreamGetMetric: newHistogramMetric( + "gw_tar_stream_get_duration_seconds", + "The time to GET an entire TAR stream from the gateway.", + ), + // JSON/CBOR: time it takes to return requested DAG-JSON/-CBOR document + jsoncborDocumentGetMetric: newHistogramMetric( + "gw_jsoncbor_get_duration_seconds", + "The time to GET an entire DAG-JSON/CBOR block from the gateway.", + ), + // IPNS Record: time it takes to return IPNS record + ipnsRecordGetMetric: newHistogramMetric( + "gw_ipns_record_get_duration_seconds", + "The time to GET an entire IPNS Record from the gateway.", + ), + + // Legacy Metrics + // ---------------------------- + unixfsGetMetric: newSummaryMetric( // TODO: remove? + // (deprecated, use firstContentBlockGetMetric instead) + "unixfs_get_latency_seconds", + "DEPRECATED: does not do what you think, use gw_first_content_block_get_latency_seconds instead.", + ), + } + return i +} + +func (i *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer panicHandler(w) + + // the hour is a hard fallback, we don't expect it to happen, but just in case + ctx, cancel := context.WithTimeout(r.Context(), time.Hour) + defer cancel() + r = r.WithContext(ctx) + + switch r.Method { + case http.MethodGet, http.MethodHead: + i.getOrHeadHandler(w, r) + return + case http.MethodOptions: + i.optionsHandler(w, r) + return + } + + w.Header().Add("Allow", http.MethodGet) + w.Header().Add("Allow", http.MethodHead) + w.Header().Add("Allow", http.MethodOptions) + + errmsg := "Method " + r.Method + " not allowed: read only access" + http.Error(w, errmsg, http.StatusMethodNotAllowed) +} + +func (i *handler) optionsHandler(w http.ResponseWriter, r *http.Request) { + /* + OPTIONS is a noop request that is used by the browsers to check + if server accepts cross-site XMLHttpRequest (indicated by the presence of CORS headers) + https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests + */ + i.addUserHeaders(w) // return all custom headers (including CORS ones, if set) +} + +func (i *handler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) { + begin := time.Now() + + logger := log.With("from", r.RequestURI) + logger.Debug("http request received") + + if err := handleUnsupportedHeaders(r); err != nil { + webRequestError(w, err) + return + } + + if requestHandled := handleProtocolHandlerRedirect(w, r, logger); requestHandled { + return + } + + if err := handleServiceWorkerRegistration(r); err != nil { + webRequestError(w, err) + return + } + + contentPath := ipath.New(r.URL.Path) + ctx := context.WithValue(r.Context(), ContentPathKey, contentPath) + r = r.WithContext(ctx) + + if requestHandled := i.handleOnlyIfCached(w, r, contentPath, logger); requestHandled { + return + } + + if 
requestHandled := handleSuperfluousNamespace(w, r, contentPath); requestHandled {
+		return
+	}
+
+	// Detect when an explicit Accept header or ?format parameter is present
+	responseFormat, formatParams, err := customResponseFormat(r)
+	if err != nil {
+		webError(w, fmt.Errorf("error while processing the Accept header: %w", err), http.StatusBadRequest)
+		return
+	}
+	trace.SpanFromContext(r.Context()).SetAttributes(attribute.String("ResponseFormat", responseFormat))
+
+	resolvedPath, contentPath, ok := i.handlePathResolution(w, r, responseFormat, contentPath, logger)
+	if !ok {
+		return
+	}
+	trace.SpanFromContext(r.Context()).SetAttributes(attribute.String("ResolvedPath", resolvedPath.String()))
+
+	// Detect when the If-None-Match HTTP header allows returning HTTP 304 Not Modified
+	if inm := r.Header.Get("If-None-Match"); inm != "" {
+		pathCid := resolvedPath.Cid()
+		// need to check against both File and Dir Etag variants
+		// because this inexpensive check happens before we do any I/O
+		cidEtag := getEtag(r, pathCid)
+		dirEtag := getDirListingEtag(pathCid)
+		if etagMatch(inm, cidEtag, dirEtag) {
+			// Finish early if the client already has a matching Etag
+			w.WriteHeader(http.StatusNotModified)
+			return
+		}
+	}
+
+	if err := i.handleGettingFirstBlock(r, begin, contentPath, resolvedPath); err != nil {
+		webRequestError(w, err)
+		return
+	}
+
+	if err := i.setCommonHeaders(w, r, contentPath); err != nil {
+		webRequestError(w, err)
+		return
+	}
+
+	var success bool
+
+	// Support custom response formats passed via ?format or Accept HTTP header
+	switch responseFormat {
+	case "", "application/json", "application/cbor":
+		switch mc.Code(resolvedPath.Cid().Prefix().Codec) {
+		case mc.Json, mc.DagJson, mc.Cbor, mc.DagCbor:
+			logger.Debugw("serving codec", "path", contentPath)
+			success = i.serveCodec(r.Context(), w, r, resolvedPath, contentPath, begin, responseFormat)
+		default:
+			logger.Debugw("serving unixfs", "path", contentPath)
+			success = i.serveUnixFS(r.Context(), w, r, resolvedPath, contentPath, begin, logger)
+		}
+	case "application/vnd.ipld.raw":
+		logger.Debugw("serving raw block", "path", contentPath)
+		success = i.serveRawBlock(r.Context(), w, r, resolvedPath, contentPath, begin)
+	case "application/vnd.ipld.car":
+		logger.Debugw("serving car stream", "path", contentPath)
+		carVersion := formatParams["version"]
+		success = i.serveCAR(r.Context(), w, r, resolvedPath, contentPath, carVersion, begin)
+	case "application/x-tar":
+		logger.Debugw("serving tar file", "path", contentPath)
+		success = i.serveTAR(r.Context(), w, r, resolvedPath, contentPath, begin, logger)
+	case "application/vnd.ipld.dag-json", "application/vnd.ipld.dag-cbor":
+		logger.Debugw("serving codec", "path", contentPath)
+		success = i.serveCodec(r.Context(), w, r, resolvedPath, contentPath, begin, responseFormat)
+	case "application/vnd.ipfs.ipns-record":
+		logger.Debugw("serving ipns record", "path", contentPath)
+		success = i.serveIpnsRecord(r.Context(), w, r, resolvedPath, contentPath, begin, logger)
+	default: // catch-all for unsupported application/vnd.*
+		err := fmt.Errorf("unsupported format %q", responseFormat)
+		webError(w, err, http.StatusBadRequest)
+		return
+	}
+
+	if success {
+		i.getMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds())
+	}
+}
+
+func (i *handler) addUserHeaders(w http.ResponseWriter) {
+	for k, v := range i.config.Headers {
+		w.Header()[k] = v
+	}
+}
+
+func panicHandler(w http.ResponseWriter) {
+	if r := recover(); r != nil {
+		log.Error("A panic occurred in the gateway handler!")
the gateway handler!") + log.Error(r) + debug.PrintStack() + w.WriteHeader(http.StatusInternalServerError) + } +} + +func addCacheControlHeaders(w http.ResponseWriter, r *http.Request, contentPath ipath.Path, fileCid cid.Cid) (modtime time.Time) { + // Set Etag to based on CID (override whatever was set before) + w.Header().Set("Etag", getEtag(r, fileCid)) + + // Set Cache-Control and Last-Modified based on contentPath properties + if contentPath.Mutable() { + // mutable namespaces such as /ipns/ can't be cached forever + + /* For now we set Last-Modified to Now() to leverage caching heuristics built into modern browsers: + * https://github.com/ipfs/kubo/pull/8074#pullrequestreview-645196768 + * but we should not set it to fake values and use Cache-Control based on TTL instead */ + modtime = time.Now() + + // TODO: set Cache-Control based on TTL of IPNS/DNSLink: https://github.com/ipfs/kubo/issues/1818#issuecomment-1015849462 + // TODO: set Last-Modified based on /ipns/ publishing timestamp? + } else { + // immutable! CACHE ALL THE THINGS, FOREVER! wolololol + w.Header().Set("Cache-Control", immutableCacheControl) + + // Set modtime to 'zero time' to disable Last-Modified header (superseded by Cache-Control) + modtime = noModtime + + // TODO: set Last-Modified? - TBD - /ipfs/ modification metadata is present in unixfs 1.5 https://github.com/ipfs/kubo/issues/6920? + } + + return modtime +} + +// Set Content-Disposition if filename URL query param is present, return preferred filename +func addContentDispositionHeader(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) string { + /* This logic enables: + * - creation of HTML links that trigger "Save As.." dialog instead of being rendered by the browser + * - overriding the filename used when saving subresource assets on HTML page + * - providing a default filename for HTTP clients when downloading direct /ipfs/CID without any subpath + */ + + // URL param ?filename=cat.jpg triggers Content-Disposition: [..] filename + // which impacts default name used in "Save As.." dialog + name := getFilename(contentPath) + urlFilename := r.URL.Query().Get("filename") + if urlFilename != "" { + disposition := "inline" + // URL param ?download=true triggers Content-Disposition: [..] attachment + // which skips rendering and forces "Save As.." dialog in browsers + if r.URL.Query().Get("download") == "true" { + disposition = "attachment" + } + setContentDispositionHeader(w, urlFilename, disposition) + name = urlFilename + } + return name +} + +// Set Content-Disposition to arbitrary filename and disposition +func setContentDispositionHeader(w http.ResponseWriter, filename string, disposition string) { + utf8Name := url.PathEscape(filename) + asciiName := url.PathEscape(onlyASCII.ReplaceAllLiteralString(filename, "_")) + w.Header().Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"; filename*=UTF-8''%s", disposition, asciiName, utf8Name)) +} + +// Set X-Ipfs-Roots with logical CID array for efficient HTTP cache invalidation. +func (i *handler) buildIpfsRootsHeader(contentPath string, r *http.Request) (string, error) { + /* + These are logical roots where each CID represent one path segment + and resolves to either a directory or the root block of a file. + The main purpose of this header is allow HTTP caches to do smarter decisions + around cache invalidation (eg. 
+		around cache invalidation (eg. keep a specific subdirectory/file if it did not change)
+
+		A good example is Wikipedia, which is HAMT-sharded, but we only care about
+		logical roots that represent each segment of the human-readable content
+		path:
+
+		Given contentPath = /ipns/en.wikipedia-on-ipfs.org/wiki/Block_of_Wikipedia_in_Turkey
+		rootCidList is generated by doing `ipfs resolve -r` on each sub path:
+		/ipns/en.wikipedia-on-ipfs.org → bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze
+		/ipns/en.wikipedia-on-ipfs.org/wiki/ → bafybeihn2f7lhumh4grizksi2fl233cyszqadkn424ptjajfenykpsaiw4
+		/ipns/en.wikipedia-on-ipfs.org/wiki/Block_of_Wikipedia_in_Turkey → bafkreibn6euazfvoghepcm4efzqx5l3hieof2frhp254hio5y7n3hv5rma
+
+		The result is an ordered array of values:
+		X-Ipfs-Roots: bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze,bafybeihn2f7lhumh4grizksi2fl233cyszqadkn424ptjajfenykpsaiw4,bafkreibn6euazfvoghepcm4efzqx5l3hieof2frhp254hio5y7n3hv5rma
+
+		Note that while the top one will change every time any article is changed,
+		the last root (responsible for the specific article) may not change at all.
+	*/
+	var sp strings.Builder
+	var pathRoots []string
+	pathSegments := strings.Split(contentPath[6:], "/")
+	sp.WriteString(contentPath[:5]) // /ipfs or /ipns
+	for _, root := range pathSegments {
+		if root == "" {
+			continue
+		}
+		sp.WriteString("/")
+		sp.WriteString(root)
+		resolvedSubPath, err := i.api.ResolvePath(r.Context(), ipath.New(sp.String()))
+		if err != nil {
+			return "", err
+		}
+		pathRoots = append(pathRoots, resolvedSubPath.Cid().String())
+	}
+	rootCidList := strings.Join(pathRoots, ",") // convention from rfc2616#sec4.2
+	return rootCidList, nil
+}
+
+func getFilename(contentPath ipath.Path) string {
+	s := contentPath.String()
+	if (strings.HasPrefix(s, ipfsPathPrefix) || strings.HasPrefix(s, ipnsPathPrefix)) && strings.Count(gopath.Clean(s), "/") <= 2 {
+		// Don't want to treat ipfs.io in /ipns/ipfs.io as a filename.
+		return ""
+	}
+	return gopath.Base(s)
+}
+
+// etagMatch evaluates if we can respond with HTTP 304 Not Modified.
+// It supports multiple weak and strong etags passed in an If-None-Match string,
+// including the wildcard one.
+func etagMatch(ifNoneMatchHeader string, cidEtag string, dirEtag string) bool {
+	buf := ifNoneMatchHeader
+	for {
+		buf = textproto.TrimString(buf)
+		if len(buf) == 0 {
+			break
+		}
+		if buf[0] == ',' {
+			buf = buf[1:]
+			continue
+		}
+		// If-None-Match: * should match against any etag
+		if buf[0] == '*' {
+			return true
+		}
+		etag, remain := scanETag(buf)
+		if etag == "" {
+			break
+		}
+		// Check for a match against both strong and weak etags
+		if etagWeakMatch(etag, cidEtag) || etagWeakMatch(etag, dirEtag) {
+			return true
+		}
+		buf = remain
+	}
+	return false
+}
+
+// scanETag determines if a syntactically valid ETag is present at s. If so,
+// the ETag and remaining text after consuming the ETag are returned. Otherwise,
+// it returns "", "".
+// (This is the same logic as one executed inside of http.ServeContent)
+func scanETag(s string) (etag string, remain string) {
+	s = textproto.TrimString(s)
+	start := 0
+	if strings.HasPrefix(s, "W/") {
+		start = 2
+	}
+	if len(s[start:]) < 2 || s[start] != '"' {
+		return "", ""
+	}
+	// ETag is either W/"text" or "text".
+	// See RFC 7232 2.3.
+	for i := start + 1; i < len(s); i++ {
+		c := s[i]
+		switch {
+		// Character values allowed in ETags.
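+		// (0x21 is '!'; 0x23..0x7E spans '#'..'~', i.e. printable ASCII minus '"';
+		// and >= 0x80 admits obs-text octets, per the etagc rule in RFC 7232, section 2.3.)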
+ case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80: + case c == '"': + return s[:i+1], s[i+1:] + default: + return "", "" + } + } + return "", "" +} + +// etagWeakMatch reports whether a and b match using weak ETag comparison. +func etagWeakMatch(a, b string) bool { + return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/") +} + +// generate Etag value based on HTTP request and CID +func getEtag(r *http.Request, cid cid.Cid) string { + prefix := `"` + suffix := `"` + responseFormat, _, err := customResponseFormat(r) + if err == nil && responseFormat != "" { + // application/vnd.ipld.foo → foo + // application/x-bar → x-bar + shortFormat := responseFormat[strings.LastIndexAny(responseFormat, "/.")+1:] + // Etag: "cid.shortFmt" (gives us nice compression together with Content-Disposition in block (raw) and car responses) + suffix = `.` + shortFormat + suffix + } + // TODO: include selector suffix when https://github.com/ipfs/kubo/issues/8769 lands + return prefix + cid.String() + suffix +} + +// return explicit response format if specified in request as query parameter or via Accept HTTP header +func customResponseFormat(r *http.Request) (mediaType string, params map[string]string, err error) { + if formatParam := r.URL.Query().Get("format"); formatParam != "" { + // translate query param to a content type + switch formatParam { + case "raw": + return "application/vnd.ipld.raw", nil, nil + case "car": + return "application/vnd.ipld.car", nil, nil + case "tar": + return "application/x-tar", nil, nil + case "json": + return "application/json", nil, nil + case "cbor": + return "application/cbor", nil, nil + case "dag-json": + return "application/vnd.ipld.dag-json", nil, nil + case "dag-cbor": + return "application/vnd.ipld.dag-cbor", nil, nil + case "ipns-record": + return "application/vnd.ipfs.ipns-record", nil, nil + } + } + // Browsers and other user agents will send Accept header with generic types like: + // Accept:text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8 + // We only care about explicit, vendor-specific content-types and respond to the first match (in order). + // TODO: make this RFC compliant and respect weights (eg. return CAR for Accept:application/vnd.ipld.dag-json;q=0.1,application/vnd.ipld.car;q=0.2) + for _, header := range r.Header.Values("Accept") { + for _, value := range strings.Split(header, ",") { + accept := strings.TrimSpace(value) + // respond to the very first matching content type + if strings.HasPrefix(accept, "application/vnd.ipld") || + strings.HasPrefix(accept, "application/x-tar") || + strings.HasPrefix(accept, "application/json") || + strings.HasPrefix(accept, "application/cbor") || + strings.HasPrefix(accept, "application/vnd.ipfs") { + mediatype, params, err := mime.ParseMediaType(accept) + if err != nil { + return "", nil, err + } + return mediatype, params, nil + } + } + } + // If none of special-cased content types is found, return empty string + // to indicate default, implicit UnixFS response should be prepared + return "", nil, nil +} + +// returns unquoted path with all special characters revealed as \u codes +func debugStr(path string) string { + q := fmt.Sprintf("%+q", path) + if len(q) >= 3 { + q = q[1 : len(q)-1] + } + return q +} + +// Resolve the provided contentPath including any special handling related to +// the requested responseFormat. Returned ok flag indicates if gateway handler +// should continue processing the request. 
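+// (When resolution fails for a UnixFS response type on an origin-isolated
+// request, rules from a _redirects file at the content root may rewrite the
+// path first; failing that, a legacy ipfs-404.html is served if present.)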
+func (i *handler) handlePathResolution(w http.ResponseWriter, r *http.Request, responseFormat string, contentPath ipath.Path, logger *zap.SugaredLogger) (resolvedPath ipath.Resolved, newContentPath ipath.Path, ok bool) { + // Attempt to resolve the provided path. + resolvedPath, err := i.api.ResolvePath(r.Context(), contentPath) + + switch err { + case nil: + return resolvedPath, contentPath, true + case coreiface.ErrOffline: + err = fmt.Errorf("failed to resolve %s: %w", debugStr(contentPath.String()), err) + webError(w, err, http.StatusServiceUnavailable) + return nil, nil, false + default: + // The path can't be resolved. + if isUnixfsResponseFormat(responseFormat) { + // If we have origin isolation (subdomain gw, DNSLink website), + // and response type is UnixFS (default for website hosting) + // check for presence of _redirects file and apply rules defined there. + // See: https://github.com/ipfs/specs/pull/290 + if hasOriginIsolation(r) { + resolvedPath, newContentPath, ok, hadMatchingRule := i.serveRedirectsIfPresent(w, r, resolvedPath, contentPath, logger) + if hadMatchingRule { + logger.Debugw("applied a rule from _redirects file") + return resolvedPath, newContentPath, ok + } + } + + // if Accept is text/html, see if ipfs-404.html is present + // This logic isn't documented and will likely be removed at some point. + // Any 404 logic in _redirects above will have already run by this time, so it's really an extra fall back + if i.serveLegacy404IfPresent(w, r, contentPath) { + logger.Debugw("served legacy 404") + return nil, nil, false + } + } + + err = fmt.Errorf("failed to resolve %s: %w", debugStr(contentPath.String()), err) + webError(w, err, http.StatusInternalServerError) + return nil, nil, false + } +} + +// Detect 'Cache-Control: only-if-cached' in request and return data if it is already in the local datastore. 
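+// (For example, a client can probe for local availability with
+//	curl -H "Cache-Control: only-if-cached" http://127.0.0.1:8080/ipfs/{cid}
+// and expect HTTP 412 Precondition Failed when the content is not in the local datastore.)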
+// https://github.com/ipfs/specs/blob/main/http-gateways/PATH_GATEWAY.md#cache-control-request-header
+func (i *handler) handleOnlyIfCached(w http.ResponseWriter, r *http.Request, contentPath ipath.Path, logger *zap.SugaredLogger) (requestHandled bool) {
+	if r.Header.Get("Cache-Control") == "only-if-cached" {
+		if !i.api.IsCached(r.Context(), contentPath) {
+			if r.Method == http.MethodHead {
+				w.WriteHeader(http.StatusPreconditionFailed)
+				return true
+			}
+			errMsg := fmt.Sprintf("%q not in local datastore", contentPath.String())
+			http.Error(w, errMsg, http.StatusPreconditionFailed)
+			return true
+		}
+		if r.Method == http.MethodHead {
+			w.WriteHeader(http.StatusOK)
+			return true
+		}
+	}
+	return false
+}
+
+func handleUnsupportedHeaders(r *http.Request) (err *ErrorResponse) {
+	// X-Ipfs-Gateway-Prefix was removed (https://github.com/ipfs/kubo/issues/7702)
+	// TODO: remove this after go-ipfs 0.13 ships
+	if prfx := r.Header.Get("X-Ipfs-Gateway-Prefix"); prfx != "" {
+		err := fmt.Errorf("unsupported HTTP header: X-Ipfs-Gateway-Prefix support was removed: https://github.com/ipfs/kubo/issues/7702")
+		return NewErrorResponse(err, http.StatusBadRequest)
+	}
+	return nil
+}
+
+// ?uri query param support for requests produced by web browsers
+// via navigator.registerProtocolHandler Web API
+// https://developer.mozilla.org/en-US/docs/Web/API/Navigator/registerProtocolHandler
+// TLDR: redirect /ipfs/?uri=ipfs%3A%2F%2Fcid%3Fquery%3Dval to /ipfs/cid?query=val
+func handleProtocolHandlerRedirect(w http.ResponseWriter, r *http.Request, logger *zap.SugaredLogger) (requestHandled bool) {
+	if uriParam := r.URL.Query().Get("uri"); uriParam != "" {
+		u, err := url.Parse(uriParam)
+		if err != nil {
+			webError(w, fmt.Errorf("failed to parse uri query parameter: %w", err), http.StatusBadRequest)
+			return true
+		}
+		if u.Scheme != "ipfs" && u.Scheme != "ipns" {
+			webError(w, fmt.Errorf("uri query parameter scheme must be ipfs or ipns: %q", uriParam), http.StatusBadRequest)
+			return true
+		}
+		path := u.Path
+		if u.RawQuery != "" { // preserve query if present
+			path = path + "?" + u.RawQuery
+		}
+
+		redirectURL := gopath.Join("/", u.Scheme, u.Host, path)
+		logger.Debugw("uri param, redirect", "to", redirectURL, "status", http.StatusMovedPermanently)
+		http.Redirect(w, r, redirectURL, http.StatusMovedPermanently)
+		return true
+	}
+
+	return false
+}
+
+// Disallow Service Worker registration on namespace roots
+// https://github.com/ipfs/kubo/issues/4025
+func handleServiceWorkerRegistration(r *http.Request) (err *ErrorResponse) {
+	if r.Header.Get("Service-Worker") == "script" {
+		matched, _ := regexp.MatchString(`^/ip[fn]s/[^/]+$`, r.URL.Path)
+		if matched {
+			err := fmt.Errorf("registration is not allowed for this scope")
+			return NewErrorResponse(fmt.Errorf("navigator.serviceWorker: %w", err), http.StatusBadRequest)
+		}
+	}
+
+	return nil
+}
+
+// Attempt to fix a redundant /ipfs/ namespace as long as the resulting
+// 'intended' path is valid. This is in case gremlins were tickled the
+// wrong way and a user ended up at /ipfs/ipfs/{cid} or /ipfs/ipns/{id}
+// like in bafybeien3m7mdn6imm425vc2s22erzyhbvk5n3ofzgikkhmdkh5cuqbpbq :^))
+func handleSuperfluousNamespace(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) (requestHandled bool) {
+	// If the path is valid, there's nothing to do
+	if pathErr := contentPath.IsValid(); pathErr == nil {
+		return false
+	}
+
+	// If there's no superfluous namespace, there's nothing to do
+	if !(strings.HasPrefix(r.URL.Path, "/ipfs/ipfs/") || strings.HasPrefix(r.URL.Path, "/ipfs/ipns/")) {
+		return false
+	}
+
+	// Attempt to fix the superfluous namespace
+	intendedPath := ipath.New(strings.TrimPrefix(r.URL.Path, "/ipfs"))
+	if err := intendedPath.IsValid(); err != nil {
+		webError(w, fmt.Errorf("invalid ipfs path: %w", err), http.StatusBadRequest)
+		return true
+	}
+	intendedURL := intendedPath.String()
+	if r.URL.RawQuery != "" {
+		// we render HTML, so ensure query entries are properly escaped
+		q, _ := url.ParseQuery(r.URL.RawQuery)
+		intendedURL = intendedURL + "?" + q.Encode()
+	}
+	// return HTTP 400 (Bad Request) with an HTML error page that:
+	// - points at the correct canonical path via <link> header
+	// - displays a human-readable error
+	// - redirects to intendedURL after a short delay
+
+	w.WriteHeader(http.StatusBadRequest)
+	if err := redirectTemplate.Execute(w, redirectTemplateData{
+		RedirectURL:   intendedURL,
+		SuggestedPath: intendedPath.String(),
+		ErrorMsg:      fmt.Sprintf("invalid path: %q should be %q", r.URL.Path, intendedPath.String()),
+	}); err != nil {
+		webError(w, fmt.Errorf("failed to redirect when fixing superfluous namespace: %w", err), http.StatusBadRequest)
+	}
+
+	return true
+}
+
+func (i *handler) handleGettingFirstBlock(r *http.Request, begin time.Time, contentPath ipath.Path, resolvedPath ipath.Resolved) *ErrorResponse {
+	// Update the global metric of the time it takes to read the final root block of the requested resource
+	// NOTE: for legacy reasons this happens before we go into content-type specific code paths
+	_, err := i.api.GetBlock(r.Context(), resolvedPath.Cid())
+	if err != nil {
+		err = fmt.Errorf("could not get block %s: %w", resolvedPath.Cid().String(), err)
+		return NewErrorResponse(err, http.StatusInternalServerError)
+	}
+	ns := contentPath.Namespace()
+	timeToGetFirstContentBlock := time.Since(begin).Seconds()
+	i.unixfsGetMetric.WithLabelValues(ns).Observe(timeToGetFirstContentBlock) // deprecated, use firstContentBlockGetMetric instead
+	i.firstContentBlockGetMetric.WithLabelValues(ns).Observe(timeToGetFirstContentBlock)
+	return nil
+}
+
+func (i *handler) setCommonHeaders(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) *ErrorResponse {
+	i.addUserHeaders(w) // ok, _now_ write user's headers.
+	w.Header().Set("X-Ipfs-Path", contentPath.String())
+
+	if rootCids, err := i.buildIpfsRootsHeader(contentPath.String(), r); err == nil {
+		w.Header().Set("X-Ipfs-Roots", rootCids)
+	} else { // this should never happen, as we resolved the contentPath already
+		err = fmt.Errorf("error while resolving X-Ipfs-Roots: %w", err)
+		return NewErrorResponse(err, http.StatusInternalServerError)
+	}
+
+	return nil
+}
+
+// spanTrace starts a new span using the standard IPFS tracing conventions.
+func spanTrace(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	return otel.Tracer("boxo").Start(ctx, fmt.Sprintf("%s.%s", "Gateway", spanName), opts...)
+}
diff --git a/gateway/handler_block.go b/gateway/handler_block.go
new file mode 100644
index 0000000000..773088c17d
--- /dev/null
+++ b/gateway/handler_block.go
@@ -0,0 +1,53 @@
+package gateway
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	ipath "github.com/ipfs/boxo/coreiface/path"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// serveRawBlock returns the bytes behind a raw block
+func (i *handler) serveRawBlock(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, begin time.Time) bool {
+	ctx, span := spanTrace(ctx, "ServeRawBlock", trace.WithAttributes(attribute.String("path", resolvedPath.String())))
+	defer span.End()
+
+	blockCid := resolvedPath.Cid()
+	block, err := i.api.GetBlock(ctx, blockCid)
+	if err != nil {
+		err = fmt.Errorf("error getting block %s: %w", blockCid.String(), err)
+		webError(w, err, http.StatusInternalServerError)
+		return false
+	}
+	content := bytes.NewReader(block.RawData())
+
+	// Set Content-Disposition
+	var name string
+	if urlFilename := r.URL.Query().Get("filename"); urlFilename != "" {
+		name = urlFilename
+	} else {
+		name = blockCid.String() + ".bin"
+	}
+	setContentDispositionHeader(w, name, "attachment")
+
+	// Set remaining headers
+	modtime := addCacheControlHeaders(w, r, contentPath, blockCid)
+	w.Header().Set("Content-Type", "application/vnd.ipld.raw")
+	w.Header().Set("X-Content-Type-Options", "nosniff") // no funny business in the browsers :^)
+
+	// ServeContent will take care of
+	// If-None-Match+Etag, Content-Length and range requests
+	_, dataSent, _ := ServeContent(w, r, name, modtime, content)
+
+	if dataSent {
+		// Update metrics
+		i.rawBlockGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds())
+	}
+
+	return dataSent
+}
diff --git a/gateway/handler_car.go b/gateway/handler_car.go
new file mode 100644
index 0000000000..22611adaf0
--- /dev/null
+++ b/gateway/handler_car.go
@@ -0,0 +1,99 @@
+package gateway
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	ipath "github.com/ipfs/boxo/coreiface/path"
+	gocar "github.com/ipfs/boxo/ipld/car"
+	cid "github.com/ipfs/go-cid"
+	selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// serveCAR returns a CAR stream for a specific DAG+selector
+func (i *handler) serveCAR(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, carVersion string, begin time.Time) bool {
+	ctx, span := spanTrace(ctx, "ServeCAR", trace.WithAttributes(attribute.String("path", resolvedPath.String())))
+	defer span.End()
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	switch carVersion {
+	case "": // noop, client does not care about version
+	case "1": // noop, we support this
+	default:
+		err := fmt.Errorf("unsupported CAR version: only version=1 is supported")
+		webError(w, err, http.StatusBadRequest)
+		return false
+	}
+	rootCid := resolvedPath.Cid()
+
+	// Set Content-Disposition
+	var name string
+	if urlFilename := r.URL.Query().Get("filename"); urlFilename != "" {
+		name = urlFilename
+	} else {
+		name = rootCid.String() + ".car"
+	}
+	setContentDispositionHeader(w, name, "attachment")
+
+	// Set Cache-Control (same logic as for regular files)
+	addCacheControlHeaders(w, r, contentPath, rootCid)
+
+	// Weak Etag W/ because we can't guarantee byte-for-byte identical
+	// responses, but still want to benefit from HTTP Caching. Two CAR
+	// responses for the same CID and selector will be logically equivalent,
+	// but when a CAR is streamed, then in theory, blocks may arrive from the
+	// datastore in non-deterministic order.
+	etag := `W/` + getEtag(r, rootCid)
+	w.Header().Set("Etag", etag)
+
+	// Finish early if Etag match
+	if r.Header.Get("If-None-Match") == etag {
+		w.WriteHeader(http.StatusNotModified)
+		return false
+	}
+
+	// Make it clear we don't support range-requests over a car stream
+	// Partial downloads and resumes should be handled using requests for
+	// sub-DAGs and IPLD selectors: https://github.com/ipfs/go-ipfs/issues/8769
+	w.Header().Set("Accept-Ranges", "none")
+
+	w.Header().Set("Content-Type", "application/vnd.ipld.car; version=1")
+	w.Header().Set("X-Content-Type-Options", "nosniff") // no funny business in the browsers :^)
+
+	// Same go-car settings as the dag.export command
+	store := dagStore{api: i.api, ctx: ctx}
+
+	// TODO: support selectors passed as request param: https://github.com/ipfs/kubo/issues/8769
+	dag := gocar.Dag{Root: rootCid, Selector: selectorparse.CommonSelector_ExploreAllRecursively}
+	car := gocar.NewSelectiveCar(ctx, store, []gocar.Dag{dag}, gocar.TraverseLinksOnlyOnce())
+
+	if err := car.Write(w); err != nil {
+		// We return the error as a trailer, however it is not something browsers can access
+		// (https://github.com/mdn/browser-compat-data/issues/14703)
+		// Due to this, we suggest clients always verify that the received CAR
+		// stream matches the requested DAG selector
+		w.Header().Set("X-Stream-Error", err.Error())
+		return false
+	}
+
+	// Update metrics
+	i.carStreamGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds())
+	return true
+}
+
+// FIXME(@Jorropo): https://github.com/ipld/go-car/issues/315
+type dagStore struct {
+	api API
+	ctx context.Context
+}
+
+func (ds dagStore) Get(_ context.Context, c cid.Cid) (blocks.Block, error) {
+	return ds.api.GetBlock(ds.ctx, c)
+}
diff --git a/gateway/handler_codec.go b/gateway/handler_codec.go
new file mode 100644
index 0000000000..35199679cc
--- /dev/null
+++ b/gateway/handler_codec.go
@@ -0,0 +1,271 @@
+package gateway
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	ipath "github.com/ipfs/boxo/coreiface/path"
+	"github.com/ipfs/boxo/gateway/assets"
+	cid "github.com/ipfs/go-cid"
+	"github.com/ipld/go-ipld-prime/multicodec"
+	"github.com/ipld/go-ipld-prime/node/basicnode"
+	mc "github.com/multiformats/go-multicodec"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// codecToContentType maps the supported IPLD codecs to the HTTP Content
+// Type they should have.
+var codecToContentType = map[mc.Code]string{
+	mc.Json:    "application/json",
+	mc.Cbor:    "application/cbor",
+	mc.DagJson: "application/vnd.ipld.dag-json",
+	mc.DagCbor: "application/vnd.ipld.dag-cbor",
+}
+
+// contentTypeToRaw maps the HTTP Content Type to the respective codec that
+// allows a raw response without any conversion.
+var contentTypeToRaw = map[string][]mc.Code{
+	"application/json": {mc.Json, mc.DagJson},
+	"application/cbor": {mc.Cbor, mc.DagCbor},
+}
+
+// contentTypeToCodec maps the HTTP Content Type to the respective codec. We
+// only add here the codecs that we want to convert to and from.
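+// (For example, a request with Accept: application/vnd.ipld.dag-json against a
+// dag-cbor CID is decoded with the block's own codec and re-encoded with the
+// DAG-JSON encoder in serveCodecConverted below.)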
+var contentTypeToCodec = map[string]mc.Code{ + "application/vnd.ipld.dag-json": mc.DagJson, + "application/vnd.ipld.dag-cbor": mc.DagCbor, +} + +// contentTypeToExtension maps the HTTP Content Type to the respective file +// extension, used in Content-Disposition header when downloading the file. +var contentTypeToExtension = map[string]string{ + "application/json": ".json", + "application/vnd.ipld.dag-json": ".json", + "application/cbor": ".cbor", + "application/vnd.ipld.dag-cbor": ".cbor", +} + +func (i *handler) serveCodec(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, begin time.Time, requestedContentType string) bool { + ctx, span := spanTrace(ctx, "ServeCodec", trace.WithAttributes(attribute.String("path", resolvedPath.String()), attribute.String("requestedContentType", requestedContentType))) + defer span.End() + + cidCodec := mc.Code(resolvedPath.Cid().Prefix().Codec) + responseContentType := requestedContentType + + // If the resolved path still has some remainder, return error for now. + // TODO: handle this when we have IPLD Patch (https://ipld.io/specs/patch/) via HTTP PUT + // TODO: (depends on https://github.com/ipfs/kubo/issues/4801 and https://github.com/ipfs/kubo/issues/4782) + if resolvedPath.Remainder() != "" { + path := strings.TrimSuffix(resolvedPath.String(), resolvedPath.Remainder()) + err := fmt.Errorf("%q of %q could not be returned: reading IPLD Kinds other than Links (CBOR Tag 42) is not implemented: try reading %q instead", resolvedPath.Remainder(), resolvedPath.String(), path) + webError(w, err, http.StatusNotImplemented) + return false + } + + // If no explicit content type was requested, the response will have one based on the codec from the CID + if requestedContentType == "" { + cidContentType, ok := codecToContentType[cidCodec] + if !ok { + // Should not happen unless function is called with wrong parameters. + err := fmt.Errorf("content type not found for codec: %v", cidCodec) + webError(w, err, http.StatusInternalServerError) + return false + } + responseContentType = cidContentType + } + + // Set HTTP headers (for caching etc) + modtime := addCacheControlHeaders(w, r, contentPath, resolvedPath.Cid()) + name := setCodecContentDisposition(w, r, resolvedPath, responseContentType) + w.Header().Set("Content-Type", responseContentType) + w.Header().Set("X-Content-Type-Options", "nosniff") + + // No content type is specified by the user (via Accept, or format=). However, + // we support this format. Let's handle it. + if requestedContentType == "" { + isDAG := cidCodec == mc.DagJson || cidCodec == mc.DagCbor + acceptsHTML := strings.Contains(r.Header.Get("Accept"), "text/html") + download := r.URL.Query().Get("download") == "true" + + if isDAG && acceptsHTML && !download { + return i.serveCodecHTML(ctx, w, r, resolvedPath, contentPath) + } else { + // This covers CIDs with codec 'json' and 'cbor' as those do not have + // an explicit requested content type. + return i.serveCodecRaw(ctx, w, r, resolvedPath, contentPath, name, modtime, begin) + } + } + + // If DAG-JSON or DAG-CBOR was requested using corresponding plain content type + // return raw block as-is, without conversion + skipCodecs, ok := contentTypeToRaw[requestedContentType] + if ok { + for _, skipCodec := range skipCodecs { + if skipCodec == cidCodec { + return i.serveCodecRaw(ctx, w, r, resolvedPath, contentPath, name, modtime, begin) + } + } + } + + // Otherwise, the user has requested a specific content type (a DAG-* variant). 
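The conversion branch that `serveCodec` dispatches to (`serveCodecConverted`, later in this file) decodes the block with the codec implied by its CID and re-encodes it with the requested codec. Here is a standalone sketch of that same round trip using go-ipld-prime directly; the hand-built sample node stands in for a block fetched from the blockstore:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ipld/go-ipld-prime/codec/dagcbor"
	"github.com/ipld/go-ipld-prime/codec/dagjson"
	"github.com/ipld/go-ipld-prime/fluent"
	"github.com/ipld/go-ipld-prime/node/basicnode"
)

func main() {
	// Build a small IPLD node and encode it as DAG-CBOR,
	// standing in for a raw block fetched by CID.
	n := fluent.MustBuildMap(basicnode.Prototype.Map, 1, func(ma fluent.MapAssembler) {
		ma.AssembleEntry("hello").AssignString("world")
	})
	var cborBuf bytes.Buffer
	if err := dagcbor.Encode(n, &cborBuf); err != nil {
		panic(err)
	}

	// Decode with the codec implied by the CID (dag-cbor here)...
	nb := basicnode.Prototype.Any.NewBuilder()
	if err := dagcbor.Decode(nb, &cborBuf); err != nil {
		panic(err)
	}

	// ...and re-encode with the requested codec (dag-json).
	var jsonBuf bytes.Buffer
	if err := dagjson.Encode(nb.Build(), &jsonBuf); err != nil {
		panic(err)
	}
	fmt.Println(jsonBuf.String()) // {"hello":"world"}
}
```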
+ // Let's first get the codecs that can be used with this content type. + toCodec, ok := contentTypeToCodec[requestedContentType] + if !ok { + // This is never supposed to happen unless function is called with wrong parameters. + err := fmt.Errorf("unsupported content type: %q", requestedContentType) + webError(w, err, http.StatusInternalServerError) + return false + } + + // This handles DAG-* conversions and validations. + return i.serveCodecConverted(ctx, w, r, resolvedPath, contentPath, toCodec, modtime, begin) +} + +func (i *handler) serveCodecHTML(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path) bool { + // A HTML directory index will be presented, be sure to set the correct + // type instead of relying on autodetection (which may fail). + w.Header().Set("Content-Type", "text/html") + + // Clear Content-Disposition -- we want HTML to be rendered inline + w.Header().Del("Content-Disposition") + + // Generated index requires custom Etag (output may change between Kubo versions) + dagEtag := getDagIndexEtag(resolvedPath.Cid()) + w.Header().Set("Etag", dagEtag) + + // Remove Cache-Control for now to match UnixFS dir-index-html responses + // (we don't want browser to cache HTML forever) + // TODO: if we ever change behavior for UnixFS dir listings, same changes should be applied here + w.Header().Del("Cache-Control") + + cidCodec := mc.Code(resolvedPath.Cid().Prefix().Codec) + if err := assets.DagTemplate.Execute(w, assets.DagTemplateData{ + Path: contentPath.String(), + CID: resolvedPath.Cid().String(), + CodecName: cidCodec.String(), + CodecHex: fmt.Sprintf("0x%x", uint64(cidCodec)), + }); err != nil { + err = fmt.Errorf("failed to generate HTML listing for this DAG: try fetching raw block with ?format=raw: %w", err) + webError(w, err, http.StatusInternalServerError) + return false + } + + return true +} + +// serveCodecRaw returns the raw block without any conversion +func (i *handler) serveCodecRaw(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, name string, modtime, begin time.Time) bool { + blockCid := resolvedPath.Cid() + block, err := i.api.GetBlock(ctx, blockCid) + if err != nil { + err = fmt.Errorf("error getting block %s: %w", blockCid.String(), err) + webError(w, err, http.StatusInternalServerError) + return false + } + content := bytes.NewReader(block.RawData()) + + // ServeContent will take care of + // If-None-Match+Etag, Content-Length and range requests + _, dataSent, _ := ServeContent(w, r, name, modtime, content) + + if dataSent { + // Update metrics + i.jsoncborDocumentGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + } + + return dataSent +} + +// serveCodecConverted returns payload converted to codec specified in toCodec +func (i *handler) serveCodecConverted(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, toCodec mc.Code, modtime, begin time.Time) bool { + blockCid := resolvedPath.Cid() + block, err := i.api.GetBlock(ctx, blockCid) + if err != nil { + err = fmt.Errorf("error getting block %s: %w", blockCid.String(), err) + webError(w, err, http.StatusInternalServerError) + return false + } + + codec := blockCid.Prefix().Codec + decoder, err := multicodec.LookupDecoder(codec) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + node := basicnode.Prototype.Any.NewBuilder() + err = decoder(node, 
bytes.NewReader(block.RawData())) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + encoder, err := multicodec.LookupEncoder(uint64(toCodec)) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + // Ensure IPLD node conforms to the codec specification. + var buf bytes.Buffer + err = encoder(node.Build(), &buf) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + // Sets correct Last-Modified header. This code is borrowed from the standard + // library (net/http/server.go) as we cannot use serveFile. + if !(modtime.IsZero() || modtime.Equal(unixEpochTime)) { + w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat)) + } + + _, err = w.Write(buf.Bytes()) + if err == nil { + // Update metrics + i.jsoncborDocumentGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + return true + } + + return false +} + +func setCodecContentDisposition(w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentType string) string { + var dispType, name string + + ext, ok := contentTypeToExtension[contentType] + if !ok { + // Should never happen. + ext = ".bin" + } + + if urlFilename := r.URL.Query().Get("filename"); urlFilename != "" { + name = urlFilename + } else { + name = resolvedPath.Cid().String() + ext + } + + // JSON should be inlined, but ?download=true should still override + if r.URL.Query().Get("download") == "true" { + dispType = "attachment" + } else { + switch ext { + case ".json": // codecs that serialize to JSON can be rendered by browsers + dispType = "inline" + default: // everything else is assumed binary / opaque bytes + dispType = "attachment" + } + } + + setContentDispositionHeader(w, name, dispType) + return name +} + +func getDagIndexEtag(dagCid cid.Cid) string { + return `"DagIndex-` + assets.AssetHash + `_CID-` + dagCid.String() + `"` +} diff --git a/gateway/handler_ipns_record.go b/gateway/handler_ipns_record.go new file mode 100644 index 0000000000..a1487f0c8e --- /dev/null +++ b/gateway/handler_ipns_record.go @@ -0,0 +1,90 @@ +package gateway + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + ipath "github.com/ipfs/boxo/coreiface/path" + ipns_pb "github.com/ipfs/boxo/ipns/pb" + "github.com/ipfs/go-cid" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +func (i *handler) serveIpnsRecord(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, begin time.Time, logger *zap.SugaredLogger) bool { + ctx, span := spanTrace(ctx, "ServeIPNSRecord", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) + defer span.End() + + if contentPath.Namespace() != "ipns" { + err := fmt.Errorf("%s is not an IPNS link", contentPath.String()) + webError(w, err, http.StatusBadRequest) + return false + } + + key := contentPath.String() + key = strings.TrimSuffix(key, "/") + key = strings.TrimPrefix(key, "/ipns/") + if strings.Count(key, "/") != 0 { + err := errors.New("cannot find ipns key for subpath") + webError(w, err, http.StatusBadRequest) + return false + } + + c, err := cid.Decode(key) + if err != nil { + webError(w, err, http.StatusBadRequest) + return false + } + + rawRecord, err := i.api.GetIPNSRecord(ctx, c) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + var record ipns_pb.IpnsEntry 
+ err = proto.Unmarshal(rawRecord, &record) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + // Set cache control headers based on the TTL set in the IPNS record. If the + // TTL is not present, we use the Last-Modified tag. We are tracking IPNS + // caching on: https://github.com/ipfs/kubo/issues/1818. + // TODO: use addCacheControlHeaders once #1818 is fixed. + w.Header().Set("Etag", getEtag(r, resolvedPath.Cid())) + if record.Ttl != nil { + seconds := int(time.Duration(*record.Ttl).Seconds()) + w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", seconds)) + } else { + w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) + } + + // Set Content-Disposition + var name string + if urlFilename := r.URL.Query().Get("filename"); urlFilename != "" { + name = urlFilename + } else { + name = key + ".ipns-record" + } + setContentDispositionHeader(w, name, "attachment") + + w.Header().Set("Content-Type", "application/vnd.ipfs.ipns-record") + w.Header().Set("X-Content-Type-Options", "nosniff") + + _, err = w.Write(rawRecord) + if err == nil { + // Update metrics + i.ipnsRecordGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + return true + } + + return false +} diff --git a/gateway/handler_tar.go b/gateway/handler_tar.go new file mode 100644 index 0000000000..7d835bb337 --- /dev/null +++ b/gateway/handler_tar.go @@ -0,0 +1,97 @@ +package gateway + +import ( + "context" + "fmt" + "html" + "net/http" + "time" + + ipath "github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/files" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +var unixEpochTime = time.Unix(0, 0) + +func (i *handler) serveTAR(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, begin time.Time, logger *zap.SugaredLogger) bool { + ctx, span := spanTrace(ctx, "ServeTAR", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Get Unixfs file + file, err := i.api.GetUnixFsNode(ctx, resolvedPath) + if err != nil { + err = fmt.Errorf("error getting UnixFS node for %s: %w", html.EscapeString(contentPath.String()), err) + webError(w, err, http.StatusInternalServerError) + return false + } + defer file.Close() + + rootCid := resolvedPath.Cid() + + // Set Cache-Control and read optional Last-Modified time + modtime := addCacheControlHeaders(w, r, contentPath, rootCid) + + // Weak Etag W/ because we can't guarantee byte-for-byte identical + // responses, but still want to benefit from HTTP Caching. Two TAR + // responses for the same CID will be logically equivalent, + // but when TAR is streamed, then in theory, files and directories + // may arrive in different order (depends on TAR lib and filesystem/inodes). 
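Like the CAR handler, `serveTAR` below can only signal a mid-stream failure by setting `X-Stream-Error` and appending the error text to the stream, so clients should unpack defensively and treat a malformed archive tail as a server-side error. A minimal consumer sketch using only the standard library; the URL is an illustrative assumption:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical gateway address and CID, for illustration only.
	res, err := http.Get("http://127.0.0.1:8080/ipfs/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi?format=tar")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	tr := tar.NewReader(res.Body)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // clean end of archive
		}
		if err != nil {
			// A corrupted or truncated TAR usually means the gateway hit a
			// mid-stream error; its text may be appended at the stream tail.
			fmt.Fprintf(os.Stderr, "tar stream ended abnormally: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
		if _, err := io.Copy(io.Discard, tr); err != nil {
			panic(err)
		}
	}
}
```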
+	etag := `W/` + getEtag(r, rootCid)
+	w.Header().Set("Etag", etag)
+
+	// Finish early if Etag match
+	if r.Header.Get("If-None-Match") == etag {
+		w.WriteHeader(http.StatusNotModified)
+		return false
+	}
+
+	// Set Content-Disposition
+	var name string
+	if urlFilename := r.URL.Query().Get("filename"); urlFilename != "" {
+		name = urlFilename
+	} else {
+		name = rootCid.String() + ".tar"
+	}
+	setContentDispositionHeader(w, name, "attachment")
+
+	// Construct the TAR writer
+	tarw, err := files.NewTarWriter(w)
+	if err != nil {
+		webError(w, fmt.Errorf("could not build tar writer: %w", err), http.StatusInternalServerError)
+		return false
+	}
+	defer tarw.Close()
+
+	// Sets correct Last-Modified header. This code is borrowed from the standard
+	// library (net/http/server.go) as we cannot use serveFile without loading the
+	// entire TAR into memory first.
+	if !(modtime.IsZero() || modtime.Equal(unixEpochTime)) {
+		w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat))
+	}
+
+	w.Header().Set("Content-Type", "application/x-tar")
+	w.Header().Set("X-Content-Type-Options", "nosniff") // no funny business in the browsers :^)
+
+	// The TAR has a top-level directory (or file) named by the CID.
+	if err := tarw.WriteFile(file, rootCid.String()); err != nil {
+		w.Header().Set("X-Stream-Error", err.Error())
+		// Trailer headers do not work in web browsers
+		// (see https://github.com/mdn/browser-compat-data/issues/14703)
+		// and we have limited options around error handling in browser contexts.
+		// To improve UX/DX, we finish the response stream with the error message,
+		// allowing the client to
+		// (1) detect the error by receiving a corrupted TAR
+		// (2) reason about what went wrong by inspecting the tail of the TAR stream
+		_, _ = w.Write([]byte(err.Error()))
+		return false
+	}
+
+	// Update metrics
+	i.tarStreamGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds())
+	return true
+}
diff --git a/gateway/handler_test.go b/gateway/handler_test.go
new file mode 100644
index 0000000000..57fcead819
--- /dev/null
+++ b/gateway/handler_test.go
@@ -0,0 +1,208 @@
+package gateway
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"testing"
+	"time"
+
+	"github.com/ipfs/boxo/blocks"
+	iface "github.com/ipfs/boxo/coreiface"
+	ipath "github.com/ipfs/boxo/coreiface/path"
+	"github.com/ipfs/boxo/files"
+	"github.com/ipfs/boxo/path/resolver"
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestEtagMatch(t *testing.T) {
+	for _, test := range []struct {
+		header   string // value in If-None-Match HTTP header
+		cidEtag  string
+		dirEtag  string
+		expected bool // expected result of etagMatch(header, cidEtag, dirEtag)
+	}{
+		{"", `"etag"`, "", false},                        // no If-None-Match
+		{"", "", `"etag"`, false},                        // no If-None-Match
+		{`"etag"`, `"etag"`, "", true},                   // file etag match
+		{`W/"etag"`, `"etag"`, "", true},                 // file etag match
+		{`"foo", W/"bar", W/"etag"`, `"etag"`, "", true}, // file etag match (array)
+		{`"foo",W/"bar",W/"etag"`, `"etag"`, "", true},   // file etag match (compact array)
+		{`"etag"`, "", `W/"etag"`, true},                 // dir etag match
+		{`W/"etag"`, "", `W/"etag"`, true},               // dir etag match
+		{`*`, `"etag"`, "", true},                        // wildcard etag match
+	} {
+		result := etagMatch(test.header, test.cidEtag, test.dirEtag)
+		assert.Equalf(t, test.expected, result, "etagMatch(%q, %q, %q)", test.header, test.cidEtag, test.dirEtag)
+	}
+}
+
+type errorMockAPI
struct { + err error +} + +func (api *errorMockAPI) GetUnixFsNode(context.Context, ipath.Resolved) (files.Node, error) { + return nil, api.err +} + +func (api *errorMockAPI) LsUnixFsDir(ctx context.Context, p ipath.Resolved) (<-chan iface.DirEntry, error) { + return nil, api.err +} + +func (api *errorMockAPI) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + return nil, api.err +} + +func (api *errorMockAPI) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, error) { + return nil, api.err +} + +func (api *errorMockAPI) GetDNSLinkRecord(ctx context.Context, hostname string) (ipath.Path, error) { + return nil, api.err +} + +func (api *errorMockAPI) IsCached(ctx context.Context, p ipath.Path) bool { + return false +} + +func (api *errorMockAPI) ResolvePath(ctx context.Context, ip ipath.Path) (ipath.Resolved, error) { + return nil, api.err +} + +func TestGatewayBadRequestInvalidPath(t *testing.T) { + api, _ := newMockAPI(t) + ts := newTestServer(t, api) + t.Logf("test server url: %s", ts.URL) + + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipfs/QmInvalid/Path", nil) + assert.Nil(t, err) + + res, err := ts.Client().Do(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusBadRequest, res.StatusCode) +} + +func TestErrorBubblingFromAPI(t *testing.T) { + t.Parallel() + + for _, test := range []struct { + name string + err error + status int + }{ + {"404 Not Found from IPLD", &ipld.ErrNotFound{}, http.StatusNotFound}, + {"404 Not Found from path resolver", resolver.ErrNoLink{}, http.StatusNotFound}, + {"502 Bad Gateway", ErrBadGateway, http.StatusBadGateway}, + {"504 Gateway Timeout", ErrGatewayTimeout, http.StatusGatewayTimeout}, + } { + t.Run(test.name, func(t *testing.T) { + api := &errorMockAPI{err: fmt.Errorf("wrapped for testing purposes: %w", test.err)} + ts := newTestServer(t, api) + t.Logf("test server url: %s", ts.URL) + + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipns/en.wikipedia-on-ipfs.org", nil) + assert.Nil(t, err) + + res, err := ts.Client().Do(req) + assert.Nil(t, err) + assert.Equal(t, test.status, res.StatusCode) + }) + } + + for _, test := range []struct { + name string + err error + status int + headerName string + headerValue string + headerLength int // how many times was headerName set + }{ + {"429 Too Many Requests without Retry-After header", ErrTooManyRequests, http.StatusTooManyRequests, "Retry-After", "", 0}, + {"429 Too Many Requests without Retry-After header", NewErrorRetryAfter(ErrTooManyRequests, 0*time.Second), http.StatusTooManyRequests, "Retry-After", "", 0}, + {"429 Too Many Requests with Retry-After header", NewErrorRetryAfter(ErrTooManyRequests, 3600*time.Second), http.StatusTooManyRequests, "Retry-After", "3600", 1}, + } { + api := &errorMockAPI{err: fmt.Errorf("wrapped for testing purposes: %w", test.err)} + ts := newTestServer(t, api) + t.Logf("test server url: %s", ts.URL) + + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipns/en.wikipedia-on-ipfs.org", nil) + assert.Nil(t, err) + + res, err := ts.Client().Do(req) + assert.Nil(t, err) + assert.Equal(t, test.status, res.StatusCode) + assert.Equal(t, test.headerValue, res.Header.Get(test.headerName)) + assert.Equal(t, test.headerLength, len(res.Header.Values(test.headerName))) + } +} + +type panicMockAPI struct { + panicOnHostnameHandler bool +} + +func (api *panicMockAPI) GetUnixFsNode(context.Context, ipath.Resolved) (files.Node, error) { + panic("i am panicking") +} + +func (api *panicMockAPI) LsUnixFsDir(ctx context.Context, p ipath.Resolved) (<-chan 
iface.DirEntry, error) { + panic("i am panicking") +} + +func (api *panicMockAPI) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + panic("i am panicking") +} + +func (api *panicMockAPI) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, error) { + panic("i am panicking") +} + +func (api *panicMockAPI) GetDNSLinkRecord(ctx context.Context, hostname string) (ipath.Path, error) { + // GetDNSLinkRecord is also called on the WithHostname handler. We have this option + // to disable panicking here so we can test if both the regular gateway handler + // and the hostname handler can handle panics. + if api.panicOnHostnameHandler { + panic("i am panicking") + } + + return nil, errors.New("not implemented") +} + +func (api *panicMockAPI) IsCached(ctx context.Context, p ipath.Path) bool { + panic("i am panicking") +} + +func (api *panicMockAPI) ResolvePath(ctx context.Context, ip ipath.Path) (ipath.Resolved, error) { + panic("i am panicking") +} + +func TestGatewayStatusCodeOnPanic(t *testing.T) { + api := &panicMockAPI{} + ts := newTestServer(t, api) + t.Logf("test server url: %s", ts.URL) + + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipfs/bafkreifzjut3te2nhyekklss27nh3k72ysco7y32koao5eei66wof36n5e", nil) + assert.Nil(t, err) + + res, err := ts.Client().Do(req) + assert.Nil(t, err) + assert.Equal(t, http.StatusInternalServerError, res.StatusCode) +} + +func TestGatewayStatusCodeOnHostnamePanic(t *testing.T) { + api := &panicMockAPI{panicOnHostnameHandler: true} + ts := newTestServer(t, api) + t.Logf("test server url: %s", ts.URL) + + req, err := http.NewRequest(http.MethodGet, ts.URL+"/ipfs/bafkreifzjut3te2nhyekklss27nh3k72ysco7y32koao5eei66wof36n5e", nil) + assert.Nil(t, err) + + res, err := ts.Client().Do(req) + assert.Nil(t, err) + assert.Equal(t, http.StatusInternalServerError, res.StatusCode) +} diff --git a/gateway/handler_unixfs.go b/gateway/handler_unixfs.go new file mode 100644 index 0000000000..f5a96b146a --- /dev/null +++ b/gateway/handler_unixfs.go @@ -0,0 +1,44 @@ +package gateway + +import ( + "context" + "fmt" + "net/http" + "time" + + ipath "github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/files" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +func (i *handler) serveUnixFS(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, begin time.Time, logger *zap.SugaredLogger) bool { + ctx, span := spanTrace(ctx, "ServeUnixFS", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) + defer span.End() + + // Handling UnixFS + dr, err := i.api.GetUnixFsNode(ctx, resolvedPath) + if err != nil { + err = fmt.Errorf("error while getting UnixFS node: %w", err) + webError(w, err, http.StatusInternalServerError) + return false + } + defer dr.Close() + + // Handling Unixfs file + if f, ok := dr.(files.File); ok { + logger.Debugw("serving unixfs file", "path", contentPath) + return i.serveFile(ctx, w, r, resolvedPath, contentPath, f, begin) + } + + // Handling Unixfs directory + dir, ok := dr.(files.Directory) + if !ok { + webError(w, fmt.Errorf("unsupported UnixFS type"), http.StatusInternalServerError) + return false + } + + logger.Debugw("serving unixfs directory", "path", contentPath) + return i.serveDirectory(ctx, w, r, resolvedPath, contentPath, dir, begin, logger) +} diff --git a/gateway/handler_unixfs__redirects.go b/gateway/handler_unixfs__redirects.go new file mode 100644 index 0000000000..afd0c7c3c8 --- /dev/null +++ 
b/gateway/handler_unixfs__redirects.go
@@ -0,0 +1,287 @@
+package gateway
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	gopath "path"
+	"strconv"
+	"strings"
+
+	ipath "github.com/ipfs/boxo/coreiface/path"
+	"github.com/ipfs/boxo/files"
+	redirects "github.com/ipfs/go-ipfs-redirects-file"
+	"go.uber.org/zap"
+)
+
+// Resolving a UnixFS path involves determining if the provided `path.Path` exists and returning the `path.Resolved`
+// corresponding to that path. For UnixFS, path resolution is more involved.
+//
+// When a path under the requested CID does not exist, the Gateway will check if a `_redirects` file exists
+// underneath the root CID of the path, and apply rules defined there.
+// See the specification introduced in: https://github.com/ipfs/specs/pull/290
+//
+// Scenario 1:
+// If a path exists, we always return the `path.Resolved` corresponding to that path, regardless of the existence of a `_redirects` file.
+//
+// Scenario 2:
+// If a path does not exist, usually we should return a `nil` resolution path and an error indicating that the path
+// doesn't exist. However, a `_redirects` file may exist and contain a redirect rule that redirects that path to a different path.
+// We need to evaluate the rule and perform the redirect if present.
+//
+// Scenario 3:
+// Another possibility is that the path corresponds to a rewrite rule (i.e. a rule with a status of 200).
+// In this case, we don't perform a redirect, but do need to return a `path.Resolved` and `path.Path` corresponding to
+// the rewrite destination path.
+//
+// Note that for security reasons, redirect rules are only processed when the request has origin isolation.
+// See https://github.com/ipfs/specs/pull/290 for more information.
+func (i *handler) serveRedirectsIfPresent(w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, logger *zap.SugaredLogger) (newResolvedPath ipath.Resolved, newContentPath ipath.Path, continueProcessing bool, hadMatchingRule bool) {
+	redirectsFile := i.getRedirectsFile(r, contentPath, logger)
+	if redirectsFile != nil {
+		redirectRules, err := i.getRedirectRules(r, redirectsFile)
+		if err != nil {
+			webError(w, err, http.StatusInternalServerError)
+			return nil, nil, false, true
+		}
+
+		redirected, newPath, err := i.handleRedirectsFileRules(w, r, contentPath, redirectRules)
+		if err != nil {
+			err = fmt.Errorf("trouble processing _redirects file at %q: %w", redirectsFile.String(), err)
+			webError(w, err, http.StatusInternalServerError)
+			return nil, nil, false, true
+		}
+
+		if redirected {
+			return nil, nil, false, true
+		}
+
+		// 200 is treated as a rewrite, so update the path and continue
+		if newPath != "" {
+			// Reassign contentPath and resolvedPath since the URL was rewritten
+			contentPath = ipath.New(newPath)
+			resolvedPath, err = i.api.ResolvePath(r.Context(), contentPath)
+			if err != nil {
+				webError(w, err, http.StatusInternalServerError)
+				return nil, nil, false, true
+			}
+
+			return resolvedPath, contentPath, true, true
+		}
+	}
+	// No matching rule, paths remain the same, continue regular processing
+	return resolvedPath, contentPath, true, false
+}
+
+func (i *handler) handleRedirectsFileRules(w http.ResponseWriter, r *http.Request, contentPath ipath.Path, redirectRules []redirects.Rule) (redirected bool, newContentPath string, err error) {
+	// Attempt to match a rule to the URL path, and perform the corresponding redirect or rewrite
+	pathParts := strings.Split(contentPath.String(), "/")
+	if len(pathParts) > 3 {
+		// All paths should start with
/ipfs/cid/, so get the path after that + urlPath := "/" + strings.Join(pathParts[3:], "/") + rootPath := strings.Join(pathParts[:3], "/") + // Trim off the trailing / + urlPath = strings.TrimSuffix(urlPath, "/") + + for _, rule := range redirectRules { + // Error right away if the rule is invalid + if !rule.MatchAndExpandPlaceholders(urlPath) { + continue + } + + // We have a match! + + // Rewrite + if rule.Status == 200 { + // Prepend the rootPath + toPath := rootPath + rule.To + return false, toPath, nil + } + + // Or 4xx + if rule.Status == 404 || rule.Status == 410 || rule.Status == 451 { + toPath := rootPath + rule.To + content4xxPath := ipath.New(toPath) + err := i.serve4xx(w, r, content4xxPath, rule.Status) + return true, toPath, err + } + + // Or redirect + if rule.Status >= 301 && rule.Status <= 308 { + http.Redirect(w, r, rule.To, rule.Status) + return true, "", nil + } + } + } + + // No redirects matched + return false, "", nil +} + +func (i *handler) getRedirectRules(r *http.Request, redirectsFilePath ipath.Resolved) ([]redirects.Rule, error) { + // Convert the path into a file node + node, err := i.api.GetUnixFsNode(r.Context(), redirectsFilePath) + if err != nil { + return nil, fmt.Errorf("could not get _redirects: %w", err) + } + defer node.Close() + + // Convert the node into a file + f, ok := node.(files.File) + if !ok { + return nil, fmt.Errorf("could not parse _redirects: %w", err) + } + + // Parse redirect rules from file + redirectRules, err := redirects.Parse(f) + if err != nil { + return nil, fmt.Errorf("could not parse _redirects: %w", err) + } + + return redirectRules, nil +} + +// Returns a resolved path to the _redirects file located in the root CID path of the requested path +func (i *handler) getRedirectsFile(r *http.Request, contentPath ipath.Path, logger *zap.SugaredLogger) ipath.Resolved { + // contentPath is the full ipfs path to the requested resource, + // regardless of whether path or subdomain resolution is used. + rootPath := getRootPath(contentPath) + + // Check for _redirects file. + // Any path resolution failures are ignored and we just assume there's no _redirects file. + // Note that ignoring these errors also ensures that the use of the empty CID (bafkqaaa) in tests doesn't fail. 
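For reference, a `_redirects` file contains Netlify-style rules (from-path, to-path, optional status), as described in the spec linked above. The sketch below parses an illustrative rule set using the same `go-ipfs-redirects-file` calls that `getRedirectRules` and `handleRedirectsFileRules` rely on; the sample paths and rules are made up:

```go
package main

import (
	"fmt"
	"strings"

	redirects "github.com/ipfs/go-ipfs-redirects-file"
)

func main() {
	// Illustrative _redirects content: a 301 redirect with placeholders,
	// an SPA-style 200 rewrite, and a catch-all custom 404.
	sample := `/posts/:year/:slug  /articles/:year/:slug  301
/app/*  /app/index.html  200
/*  /404.html  404
`
	rules, err := redirects.Parse(strings.NewReader(sample))
	if err != nil {
		panic(err)
	}

	for _, rule := range rules {
		// MatchAndExpandPlaceholders fills :placeholders and the splat into rule.To.
		if rule.MatchAndExpandPlaceholders("/posts/2023/hello") {
			fmt.Printf("%d -> %s\n", rule.Status, rule.To) // 301 -> /articles/2023/hello
			break
		}
	}
}
```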
+ path := ipath.Join(rootPath, "_redirects") + resolvedPath, err := i.api.ResolvePath(r.Context(), path) + if err != nil { + return nil + } + return resolvedPath +} + +// Returns the root CID Path for the given path +func getRootPath(path ipath.Path) ipath.Path { + parts := strings.Split(path.String(), "/") + return ipath.New(gopath.Join("/", path.Namespace(), parts[2])) +} + +func (i *handler) serve4xx(w http.ResponseWriter, r *http.Request, content4xxPath ipath.Path, status int) error { + resolved4xxPath, err := i.api.ResolvePath(r.Context(), content4xxPath) + if err != nil { + return err + } + + node, err := i.api.GetUnixFsNode(r.Context(), resolved4xxPath) + if err != nil { + return err + } + defer node.Close() + + f, ok := node.(files.File) + if !ok { + return fmt.Errorf("could not convert node for %d page to file", status) + } + + size, err := f.Size() + if err != nil { + return fmt.Errorf("could not get size of %d page", status) + } + + log.Debugf("using _redirects: custom %d file at %q", status, content4xxPath) + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.FormatInt(size, 10)) + addCacheControlHeaders(w, r, content4xxPath, resolved4xxPath.Cid()) + w.WriteHeader(status) + _, err = io.CopyN(w, f, size) + return err +} + +func hasOriginIsolation(r *http.Request) bool { + _, gw := r.Context().Value(GatewayHostnameKey).(string) + _, dnslink := r.Context().Value(DNSLinkHostnameKey).(string) + + if gw || dnslink { + return true + } + + return false +} + +func isUnixfsResponseFormat(responseFormat string) bool { + // The implicit response format is UnixFS + return responseFormat == "" +} + +// Deprecated: legacy ipfs-404.html files are superseded by _redirects file +// This is provided only for backward-compatibility, until websites migrate +// to 404s managed via _redirects file (https://github.com/ipfs/specs/pull/290) +func (i *handler) serveLegacy404IfPresent(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) bool { + resolved404Path, ctype, err := i.searchUpTreeFor404(r, contentPath) + if err != nil { + return false + } + + dr, err := i.api.GetUnixFsNode(r.Context(), resolved404Path) + if err != nil { + return false + } + defer dr.Close() + + f, ok := dr.(files.File) + if !ok { + return false + } + + size, err := f.Size() + if err != nil { + return false + } + + log.Debugw("using pretty 404 file", "path", contentPath) + w.Header().Set("Content-Type", ctype) + w.Header().Set("Content-Length", strconv.FormatInt(size, 10)) + w.WriteHeader(http.StatusNotFound) + _, err = io.CopyN(w, f, size) + return err == nil +} + +func (i *handler) searchUpTreeFor404(r *http.Request, contentPath ipath.Path) (ipath.Resolved, string, error) { + filename404, ctype, err := preferred404Filename(r.Header.Values("Accept")) + if err != nil { + return nil, "", err + } + + pathComponents := strings.Split(contentPath.String(), "/") + + for idx := len(pathComponents); idx >= 3; idx-- { + pretty404 := gopath.Join(append(pathComponents[0:idx], filename404)...) 
+		parsed404Path := ipath.New("/" + pretty404)
+		if parsed404Path.IsValid() != nil {
+			break
+		}
+		resolvedPath, err := i.api.ResolvePath(r.Context(), parsed404Path)
+		if err != nil {
+			continue
+		}
+		return resolvedPath, ctype, nil
+	}
+
+	return nil, "", fmt.Errorf("no pretty 404 in any parent folder")
+}
+
+func preferred404Filename(acceptHeaders []string) (string, string, error) {
+	// If we ever want to offer a 404 file for a different content type
+	// then this function will need to parse q weightings, but for now
+	// the presence of anything matching HTML is enough.
+	for _, acceptHeader := range acceptHeaders {
+		accepted := strings.Split(acceptHeader, ",")
+		for _, spec := range accepted {
+			// Note: SplitN with n=2 (not 1) so that "text/html;q=0.8" yields
+			// "text/html"; TrimSpace handles "text/*, text/html" style lists.
+			contentType := strings.TrimSpace(strings.SplitN(spec, ";", 2)[0])
+			switch contentType {
+			case "*/*", "text/*", "text/html":
+				return "ipfs-404.html", "text/html", nil
+			}
+		}
+	}
+
+	return "", "", fmt.Errorf("there is no 404 file for the requested content types")
+}
diff --git a/gateway/handler_unixfs_dir.go b/gateway/handler_unixfs_dir.go
new file mode 100644
index 0000000000..40ea6ae0ba
--- /dev/null
+++ b/gateway/handler_unixfs_dir.go
@@ -0,0 +1,212 @@
+package gateway
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	gopath "path"
+	"strings"
+	"time"
+
+	"github.com/dustin/go-humanize"
+	ipath "github.com/ipfs/boxo/coreiface/path"
+	"github.com/ipfs/boxo/files"
+	"github.com/ipfs/boxo/gateway/assets"
+	path "github.com/ipfs/boxo/path"
+	"github.com/ipfs/boxo/path/resolver"
+	cid "github.com/ipfs/go-cid"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+)
+
+// serveDirectory returns the best representation of a UnixFS directory.
+//
+// It will return index.html if present, or generate a directory listing otherwise.
+func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, dir files.Directory, begin time.Time, logger *zap.SugaredLogger) bool {
+	ctx, span := spanTrace(ctx, "ServeDirectory", trace.WithAttributes(attribute.String("path", resolvedPath.String())))
+	defer span.End()
+
+	// HostnameOption might have constructed an IPNS/IPFS path using the Host header.
+	// In this case, we need the original path for constructing redirects
+	// and links that match the requested URL.
+	// For example, http://example.net would become /ipns/example.net, and
+	// the redirects and links would end up as http://example.net/ipns/example.net
+	requestURI, err := url.ParseRequestURI(r.RequestURI)
+	if err != nil {
+		webError(w, fmt.Errorf("failed to parse request path: %w", err), http.StatusInternalServerError)
+		return false
+	}
+	originalURLPath := requestURI.Path
+
+	// Ensure directory paths end with '/'
+	if originalURLPath[len(originalURLPath)-1] != '/' {
+		// don't redirect to trailing slash if it's go get
+		// https://github.com/ipfs/kubo/pull/3963
+		goget := r.URL.Query().Get("go-get") == "1"
+		if !goget {
+			suffix := "/"
+			// preserve query parameters
+			if r.URL.RawQuery != "" {
+				suffix = suffix + "?"
+ r.URL.RawQuery + } + // /ipfs/cid/foo?bar must be redirected to /ipfs/cid/foo/?bar + redirectURL := originalURLPath + suffix + logger.Debugw("directory location moved permanently", "status", http.StatusMovedPermanently) + http.Redirect(w, r, redirectURL, http.StatusMovedPermanently) + return true + } + } + + // Check if directory has index.html, if so, serveFile + idxPath := ipath.Join(contentPath, "index.html") + idxResolvedPath, err := i.api.ResolvePath(ctx, idxPath) + switch err.(type) { + case nil: + idx, err := i.api.GetUnixFsNode(ctx, idxResolvedPath) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + f, ok := idx.(files.File) + if !ok { + webError(w, files.ErrNotReader, http.StatusInternalServerError) + return false + } + + logger.Debugw("serving index.html file", "path", idxPath) + // write to request + success := i.serveFile(ctx, w, r, resolvedPath, idxPath, f, begin) + if success { + i.unixfsDirIndexGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + } + return success + case resolver.ErrNoLink: + logger.Debugw("no index.html; noop", "path", idxPath) + default: + webError(w, err, http.StatusInternalServerError) + return false + } + + // See statusResponseWriter.WriteHeader + // and https://github.com/ipfs/kubo/issues/7164 + // Note: this needs to occur before listingTemplate.Execute otherwise we get + // superfluous response.WriteHeader call from prometheus/client_golang + if w.Header().Get("Location") != "" { + logger.Debugw("location moved permanently", "status", http.StatusMovedPermanently) + w.WriteHeader(http.StatusMovedPermanently) + return true + } + + // A HTML directory index will be presented, be sure to set the correct + // type instead of relying on autodetection (which may fail). + w.Header().Set("Content-Type", "text/html") + + // Generated dir index requires custom Etag (output may change between go-ipfs versions) + dirEtag := getDirListingEtag(resolvedPath.Cid()) + w.Header().Set("Etag", dirEtag) + + if r.Method == http.MethodHead { + logger.Debug("return as request's HTTP method is HEAD") + return true + } + + results, err := i.api.LsUnixFsDir(ctx, resolvedPath) + if err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + dirListing := make([]assets.DirectoryItem, 0, len(results)) + for link := range results { + if link.Err != nil { + webError(w, link.Err, http.StatusInternalServerError) + return false + } + + hash := link.Cid.String() + di := assets.DirectoryItem{ + Size: humanize.Bytes(uint64(link.Size)), + Name: link.Name, + Path: gopath.Join(originalURLPath, link.Name), + Hash: hash, + ShortHash: assets.ShortHash(hash), + } + dirListing = append(dirListing, di) + } + + // construct the correct back link + // https://github.com/ipfs/kubo/issues/1365 + backLink := originalURLPath + + // don't go further up than /ipfs/$hash/ + pathSplit := path.SplitList(contentPath.String()) + switch { + // skip backlink when listing a content root + case len(pathSplit) == 3: // url: /ipfs/$hash + backLink = "" + + // skip backlink when listing a content root + case len(pathSplit) == 4 && pathSplit[3] == "": // url: /ipfs/$hash/ + backLink = "" + + // add the correct link depending on whether the path ends with a slash + default: + if strings.HasSuffix(backLink, "/") { + backLink += ".." + } else { + backLink += "/.." + } + } + + size := "?" + if s, err := dir.Size(); err == nil { + // Size may not be defined/supported. Continue anyways. 
+ size = humanize.Bytes(uint64(s)) + } + + hash := resolvedPath.Cid().String() + + // Gateway root URL to be used when linking to other rootIDs. + // This will be blank unless subdomain or DNSLink resolution is being used + // for this request. + var gwURL string + + // Get gateway hostname and build gateway URL. + if h, ok := r.Context().Value(GatewayHostnameKey).(string); ok { + gwURL = "//" + h + } else { + gwURL = "" + } + + dnslink := assets.HasDNSLinkOrigin(gwURL, contentPath.String()) + + // See comment above where originalUrlPath is declared. + tplData := assets.DirectoryTemplateData{ + GatewayURL: gwURL, + DNSLink: dnslink, + Listing: dirListing, + Size: size, + Path: contentPath.String(), + Breadcrumbs: assets.Breadcrumbs(contentPath.String(), dnslink), + BackLink: backLink, + Hash: hash, + } + + logger.Debugw("request processed", "tplDataDNSLink", dnslink, "tplDataSize", size, "tplDataBackLink", backLink, "tplDataHash", hash) + + if err := assets.DirectoryTemplate.Execute(w, tplData); err != nil { + webError(w, err, http.StatusInternalServerError) + return false + } + + // Update metrics + i.unixfsGenDirListingGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + return true +} + +func getDirListingEtag(dirCid cid.Cid) string { + return `"DirIndex-` + assets.AssetHash + `_CID-` + dirCid.String() + `"` +} diff --git a/gateway/handler_unixfs_file.go b/gateway/handler_unixfs_file.go new file mode 100644 index 0000000000..980885cb2f --- /dev/null +++ b/gateway/handler_unixfs_file.go @@ -0,0 +1,105 @@ +package gateway + +import ( + "context" + "fmt" + "io" + "mime" + "net/http" + gopath "path" + "strings" + "time" + + "github.com/gabriel-vasile/mimetype" + ipath "github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/files" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// serveFile returns data behind a file along with HTTP headers based on +// the file itself, its CID and the contentPath used for accessing it. +func (i *handler) serveFile(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, file files.File, begin time.Time) bool { + _, span := spanTrace(ctx, "ServeFile", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) + defer span.End() + + // Set Cache-Control and read optional Last-Modified time + modtime := addCacheControlHeaders(w, r, contentPath, resolvedPath.Cid()) + + // Set Content-Disposition + name := addContentDispositionHeader(w, r, contentPath) + + // Prepare size value for Content-Length HTTP header (set inside of http.ServeContent) + size, err := file.Size() + if err != nil { + http.Error(w, "cannot serve files with unknown sizes", http.StatusBadGateway) + return false + } + + if size == 0 { + // We override null files to 200 to avoid issues with fragment caching reverse proxies. + // Also whatever you are asking for, it's cheaper to just give you the complete file (nothing). 
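The Content-Type selection in `serveFile` (continued below) prefers the file extension and only falls back to content sniffing with the gabriel-vasile/mimetype library, rewinding the reader afterwards so the bytes can still be served. A distilled, self-contained sketch of that order; `detectContentType` is a hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"
	"io"
	"mime"
	gopath "path"
	"strings"

	"github.com/gabriel-vasile/mimetype"
)

// detectContentType mirrors the fallback order described above:
// extension first, then content sniffing, then a rewind.
func detectContentType(name string, content io.ReadSeeker) (string, error) {
	ctype := mime.TypeByExtension(gopath.Ext(name))
	if ctype == "" {
		m, err := mimetype.DetectReader(content)
		if err != nil {
			return "", err
		}
		ctype = m.String()
		// Rewind so the caller can still serve the bytes from the start.
		if _, err := content.Seek(0, io.SeekStart); err != nil {
			return "", err
		}
	}
	// Strip the encoding from HTML and let the browser figure it out.
	if strings.HasPrefix(ctype, "text/html;") {
		ctype = "text/html"
	}
	return ctype, nil
}

func main() {
	ct, err := detectContentType("photo.png", strings.NewReader("\x89PNG\r\n\x1a\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(ct) // image/png (resolved from the .png extension)
}
```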
+ // TODO: remove this if clause once https://github.com/golang/go/issues/54794 is fixed in two latest releases of go + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + return true + } + + // Lazy seeker enables efficient range-requests and HTTP HEAD responses + content := &lazySeeker{ + size: size, + reader: file, + } + + // Calculate deterministic value for Content-Type HTTP header + // (we prefer to do it here, rather than using implicit sniffing in http.ServeContent) + var ctype string + if _, isSymlink := file.(*files.Symlink); isSymlink { + // We should be smarter about resolving symlinks but this is the + // "most correct" we can be without doing that. + ctype = "inode/symlink" + } else { + ctype = mime.TypeByExtension(gopath.Ext(name)) + if ctype == "" { + // uses https://github.com/gabriel-vasile/mimetype library to determine the content type. + // Fixes https://github.com/ipfs/kubo/issues/7252 + mimeType, err := mimetype.DetectReader(content) + if err != nil { + http.Error(w, fmt.Sprintf("cannot detect content-type: %s", err.Error()), http.StatusInternalServerError) + return false + } + + ctype = mimeType.String() + _, err = content.Seek(0, io.SeekStart) + if err != nil { + http.Error(w, "seeker can't seek", http.StatusInternalServerError) + return false + } + } + // Strip the encoding from the HTML Content-Type header and let the + // browser figure it out. + // + // Fixes https://github.com/ipfs/kubo/issues/2203 + if strings.HasPrefix(ctype, "text/html;") { + ctype = "text/html" + } + } + // Setting explicit Content-Type to avoid mime-type sniffing on the client + // (unifies behavior across gateways and web browsers) + w.Header().Set("Content-Type", ctype) + + // special fixup around redirects + w = &statusResponseWriter{w} + + // ServeContent will take care of + // If-None-Match+Etag, Content-Length and range requests + _, dataSent, _ := ServeContent(w, r, name, modtime, content) + + // Was response successful? + if dataSent { + // Update metrics + i.unixfsFileGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + } + + return dataSent +} diff --git a/gateway/hostname.go b/gateway/hostname.go new file mode 100644 index 0000000000..97e8bd41ee --- /dev/null +++ b/gateway/hostname.go @@ -0,0 +1,594 @@ +package gateway + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "regexp" + "strings" + + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + dns "github.com/miekg/dns" + + mbase "github.com/multiformats/go-multibase" +) + +// Specification is the specification of an IPFS Public Gateway. +type Specification struct { + // Paths is explicit list of path prefixes that should be handled by + // this gateway. Example: `["/ipfs", "/ipns"]` + // Useful if you only want to support immutable `/ipfs`. + Paths []string + + // UseSubdomains indicates whether or not this gateway uses subdomains + // for IPFS resources instead of paths. That is: http://CID.ipfs.GATEWAY/... + // + // If this flag is set, any /ipns/$id and/or /ipfs/$id paths in Paths + // will be permanently redirected to http://$id.[ipns|ipfs].$gateway/. + // + // We do not support using both paths and subdomains for a single domain + // for security reasons (Origin isolation). + UseSubdomains bool + + // NoDNSLink configures this gateway to _not_ resolve DNSLink for the + // specific FQDN provided in `Host` HTTP header. Useful when you want to + // explicitly allow or refuse hosting a single hostname. 
To refuse all + // DNSLinks in `Host` processing, pass noDNSLink to `WithHostname` instead. + // This flag overrides the global one. + NoDNSLink bool + + // InlineDNSLink configures this gateway to always inline DNSLink names + // (FQDN) into a single DNS label in order to interop with wildcard TLS certs + // and Origin per CID isolation provided by rules like https://publicsuffix.org + // This should be set to true if you use HTTPS. + InlineDNSLink bool +} + +// WithHostname is a middleware that can wrap an http.Handler in order to parse the +// Host header and translating it to the content path. This is useful for Subdomain +// and DNSLink gateways. +// +// publicGateways configures the behavior of known public gateways. Each key is a +// fully qualified domain name (FQDN). +// +// noDNSLink configures the gateway to _not_ perform DNS TXT record lookups in +// response to requests with values in `Host` HTTP header. This flag can be overridden +// per FQDN in publicGateways. +func WithHostname(next http.Handler, api API, publicGateways map[string]*Specification, noDNSLink bool) http.HandlerFunc { + gateways := prepareHostnameGateways(publicGateways) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer panicHandler(w) + + // Unfortunately, many (well, ipfs.io) gateways use + // DNSLink so if we blindly rewrite with DNSLink, we'll + // break /ipfs links. + // + // We fix this by maintaining a list of known gateways + // and the paths that they serve "gateway" content on. + // That way, we can use DNSLink for everything else. + + // Support X-Forwarded-Host if added by a reverse proxy + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Host + host := r.Host + if xHost := r.Header.Get("X-Forwarded-Host"); xHost != "" { + host = xHost + } + + // HTTP Host & Path check: is this one of our "known gateways"? + if gw, ok := gateways.isKnownHostname(host); ok { + // This is a known gateway but request is not using + // the subdomain feature. + + // Does this gateway _handle_ this path? + if hasPrefix(r.URL.Path, gw.Paths...) { + // It does. + + // Should this gateway use subdomains instead of paths? + if gw.UseSubdomains { + // Yes, redirect if applicable + // Example: dweb.link/ipfs/{cid} → {cid}.ipfs.dweb.link + useInlinedDNSLink := gw.InlineDNSLink + newURL, err := toSubdomainURL(host, r.URL.Path, r, useInlinedDNSLink, api) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if newURL != "" { + // Set "Location" header with redirect destination. 
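For context, a sketch of how a server might wire `WithHostname` (documented above) in front of a gateway handler. The hostnames, the handler and API values, and the `github.com/ipfs/boxo/gateway` import path are illustrative assumptions that mirror a typical public-gateway configuration:

```go
package main

import (
	"net/http"

	gateway "github.com/ipfs/boxo/gateway" // import path is an assumption
)

// newFrontend is a hypothetical wrapper showing how WithHostname is wired.
func newFrontend(next http.Handler, api gateway.API) http.Handler {
	publicGateways := map[string]*gateway.Specification{
		// Subdomain gateway: dweb.link/ipfs/{cid} redirects to {cid}.ipfs.dweb.link
		"dweb.link": {
			Paths:         []string{"/ipfs", "/ipns"},
			UseSubdomains: true,
			InlineDNSLink: true, // recommended when serving over HTTPS with wildcard certs
		},
		// Path-only gateway that refuses DNSLink lookups for its own hostname.
		"ipfs.io": {
			Paths:     []string{"/ipfs", "/ipns"},
			NoDNSLink: true,
		},
	}
	// noDNSLink=false keeps wildcard DNSLink resolution enabled for unknown hostnames.
	return gateway.WithHostname(next, api, publicGateways, false)
}
```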
+ // It is ignored by curl in default mode, but will + // be respected by user agents that follow + // redirects by default, namely web browsers + w.Header().Set("Location", newURL) + + // Note: we continue regular gateway processing: + // HTTP Status Code http.StatusMovedPermanently + // will be set later, in statusResponseWriter + } + } + + // Not a subdomain resource, continue with path processing + // Example: 127.0.0.1:8080/ipfs/{CID}, ipfs.io/ipfs/{CID} etc + next.ServeHTTP(w, r) + return + } + // Not a whitelisted path + + // Try DNSLink, if it was not explicitly disabled for the hostname + if !gw.NoDNSLink && hasDNSLinkRecord(r.Context(), api, host) { + // rewrite path and handle as DNSLink + r.URL.Path = "/ipns/" + stripPort(host) + r.URL.Path + next.ServeHTTP(w, withHostnameContext(r, host)) + return + } + + // If not, resource does not exist on the hostname, return 404 + http.NotFound(w, r) + return + } + + // HTTP Host check: is this one of our subdomain-based "known gateways"? + // IPFS details extracted from the host: {rootID}.{ns}.{gwHostname} + // /ipfs/ example: {cid}.ipfs.localhost:8080, {cid}.ipfs.dweb.link + // /ipns/ example: {libp2p-key}.ipns.localhost:8080, {inlined-dnslink-fqdn}.ipns.dweb.link + if gw, gwHostname, ns, rootID, ok := gateways.knownSubdomainDetails(host); ok { + // Looks like we're using a known gateway in subdomain mode. + + // Assemble original path prefix. + pathPrefix := "/" + ns + "/" + rootID + + // Retrieve whether or not we should inline DNSLink. + useInlinedDNSLink := gw.InlineDNSLink + + // Does this gateway _handle_ subdomains AND this path? + if !(gw.UseSubdomains && hasPrefix(pathPrefix, gw.Paths...)) { + // If not, resource does not exist, return 404 + http.NotFound(w, r) + return + } + + // Check if rootID is a valid CID + if rootCID, err := cid.Decode(rootID); err == nil { + // Do we need to redirect root CID to a canonical DNS representation? + dnsCID, err := toDNSLabel(rootID, rootCID) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !strings.HasPrefix(r.Host, dnsCID) { + dnsPrefix := "/" + ns + "/" + dnsCID + newURL, err := toSubdomainURL(gwHostname, dnsPrefix+r.URL.Path, r, useInlinedDNSLink, api) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if newURL != "" { + // Redirect to deterministic CID to ensure CID + // always gets the same Origin on the web + http.Redirect(w, r, newURL, http.StatusMovedPermanently) + return + } + } + + // Do we need to fix multicodec in PeerID represented as CIDv1? + if isPeerIDNamespace(ns) { + if rootCID.Type() != cid.Libp2pKey { + newURL, err := toSubdomainURL(gwHostname, pathPrefix+r.URL.Path, r, useInlinedDNSLink, api) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if newURL != "" { + // Redirect to CID fixed inside of toSubdomainURL() + http.Redirect(w, r, newURL, http.StatusMovedPermanently) + return + } + } + } + } else { // rootID is not a CID.. + // Check if rootID is a single DNS label with an inlined + // DNSLink FQDN a single DNS label. We support this so + // loading DNSLink names over TLS "just works" on public + // HTTP gateways. 
+			//
+			// Rationale for doing this can be found under "Option C"
+			// at: https://github.com/ipfs/in-web-browsers/issues/169
+			//
+			// TLDR is:
+			// https://dweb.link/ipns/my.v-long.example.com
+			// can be loaded from a subdomain gateway with a wildcard
+			// TLS cert if represented as a single DNS label:
+			// https://my-v--long-example-com.ipns.dweb.link
+			if ns == "ipns" && !strings.Contains(rootID, ".") {
+				// if there is no TXT record for rootID
+				if !hasDNSLinkRecord(r.Context(), api, rootID) {
+					// my-v--long-example-com → my.v-long.example.com
+					dnslinkFQDN := toDNSLinkFQDN(rootID)
+					if hasDNSLinkRecord(r.Context(), api, dnslinkFQDN) {
+						// update path prefix to use real FQDN with DNSLink
+						pathPrefix = "/ipns/" + dnslinkFQDN
+					}
+				}
+			}
+		}
+
+		// Rewrite the path to not use subdomains
+		r.URL.Path = pathPrefix + r.URL.Path
+
+		// Serve path request
+		next.ServeHTTP(w, withHostnameContext(r, gwHostname))
+		return
+	}
+
+	// We don't have a known gateway. Fall back to a DNSLink lookup.
+
+	// Wildcard HTTP Host check:
+	// 1. is wildcard DNSLink enabled (Gateway.NoDNSLink=false)?
+	// 2. does Host header include a fully qualified domain name (FQDN)?
+	// 3. does DNSLink record exist in DNS?
+	if !noDNSLink && hasDNSLinkRecord(r.Context(), api, host) {
+		// rewrite path and handle as DNSLink
+		r.URL.Path = "/ipns/" + stripPort(host) + r.URL.Path
+		ctx := context.WithValue(r.Context(), DNSLinkHostnameKey, host)
+		next.ServeHTTP(w, withHostnameContext(r.WithContext(ctx), host))
+		return
+	}
+
+	// else, treat it as an old school gateway, I guess.
+	next.ServeHTTP(w, r)
+
+	})
+}
+
+// Extends request context to include hostname of a canonical gateway root
+// (subdomain root or dnslink fqdn)
+func withHostnameContext(r *http.Request, hostname string) *http.Request {
+	// This is required for links on directory listing pages to work correctly
+	// on subdomain and dnslink gateways. While DNSLink could read the value from
+	// the Host header, subdomain gateways have more complex rules (knownSubdomainDetails)
+	// More: https://github.com/ipfs/dir-index-html/issues/42
+	// nolint: staticcheck // non-backward compatible change
+	ctx := context.WithValue(r.Context(), GatewayHostnameKey, hostname)
+	return r.WithContext(ctx)
+}
+
+// isDomainNameAndNotPeerID returns true if the string looks like a valid DNS name AND is not a PeerID
+func isDomainNameAndNotPeerID(hostname string) bool {
+	if len(hostname) == 0 {
+		return false
+	}
+	if _, err := peer.Decode(hostname); err == nil {
+		return false
+	}
+	_, ok := dns.IsDomainName(hostname)
+	return ok
+}
+
+// hasDNSLinkRecord reports whether a DNSLink TXT record exists for the provided host.
+func hasDNSLinkRecord(ctx context.Context, api API, host string) bool {
+	dnslinkName := stripPort(host)
+
+	if !isDomainNameAndNotPeerID(dnslinkName) {
+		return false
+	}
+
+	_, err := api.GetDNSLinkRecord(ctx, dnslinkName)
+	return err == nil
+}
+
+func isSubdomainNamespace(ns string) bool {
+	switch ns {
+	case "ipfs", "ipns", "p2p", "ipld":
+		// Note: 'p2p' and 'ipld' are only kept here for compatibility with Kubo.
+		return true
+	default:
+		return false
+	}
+}
+
+func isPeerIDNamespace(ns string) bool {
+	switch ns {
+	case "ipns", "p2p":
+		// Note: 'p2p' is only kept here for compatibility with Kubo.
+ return true + default: + return false + } +} + +// Label's max length in DNS (https://tools.ietf.org/html/rfc1034#page-7) +const dnsLabelMaxLength int = 63 + +// Converts a CID to DNS-safe representation that fits in 63 characters +func toDNSLabel(rootID string, rootCID cid.Cid) (dnsCID string, err error) { + // Return as-is if things fit + if len(rootID) <= dnsLabelMaxLength { + return rootID, nil + } + + // Convert to Base36 and see if that helped + rootID, err = cid.NewCidV1(rootCID.Type(), rootCID.Hash()).StringOfBase(mbase.Base36) + if err != nil { + return "", err + } + if len(rootID) <= dnsLabelMaxLength { + return rootID, nil + } + + // Can't win with DNS at this point, return error + return "", fmt.Errorf("CID incompatible with DNS label length limit of 63: %s", rootID) +} + +// Returns true if HTTP request involves TLS certificate. +// See https://github.com/ipfs/in-web-browsers/issues/169 to understand how it +// impacts DNSLink websites on public gateways. +func isHTTPSRequest(r *http.Request) bool { + // X-Forwarded-Proto if added by a reverse proxy + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Proto + xproto := r.Header.Get("X-Forwarded-Proto") + // Is request a native TLS (not used atm, but future-proofing) + // or a proxied HTTPS (eg. go-ipfs behind nginx at a public gw)? + return r.URL.Scheme == "https" || xproto == "https" +} + +// Converts a FQDN to DNS-safe representation that fits in 63 characters: +// my.v-long.example.com → my-v--long-example-com +func toDNSLinkDNSLabel(fqdn string) (dnsLabel string, err error) { + dnsLabel = strings.ReplaceAll(fqdn, "-", "--") + dnsLabel = strings.ReplaceAll(dnsLabel, ".", "-") + if len(dnsLabel) > dnsLabelMaxLength { + return "", fmt.Errorf("DNSLink representation incompatible with DNS label length limit of 63: %s", dnsLabel) + } + return dnsLabel, nil +} + +// Converts a DNS-safe representation of DNSLink FQDN to real FQDN: +// my-v--long-example-com → my.v-long.example.com +func toDNSLinkFQDN(dnsLabel string) (fqdn string) { + fqdn = strings.ReplaceAll(dnsLabel, "--", "@") // @ placeholder is unused in DNS labels + fqdn = strings.ReplaceAll(fqdn, "-", ".") + fqdn = strings.ReplaceAll(fqdn, "@", "-") + return fqdn +} + +// Converts a hostname/path to a subdomain-based URL, if applicable. +func toSubdomainURL(hostname, path string, r *http.Request, inlineDNSLink bool, api API) (redirURL string, err error) { + var scheme, ns, rootID, rest string + + query := r.URL.RawQuery + parts := strings.SplitN(path, "/", 4) + isHTTPS := isHTTPSRequest(r) + safeRedirectURL := func(in string) (out string, err error) { + safeURI, err := url.ParseRequestURI(in) + if err != nil { + return "", err + } + return safeURI.String(), nil + } + + if isHTTPS { + scheme = "https:" + } else { + scheme = "http:" + } + + switch len(parts) { + case 4: + rest = parts[3] + fallthrough + case 3: + ns = parts[1] + rootID = parts[2] + default: + return "", nil + } + + if !isSubdomainNamespace(ns) { + return "", nil + } + + // add prefix if query is present + if query != "" { + query = "?" + query + } + + // Normalize problematic PeerIDs (eg. 
+ if isPeerIDNamespace(ns) && !isDomainNameAndNotPeerID(rootID) {
+ peerID, err := peer.Decode(rootID)
+ // Note: PeerID CIDv1 with protobuf multicodec will fail, but we fix it
+ // in the next block
+ if err == nil {
+ rootID = peer.ToCid(peerID).String()
+ }
+ }
+
+ // If rootID is a CID, ensure it uses DNS-friendly text representation
+ if rootCID, err := cid.Decode(rootID); err == nil {
+ multicodec := rootCID.Type()
+ var base mbase.Encoding = mbase.Base32
+
+ // Normalizations specific to /ipns/{libp2p-key}
+ if isPeerIDNamespace(ns) {
+ // Using Base36 for /ipns/ for consistency
+ // Context: https://github.com/ipfs/kubo/pull/7441#discussion_r452372828
+ base = mbase.Base36
+
+ // PeerIDs represented as CIDv1 are expected to have libp2p-key
+ // multicodec (https://github.com/libp2p/specs/pull/209).
+ // We ease the transition by fixing multicodec on the fly:
+ // https://github.com/ipfs/kubo/issues/5287#issuecomment-492163929
+ if multicodec != cid.Libp2pKey {
+ multicodec = cid.Libp2pKey
+ }
+ }
+
+ // Ensure CID text representation used in subdomain is compatible
+ // with the way DNS and URIs are implemented in user agents.
+ //
+ // 1. Switch to CIDv1 and enable case-insensitive Base encoding
+ // to avoid issues when user agent force-lowercases the hostname
+ // before making the request
+ // (https://github.com/ipfs/in-web-browsers/issues/89)
+ rootCID = cid.NewCidV1(multicodec, rootCID.Hash())
+ rootID, err = rootCID.StringOfBase(base)
+ if err != nil {
+ return "", err
+ }
+ // 2. Make sure CID fits in a DNS label, adjust encoding if needed
+ // (https://github.com/ipfs/kubo/issues/7318)
+ rootID, err = toDNSLabel(rootID, rootCID)
+ if err != nil {
+ return "", err
+ }
+ } else { // rootID is not a CID
+
+ // Check if rootID is a FQDN with DNSLink and convert it to TLS-safe
+ // representation that fits in a single DNS label. We support this so
+ // loading DNSLink names over TLS "just works" on public HTTP gateways
+ // that pass 'https' in X-Forwarded-Proto to go-ipfs.
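+ // (toDNSLinkDNSLabel performs that conversion below; toDNSLinkFQDN in
+ // the hostname middleware reverses it when a subdomain request comes
+ // back in.)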
+ //
+ // Rationale can be found under "Option C"
+ // at: https://github.com/ipfs/in-web-browsers/issues/169
+ //
+ // TLDR is:
+ // /ipns/my.v-long.example.com
+ // can be loaded from a subdomain gateway with a wildcard TLS cert if
+ // represented as a single DNS label:
+ // https://my-v--long-example-com.ipns.dweb.link
+ if (inlineDNSLink || isHTTPS) && ns == "ipns" && strings.Contains(rootID, ".") {
+ if hasDNSLinkRecord(r.Context(), api, rootID) {
+ // my.v-long.example.com → my-v--long-example-com
+ dnsLabel, err := toDNSLinkDNSLabel(rootID)
+ if err != nil {
+ return "", err
+ }
+ // update rootID to use the inlined, TLS-safe DNS label
+ rootID = dnsLabel
+ }
+ }
+ }
+
+ return safeRedirectURL(fmt.Sprintf(
+ "%s//%s.%s.%s/%s%s",
+ scheme,
+ rootID,
+ ns,
+ hostname,
+ rest,
+ query,
+ ))
+}
+
+func hasPrefix(path string, prefixes ...string) bool {
+ for _, prefix := range prefixes {
+ // Assume people are creative with trailing slashes in Gateway config
+ p := strings.TrimSuffix(prefix, "/")
+ // Support for both /version and /ipfs/$cid
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return true
+ }
+ }
+ return false
+}
+
+func stripPort(hostname string) string {
+ host, _, err := net.SplitHostPort(hostname)
+ if err == nil {
+ return host
+ }
+ return hostname
+}
+
+type hostnameGateways struct {
+ exact map[string]*Specification
+ wildcard map[*regexp.Regexp]*Specification
+}
+
+// prepareHostnameGateways converts the user given gateways into an internal format
+// split between exact and wildcard-based gateway hostnames.
+func prepareHostnameGateways(gateways map[string]*Specification) *hostnameGateways {
+ h := &hostnameGateways{
+ exact: map[string]*Specification{},
+ wildcard: map[*regexp.Regexp]*Specification{},
+ }
+
+ for hostname, gw := range gateways {
+ if strings.Contains(hostname, "*") {
+ // from *.domain.tld, construct a regexp that matches any direct subdomain
+ // of .domain.tld.
+ //
+ // Regexp will be in the form of ^[^.]+\.domain\.tld(?::\d+)?$
+ escaped := strings.ReplaceAll(hostname, ".", `\.`)
+ regexed := strings.ReplaceAll(escaped, "*", "[^.]+")
+
+ re, err := regexp.Compile(fmt.Sprintf(`^%s(?::\d+)?$`, regexed))
+ if err != nil {
+ log.Warnf("invalid wildcard gateway hostname \"%s\"", hostname)
+ // skip the invalid pattern instead of storing a nil *regexp.Regexp,
+ // which would panic on MatchString in isKnownHostname
+ continue
+ }
+
+ h.wildcard[re] = gw
+ } else {
+ h.exact[hostname] = gw
+ }
+ }
+
+ return h
+}
+
+// isKnownHostname checks the given hostname gateways and returns a matching
+// specification with graceful fallback to a version without the port.
+func (gws *hostnameGateways) isKnownHostname(hostname string) (gw *Specification, ok bool) {
+ // Try hostname (host+optional port - value from Host header as-is)
+ if gw, ok := gws.exact[hostname]; ok {
+ return gw, ok
+ }
+ // Also test without port
+ if gw, ok = gws.exact[stripPort(hostname)]; ok {
+ return gw, ok
+ }
+
+ // Wildcard support. The compiled regexp already allows an optional port.
+ for re, spec := range gws.wildcard {
+ if re.MatchString(hostname) {
+ return spec, true
+ }
+ }
+
+ return nil, false
+}
+
+// knownSubdomainDetails parses the Host header and looks for a known gateway matching
+// the subdomain host. If found, returns a Specification and the subdomain components
+// extracted from Host header: {rootID}.{ns}.{gwHostname}.
+// Note: hostname is host + optional port
+func (gws *hostnameGateways) knownSubdomainDetails(hostname string) (gw *Specification, gwHostname, ns, rootID string, ok bool) {
+ labels := strings.Split(hostname, ".")
+ // Look for FQDN of a known gateway hostname.
+ // Example: given "dist.ipfs.tech.ipns.dweb.link": + // 1. Lookup "link" TLD in knownGateways: negative + // 2. Lookup "dweb.link" in knownGateways: positive + // + // Stops when we have 2 or fewer labels left as we need at least a + // rootId and a namespace. + for i := len(labels) - 1; i >= 2; i-- { + fqdn := strings.Join(labels[i:], ".") + gw, ok := gws.isKnownHostname(fqdn) + if !ok { + continue + } + + ns := labels[i-1] + if !isSubdomainNamespace(ns) { + continue + } + + // Merge remaining labels (could be a FQDN with DNSLink) + rootID := strings.Join(labels[:i-1], ".") + return gw, fqdn, ns, rootID, true + } + // no match + return nil, "", "", "", false +} diff --git a/gateway/hostname_test.go b/gateway/hostname_test.go new file mode 100644 index 0000000000..f8e7dff6c7 --- /dev/null +++ b/gateway/hostname_test.go @@ -0,0 +1,290 @@ +package gateway + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + path "github.com/ipfs/boxo/path" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" +) + +func TestToSubdomainURL(t *testing.T) { + gwAPI, _ := newMockAPI(t) + testCID, err := cid.Decode("bafkqaglimvwgy3zakrsxg5cun5jxkyten5wwc2lokvjeycq") + assert.Nil(t, err) + + gwAPI.namesys["/ipns/dnslink.long-name.example.com"] = path.FromString(testCID.String()) + gwAPI.namesys["/ipns/dnslink.too-long.f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o.example.com"] = path.FromString(testCID.String()) + httpRequest := httptest.NewRequest("GET", "http://127.0.0.1:8080", nil) + httpsRequest := httptest.NewRequest("GET", "https://https-request-stub.example.com", nil) + httpsProxiedRequest := httptest.NewRequest("GET", "http://proxied-https-request-stub.example.com", nil) + httpsProxiedRequest.Header.Set("X-Forwarded-Proto", "https") + + for _, test := range []struct { + // in: + request *http.Request + gwHostname string + inlineDNSLink bool + path string + // out: + url string + err error + }{ + + // DNSLink + {httpRequest, "localhost", false, "/ipns/dnslink.io", "http://dnslink.io.ipns.localhost/", nil}, + // Hostname with port + {httpRequest, "localhost:8080", false, "/ipns/dnslink.io", "http://dnslink.io.ipns.localhost:8080/", nil}, + // CIDv0 → CIDv1base32 + {httpRequest, "localhost", false, "/ipfs/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", "http://bafybeif7a7gdklt6hodwdrmwmxnhksctcuav6lfxlcyfz4khzl3qfmvcgu.ipfs.localhost/", nil}, + // CIDv1 with long sha512 + {httpRequest, "localhost", false, "/ipfs/bafkrgqe3ohjcjplc6n4f3fwunlj6upltggn7xqujbsvnvyw764srszz4u4rshq6ztos4chl4plgg4ffyyxnayrtdi5oc4xb2332g645433aeg", "", errors.New("CID incompatible with DNS label length limit of 63: kf1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5oj")}, + // PeerID as CIDv1 needs to have libp2p-key multicodec + {httpRequest, "localhost", false, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", "http://k2k4r8n0flx3ra0y5dr8fmyvwbzy3eiztmtq6th694k5a3rznayp3e4o.ipns.localhost/", nil}, + {httpRequest, "localhost", false, "/ipns/bafybeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm", "http://k2k4r8l9ja7hkzynavdqup76ou46tnvuaqegbd04a4o1mpbsey0meucb.ipns.localhost/", nil}, + // PeerID: ed25519+identity multihash → CIDv1Base36 + {httpRequest, "localhost", false, "/ipns/12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5", "http://k51qzi5uqu5di608geewp3nqkg0bpujoasmka7ftkyxgcm3fh1aroup0gsdrna.ipns.localhost/", nil}, + {httpRequest, "sub.localhost", false, 
"/ipfs/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", "http://bafybeif7a7gdklt6hodwdrmwmxnhksctcuav6lfxlcyfz4khzl3qfmvcgu.ipfs.sub.localhost/", nil}, + // HTTPS requires DNSLink name to fit in a single DNS label – see "Option C" from https://github.com/ipfs/in-web-browsers/issues/169 + {httpRequest, "dweb.link", false, "/ipns/dnslink.long-name.example.com", "http://dnslink.long-name.example.com.ipns.dweb.link/", nil}, + {httpsRequest, "dweb.link", false, "/ipns/dnslink.long-name.example.com", "https://dnslink-long--name-example-com.ipns.dweb.link/", nil}, + {httpsProxiedRequest, "dweb.link", false, "/ipns/dnslink.long-name.example.com", "https://dnslink-long--name-example-com.ipns.dweb.link/", nil}, + // HTTP requests can also be converted to fit into a single DNS label - https://github.com/ipfs/kubo/issues/9243 + {httpRequest, "localhost", true, "/ipns/dnslink.long-name.example.com", "http://dnslink-long--name-example-com.ipns.localhost/", nil}, + {httpRequest, "dweb.link", true, "/ipns/dnslink.long-name.example.com", "http://dnslink-long--name-example-com.ipns.dweb.link/", nil}, + } { + testName := fmt.Sprintf("%s, %v, %s", test.gwHostname, test.inlineDNSLink, test.path) + t.Run(testName, func(t *testing.T) { + url, err := toSubdomainURL(test.gwHostname, test.path, test.request, test.inlineDNSLink, gwAPI) + assert.Equal(t, test.url, url) + assert.Equal(t, test.err, err) + }) + } +} + +func TestToDNSLinkDNSLabel(t *testing.T) { + for _, test := range []struct { + in string + out string + err error + }{ + {"dnslink.long-name.example.com", "dnslink-long--name-example-com", nil}, + {"dnslink.too-long.f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o.example.com", "", errors.New("DNSLink representation incompatible with DNS label length limit of 63: dnslink-too--long-f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o-example-com")}, + } { + t.Run(test.in, func(t *testing.T) { + out, err := toDNSLinkDNSLabel(test.in) + assert.Equal(t, test.out, out) + assert.Equal(t, test.err, err) + }) + } +} + +func TestToDNSLinkFQDN(t *testing.T) { + for _, test := range []struct { + in string + out string + }{ + {"singlelabel", "singlelabel"}, + {"docs-ipfs-tech", "docs.ipfs.tech"}, + {"dnslink-long--name-example-com", "dnslink.long-name.example.com"}, + } { + t.Run(test.in, func(t *testing.T) { + out := toDNSLinkFQDN(test.in) + assert.Equal(t, test.out, out) + }) + } +} + +func TestIsHTTPSRequest(t *testing.T) { + httpRequest := httptest.NewRequest("GET", "http://127.0.0.1:8080", nil) + httpsRequest := httptest.NewRequest("GET", "https://https-request-stub.example.com", nil) + httpsProxiedRequest := httptest.NewRequest("GET", "http://proxied-https-request-stub.example.com", nil) + httpsProxiedRequest.Header.Set("X-Forwarded-Proto", "https") + httpProxiedRequest := httptest.NewRequest("GET", "http://proxied-http-request-stub.example.com", nil) + httpProxiedRequest.Header.Set("X-Forwarded-Proto", "http") + oddballRequest := httptest.NewRequest("GET", "foo://127.0.0.1:8080", nil) + for _, test := range []struct { + in *http.Request + out bool + }{ + {httpRequest, false}, + {httpsRequest, true}, + {httpsProxiedRequest, true}, + {httpProxiedRequest, false}, + {oddballRequest, false}, + } { + testName := fmt.Sprintf("%+v", test.in) + t.Run(testName, func(t *testing.T) { + out := isHTTPSRequest(test.in) + assert.Equal(t, test.out, out) + }) + } +} + +func TestHasPrefix(t *testing.T) { + for _, test := 
range []struct { + prefixes []string + path string + out bool + }{ + {[]string{"/ipfs"}, "/ipfs/cid", true}, + {[]string{"/ipfs/"}, "/ipfs/cid", true}, + {[]string{"/version/"}, "/version", true}, + {[]string{"/version"}, "/version", true}, + } { + testName := fmt.Sprintf("%+v, %s", test.prefixes, test.path) + t.Run(testName, func(t *testing.T) { + out := hasPrefix(test.path, test.prefixes...) + assert.Equal(t, test.out, out) + }) + } +} + +func TestIsDomainNameAndNotPeerID(t *testing.T) { + for _, test := range []struct { + hostname string + out bool + }{ + {"", false}, + {"example.com", true}, + {"non-icann.something", true}, + {"..", false}, + {"12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5", false}, // valid peerid + {"k51qzi5uqu5di608geewp3nqkg0bpujoasmka7ftkyxgcm3fh1aroup0gsdrna", false}, // valid peerid + } { + t.Run(test.hostname, func(t *testing.T) { + out := isDomainNameAndNotPeerID(test.hostname) + assert.Equal(t, test.out, out) + }) + } +} + +func TestPortStripping(t *testing.T) { + for _, test := range []struct { + in string + out string + }{ + {"localhost:8080", "localhost"}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.localhost:8080", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.localhost"}, + {"example.com:443", "example.com"}, + {"example.com", "example.com"}, + {"foo-dweb.ipfs.pvt.k12.ma.us:8080", "foo-dweb.ipfs.pvt.k12.ma.us"}, + {"localhost", "localhost"}, + {"[::1]:8080", "::1"}, + } { + t.Run(test.in, func(t *testing.T) { + out := stripPort(test.in) + assert.Equal(t, test.out, out) + }) + } +} + +func TestToDNSLabel(t *testing.T) { + for _, test := range []struct { + in string + out string + err error + }{ + // <= 63 + {"QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", "QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", nil}, + {"bafybeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm", "bafybeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm", nil}, + // > 63 + // PeerID: ed25519+identity multihash → CIDv1Base36 + {"bafzaajaiaejca4syrpdu6gdx4wsdnokxkprgzxf4wrstuc34gxw5k5jrag2so5gk", "k51qzi5uqu5dj16qyiq0tajolkojyl9qdkr254920wxv7ghtuwcz593tp69z9m", nil}, + // CIDv1 with long sha512 → error + {"bafkrgqe3ohjcjplc6n4f3fwunlj6upltggn7xqujbsvnvyw764srszz4u4rshq6ztos4chl4plgg4ffyyxnayrtdi5oc4xb2332g645433aeg", "", errors.New("CID incompatible with DNS label length limit of 63: kf1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5oj")}, + } { + t.Run(test.in, func(t *testing.T) { + inCID, _ := cid.Decode(test.in) + out, err := toDNSLabel(test.in, inCID) + assert.Equal(t, test.out, out) + assert.Equal(t, test.err, err) + }) + } +} + +func TestKnownSubdomainDetails(t *testing.T) { + gwLocalhost := &Specification{Paths: []string{"/ipfs", "/ipns", "/api"}, UseSubdomains: true} + gwDweb := &Specification{Paths: []string{"/ipfs", "/ipns", "/api"}, UseSubdomains: true} + gwLong := &Specification{Paths: []string{"/ipfs", "/ipns", "/api"}, UseSubdomains: true} + gwWildcard1 := &Specification{Paths: []string{"/ipfs", "/ipns", "/api"}, UseSubdomains: true} + gwWildcard2 := &Specification{Paths: []string{"/ipfs", "/ipns", "/api"}, UseSubdomains: true} + + gateways := prepareHostnameGateways(map[string]*Specification{ + "localhost": gwLocalhost, + "dweb.link": gwDweb, + "devgateway.dweb.link": gwDweb, + "dweb.ipfs.pvt.k12.ma.us": gwLong, // note the sneaky ".ipfs." 
;-) + "*.wildcard1.tld": gwWildcard1, + "*.*.wildcard2.tld": gwWildcard2, + }) + + for _, test := range []struct { + // in: + hostHeader string + // out: + gw *Specification + hostname string + ns string + rootID string + ok bool + }{ + // no subdomain + {"127.0.0.1:8080", nil, "", "", "", false}, + {"[::1]:8080", nil, "", "", "", false}, + {"hey.look.example.com", nil, "", "", "", false}, + {"dweb.link", nil, "", "", "", false}, + // malformed Host header + {".....dweb.link", nil, "", "", "", false}, + {"link", nil, "", "", "", false}, + {"8080:dweb.link", nil, "", "", "", false}, + {" ", nil, "", "", "", false}, + {"", nil, "", "", "", false}, + // unknown gateway host + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.unknown.example.com", nil, "", "", "", false}, + // cid in subdomain, known gateway + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.localhost:8080", gwLocalhost, "localhost:8080", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.dweb.link", gwDweb, "dweb.link", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.devgateway.dweb.link", gwDweb, "devgateway.dweb.link", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + // capture everything before .ipfs. + {"foo.bar.boo-buzz.ipfs.dweb.link", gwDweb, "dweb.link", "ipfs", "foo.bar.boo-buzz", true}, + // ipns + {"bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju.ipns.localhost:8080", gwLocalhost, "localhost:8080", "ipns", "bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju", true}, + {"bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju.ipns.dweb.link", gwDweb, "dweb.link", "ipns", "bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju", true}, + // edge case check: public gateway under long TLD (see: https://publicsuffix.org) + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.dweb.ipfs.pvt.k12.ma.us", gwLong, "dweb.ipfs.pvt.k12.ma.us", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + {"bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju.ipns.dweb.ipfs.pvt.k12.ma.us", gwLong, "dweb.ipfs.pvt.k12.ma.us", "ipns", "bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju", true}, + // dnslink in subdomain + {"en.wikipedia-on-ipfs.org.ipns.localhost:8080", gwLocalhost, "localhost:8080", "ipns", "en.wikipedia-on-ipfs.org", true}, + {"en.wikipedia-on-ipfs.org.ipns.localhost", gwLocalhost, "localhost", "ipns", "en.wikipedia-on-ipfs.org", true}, + {"dist.ipfs.tech.ipns.localhost:8080", gwLocalhost, "localhost:8080", "ipns", "dist.ipfs.tech", true}, + {"en.wikipedia-on-ipfs.org.ipns.dweb.link", gwDweb, "dweb.link", "ipns", "en.wikipedia-on-ipfs.org", true}, + // edge case check: public gateway under long TLD (see: https://publicsuffix.org) + {"foo.dweb.ipfs.pvt.k12.ma.us", nil, "", "", "", false}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.dweb.ipfs.pvt.k12.ma.us", gwLong, "dweb.ipfs.pvt.k12.ma.us", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + {"bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju.ipns.dweb.ipfs.pvt.k12.ma.us", gwLong, "dweb.ipfs.pvt.k12.ma.us", "ipns", "bafzbeihe35nmjqar22thmxsnlsgxppd66pseq6tscs4mo25y55juhh6bju", true}, + // other namespaces + {"api.localhost", nil, "", "", "", false}, + 
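+ // ("api" is not a subdomain namespace, while "p2p" is kept for Kubo
+ // compatibility; see isSubdomainNamespace)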
{"peerid.p2p.localhost", gwLocalhost, "localhost", "p2p", "peerid", true}, + // wildcards + {"wildcard1.tld", nil, "", "", "", false}, + {".wildcard1.tld", nil, "", "", "", false}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.wildcard1.tld", nil, "", "", "", false}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.sub.wildcard1.tld", gwWildcard1, "sub.wildcard1.tld", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.sub1.sub2.wildcard1.tld", nil, "", "", "", false}, + {"bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am.ipfs.sub1.sub2.wildcard2.tld", gwWildcard2, "sub1.sub2.wildcard2.tld", "ipfs", "bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am", true}, + } { + t.Run(test.hostHeader, func(t *testing.T) { + gw, hostname, ns, rootID, ok := gateways.knownSubdomainDetails(test.hostHeader) + assert.Equal(t, test.ok, ok) + assert.Equal(t, test.rootID, rootID) + assert.Equal(t, test.ns, ns) + assert.Equal(t, test.hostname, hostname) + assert.Equal(t, test.gw, gw) + }) + } +} diff --git a/gateway/lazyseek.go b/gateway/lazyseek.go new file mode 100644 index 0000000000..0f4920fad4 --- /dev/null +++ b/gateway/lazyseek.go @@ -0,0 +1,60 @@ +package gateway + +import ( + "fmt" + "io" +) + +// The HTTP server uses seek to determine the file size. Actually _seeking_ can +// be slow so we wrap the seeker in a _lazy_ seeker. +type lazySeeker struct { + reader io.ReadSeeker + + size int64 + offset int64 + realOffset int64 +} + +func (s *lazySeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekEnd: + return s.Seek(s.size+offset, io.SeekStart) + case io.SeekCurrent: + return s.Seek(s.offset+offset, io.SeekStart) + case io.SeekStart: + if offset < 0 { + return s.offset, fmt.Errorf("invalid seek offset") + } + s.offset = offset + return s.offset, nil + default: + return s.offset, fmt.Errorf("invalid whence: %d", whence) + } +} + +func (s *lazySeeker) Read(b []byte) (int, error) { + // If we're past the end, EOF. + if s.offset >= s.size { + return 0, io.EOF + } + + // actually seek + for s.offset != s.realOffset { + off, err := s.reader.Seek(s.offset, io.SeekStart) + if err != nil { + return 0, err + } + s.realOffset = off + } + off, err := s.reader.Read(b) + s.realOffset += int64(off) + s.offset += int64(off) + return off, err +} + +func (s *lazySeeker) Close() error { + if closer, ok := s.reader.(io.Closer); ok { + return closer.Close() + } + return nil +} diff --git a/gateway/lazyseek_test.go b/gateway/lazyseek_test.go new file mode 100644 index 0000000000..b10b6a2754 --- /dev/null +++ b/gateway/lazyseek_test.go @@ -0,0 +1,98 @@ +package gateway + +import ( + "fmt" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +type badSeeker struct { + io.ReadSeeker +} + +var errBadSeek = fmt.Errorf("bad seeker") + +func (bs badSeeker) Seek(offset int64, whence int) (int64, error) { + off, err := bs.ReadSeeker.Seek(0, io.SeekCurrent) + if err != nil { + panic(err) + } + return off, errBadSeek +} + +func TestLazySeekerError(t *testing.T) { + underlyingBuffer := strings.NewReader("fubar") + s := &lazySeeker{ + reader: badSeeker{underlyingBuffer}, + size: underlyingBuffer.Size(), + } + off, err := s.Seek(0, io.SeekEnd) + assert.Nil(t, err) + assert.Equal(t, s.size, off, "expected to seek to the end") + + // shouldn't have actually seeked. 
+ b, err := io.ReadAll(s) + assert.Nil(t, err) + assert.Equal(t, 0, len(b), "expected to read nothing") + + // shouldn't need to actually seek. + off, err = s.Seek(0, io.SeekStart) + assert.Nil(t, err) + assert.Equal(t, int64(0), off, "expected to seek to the start") + + b, err = io.ReadAll(s) + assert.Nil(t, err) + assert.Equal(t, "fubar", string(b), "expected to read string") + + // should fail the second time. + off, err = s.Seek(0, io.SeekStart) + assert.Nil(t, err) + assert.Equal(t, int64(0), off, "expected to seek to the start") + + // right here... + b, err = io.ReadAll(s) + assert.NotNil(t, err) + assert.Equal(t, errBadSeek, err) + assert.Equal(t, 0, len(b), "expected to read nothing") +} + +func TestLazySeeker(t *testing.T) { + underlyingBuffer := strings.NewReader("fubar") + s := &lazySeeker{ + reader: underlyingBuffer, + size: underlyingBuffer.Size(), + } + expectByte := func(b byte) { + t.Helper() + var buf [1]byte + n, err := io.ReadFull(s, buf[:]) + assert.Nil(t, err) + assert.Equal(t, 1, n, "expected to read one byte, read %d", n) + assert.Equal(t, b, buf[0]) + } + expectSeek := func(whence int, off, expOff int64, expErr string) { + t.Helper() + n, err := s.Seek(off, whence) + if expErr == "" { + assert.Nil(t, err) + } else { + assert.EqualError(t, err, expErr) + } + assert.Equal(t, expOff, n) + } + + expectSeek(io.SeekEnd, 0, s.size, "") + b, err := io.ReadAll(s) + assert.Nil(t, err) + assert.Equal(t, 0, len(b), "expected to read nothing") + expectSeek(io.SeekEnd, -1, s.size-1, "") + expectByte('r') + expectSeek(io.SeekStart, 0, 0, "") + expectByte('f') + expectSeek(io.SeekCurrent, 1, 2, "") + expectByte('b') + expectSeek(io.SeekCurrent, -100, 3, "invalid seek offset") +} diff --git a/gateway/testdata/fixtures.car b/gateway/testdata/fixtures.car new file mode 100644 index 0000000000..e01ca5c31c Binary files /dev/null and b/gateway/testdata/fixtures.car differ diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..e3bf4c64e8 --- /dev/null +++ b/go.mod @@ -0,0 +1,184 @@ +module github.com/ipfs/boxo + +go 1.19 + +require ( + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a + github.com/benbjohnson/clock v1.3.0 + github.com/cenkalti/backoff v2.2.1+incompatible + github.com/cespare/xxhash v1.1.0 + github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 + github.com/cskr/pubsub v1.0.2 + github.com/dustin/go-humanize v1.0.0 + github.com/gabriel-vasile/mimetype v1.4.1 + github.com/gogo/protobuf v1.3.2 + github.com/google/uuid v1.3.0 + github.com/gorilla/mux v1.8.0 + github.com/hashicorp/golang-lru v0.5.4 + github.com/ipfs/bbloom v0.0.4 + github.com/ipfs/go-bitfield v1.1.0 + github.com/ipfs/go-cid v0.4.0 + github.com/ipfs/go-cidutil v0.1.0 + github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-detect-race v0.0.1 + github.com/ipfs/go-ds-badger v0.3.0 + github.com/ipfs/go-ds-leveldb v0.5.0 + github.com/ipfs/go-ipfs-blockstore v1.3.0 + github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-delay v0.0.1 + github.com/ipfs/go-ipfs-redirects-file v0.1.1 + github.com/ipfs/go-ipld-cbor v0.0.6 + github.com/ipfs/go-ipld-format v0.4.0 + github.com/ipfs/go-ipld-legacy v0.1.1 + github.com/ipfs/go-libipfs v0.6.1 + github.com/ipfs/go-log v1.0.5 + github.com/ipfs/go-log/v2 v2.5.1 + github.com/ipfs/go-merkledag v0.10.0 + github.com/ipfs/go-metrics-interface v0.0.1 + github.com/ipfs/go-peertaskqueue v0.8.1 + github.com/ipfs/go-unixfsnode v1.6.0 + github.com/ipld/go-car v0.6.1-0.20230320234631-0db9700abe2e + github.com/ipld/go-car/v2 
v2.8.3-0.20230320234631-0db9700abe2e + github.com/ipld/go-codec-dagpb v1.6.0 + github.com/ipld/go-ipld-prime v0.20.0 + github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd + github.com/jbenet/goprocess v0.1.4 + github.com/libp2p/go-buffer-pool v0.1.0 + github.com/libp2p/go-libp2p v0.26.3 + github.com/libp2p/go-libp2p-kad-dht v0.21.1 + github.com/libp2p/go-libp2p-record v0.2.0 + github.com/libp2p/go-libp2p-testing v0.12.0 + github.com/libp2p/go-msgio v0.3.0 + github.com/miekg/dns v1.1.50 + github.com/mr-tron/base58 v1.2.0 + github.com/multiformats/go-base32 v0.1.0 + github.com/multiformats/go-multiaddr v0.8.0 + github.com/multiformats/go-multiaddr-dns v0.3.1 + github.com/multiformats/go-multibase v0.1.1 + github.com/multiformats/go-multicodec v0.8.1 + github.com/multiformats/go-multihash v0.2.1 + github.com/multiformats/go-multistream v0.4.1 + github.com/multiformats/go-varint v0.0.7 + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 + github.com/pkg/errors v0.9.1 + github.com/polydawn/refmt v0.89.0 + github.com/prometheus/client_golang v1.14.0 + github.com/rogpeppe/go-internal v1.9.0 + github.com/samber/lo v1.36.0 + github.com/spaolacci/murmur3 v1.1.0 + github.com/stretchr/testify v1.8.2 + github.com/urfave/cli/v2 v2.25.0 + github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f + go.opencensus.io v0.24.0 + go.opentelemetry.io/otel v1.13.0 + go.opentelemetry.io/otel/trace v1.13.0 + go.uber.org/atomic v1.10.0 + go.uber.org/multierr v1.9.0 + go.uber.org/zap v1.24.0 + golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.5.0 +) + +require ( + github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/dgraph-io/badger v1.6.2 // indirect + github.com/dgraph-io/ristretto v0.0.2 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/huin/goupnp v1.0.3 // indirect + github.com/ipfs/go-block-format v0.1.2-0.20230320222416-ba43dc7de213 // indirect + github.com/ipfs/go-blockservice v0.5.0 
// indirect + github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect + github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect + github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipns v0.3.0 // indirect + github.com/ipfs/go-unixfs v0.4.5-0.20230321002036-311d68ceee08 // indirect + github.com/ipfs/go-verifcid v0.0.2 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/klauspost/compress v1.15.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.3 // indirect + github.com/koron/go-ssdp v0.0.3 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect + github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.0 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo/v2 v2.5.1 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-19 v0.2.1 // indirect + github.com/quic-go/qtls-go1-20 v0.1.1 // indirect + github.com/quic-go/quic-go v0.33.0 // indirect + github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect + github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + go.uber.org/dig v1.15.0 // indirect + go.uber.org/fx v1.18.2 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/tools v0.3.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.6 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + 
lukechampine.com/blake3 v1.1.7 // indirect + nhooyr.io/websocket v1.8.7 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..5de81e9a41 --- /dev/null +++ b/go.sum @@ -0,0 +1,1174 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod 
h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod 
h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gabriel-vasile/mimetype v1.4.1 h1:TRWk7se+TOjCYgRth7+1/OYLNiRNIotknkFtf/dnN7Q= +github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod 
h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
+github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
+github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
+github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
+github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
+github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
+github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
+github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
+github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ=
+github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
+github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
+github.com/ipfs/go-block-format v0.1.2-0.20230320222416-ba43dc7de213 h1:GxBB4xUUZj+DwG+njsJYURAa7RS8Gu0FdwYNDLRZhqk=
+github.com/ipfs/go-block-format v0.1.2-0.20230320222416-ba43dc7de213/go.mod h1:4G99sJwXnroF0DtCHrujotIAEedtJn2olyQyBIzoWS8=
+github.com/ipfs/go-blockservice v0.5.0 h1:B2mwhhhVQl2ntW2EIpaWPwSCxSuqr5fFA93Ms4bYLEY=
+github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w=
+github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
+github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
+github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
+github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
+github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA=
+github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
+github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
+github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
+github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=
+github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek=
+github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo=
+github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q=
+github.com/ipfs/go-ipfs-blockstore v1.3.0 h1:m2EXaWgwTzAfsmt5UdJ7Is6l4gJcaM/A12XwJyvYvMM=
+github.com/ipfs/go-ipfs-blockstore v1.3.0/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE=
+github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
+github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
+github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8=
+github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8=
+github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
+github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
+github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
+github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q=
+github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU=
+github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y=
+github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s=
+github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
+github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
+github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8=
+github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk=
+github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc=
+github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
+github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
+github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
+github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0=
+github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA=
+github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
+github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
+github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ=
+github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
+github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc=
+github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg=
+github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A=
+github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24=
+github.com/ipfs/go-libipfs v0.6.1 h1:OSO9cm1H3r4OXfP0MP1Q5UhTnhd2fByGl6CVYyz/Rhk=
+github.com/ipfs/go-libipfs v0.6.1/go.mod h1:FmhKgxMOQA572TK5DA3MZ5GL44ZqsMHIrkgK4gLn4A8=
+github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
+github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
+github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
+github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
+github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
+github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
+github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/ipfs/go-merkledag v0.10.0 h1:IUQhj/kzTZfam4e+LnaEpoiZ9vZF6ldimVlby+6OXL4=
+github.com/ipfs/go-merkledag v0.10.0/go.mod h1:zkVav8KiYlmbzUzNM6kENzkdP5+qR7+2mCwxkQ6GIj8=
+github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
+github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
+github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg=
+github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU=
+github.com/ipfs/go-unixfs v0.4.5-0.20230321002036-311d68ceee08 h1:9LBAo8SC8lxcXT3fY+qpIqpzwCahB7v26b3Bhb9KEMA=
+github.com/ipfs/go-unixfs v0.4.5-0.20230321002036-311d68ceee08/go.mod h1:WoNBxHWDOE2KowODZfEX2+NXR5DJGE7lV1h8870DpoY=
+github.com/ipfs/go-unixfsnode v1.6.0 h1:JOSA02yaLylRNi2rlB4ldPr5VcZhcnaIVj5zNLcOjDo=
+github.com/ipfs/go-unixfsnode v1.6.0/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk=
+github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs=
+github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU=
+github.com/ipld/go-car v0.6.1-0.20230320234631-0db9700abe2e h1:cwXJD7nqDVGts41pnDjRdnTjE+jqRqMPnnl+RGSoa8U=
+github.com/ipld/go-car v0.6.1-0.20230320234631-0db9700abe2e/go.mod h1:Ug2htCfKi+ftd54ocCDcvc+Yj3O2xv7hfw+tauTr3Q8=
+github.com/ipld/go-car/v2 v2.8.3-0.20230320234631-0db9700abe2e h1:CpmVTo3YgNIMXbXYtxOVQ9miC4B06fHpvjb5cED6e0k=
+github.com/ipld/go-car/v2 v2.8.3-0.20230320234631-0db9700abe2e/go.mod h1:3Vx8oMPD4JMZ/vPNPmzRLLfVlLNb1w8oxX/RWa+eeNA=
+github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc=
+github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s=
+github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8=
+github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g=
+github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M=
+github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo=
+github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
+github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8=
+github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
+github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
+github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
+github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
+github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM=
+github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
+github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw=
+github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI=
+github.com/libp2p/go-libp2p-kad-dht v0.21.1 h1:xpfp8/t9+X2ip1l8Umap1/UGNnJ3RHJgKGAEsnRAlTo=
+github.com/libp2p/go-libp2p-kad-dht v0.21.1/go.mod h1:Oy8wvbdjpB70eS5AaFaI68tOtrdo3KylTvXDjikxqFo=
+github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA=
+github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U=
+github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
+github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg=
+github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM=
+github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
+github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
+github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
+github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
+github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
+github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
+github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
+github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
+github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
+github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
+github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
+github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
+github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
+github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
+github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
+github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8=
+github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
+github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
+github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
+github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
+github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
+github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo=
+github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
+github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
+github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
+github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk=
+github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
+github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/qtls-go1-19 v0.2.1 h1:aJcKNMkH5ASEJB9FXNeZCyTEIHU1J7MmHyz1Q1TSG1A=
+github.com/quic-go/qtls-go1-19 v0.2.1/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
+github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3wSwQk=
+github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
+github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
+github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
+github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk=
+github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
+github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
+github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/samber/lo v1.36.0 h1:4LaOxH1mHnbDGhTVE0i1z8v/lWaQW8AIfOD3HU4mSaw=
+github.com/samber/lo v1.36.0/go.mod h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M=
+github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
+github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ=
+github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM=
+github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.25.0 h1:ykdZKuQey2zq0yin/l7JOm9Mh+pg72ngYMeB0ABn6q8=
+github.com/urfave/cli/v2 v2.25.0/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/warpfork/go-testmark v0.11.0 h1:J6LnV8KpceDvo7spaNU4+DauH2n1x+6RaO2rJrmpQ9U=
+github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4=
+github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
+github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0=
+github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ=
+github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
+github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o=
+github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
+github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
+github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
+github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y=
+go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg=
+go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY=
+go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE=
+go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM=
+go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU=
+go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w=
+golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod 
h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools 
v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto 
v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/internal/test/flaky.go b/internal/test/flaky.go new file mode 100644 index 0000000000..6319e5247c --- /dev/null +++ b/internal/test/flaky.go @@ -0,0 +1,16 @@ +package test + +import ( + "os" + "testing" +) + +// Flaky will skip the test if the RUN_FLAKY_TESTS environment variable is 
empty. +func Flaky(t *testing.T) { + // We can't use flags because it fails for tests that do not import this package + if os.Getenv("RUN_FLAKY_TESTS") != "" { + return + } + + t.Skip("flaky") +} diff --git a/ipld/car/.gitattributes b/ipld/car/.gitattributes new file mode 100644 index 0000000000..6f95229927 --- /dev/null +++ b/ipld/car/.gitattributes @@ -0,0 +1,2 @@ +# To prevent CRLF breakages on Windows for fragile files, like testdata. +* -text diff --git a/ipld/car/.gitignore b/ipld/car/.gitignore new file mode 100644 index 0000000000..b3f7c18ae7 --- /dev/null +++ b/ipld/car/.gitignore @@ -0,0 +1,4 @@ +car/car +main +coverage.txt +dist/ diff --git a/ipld/car/README.md b/ipld/car/README.md new file mode 100644 index 0000000000..b91dcf148d --- /dev/null +++ b/ipld/car/README.md @@ -0,0 +1,68 @@ +go-car (go!) +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![Go Reference](https://pkg.go.dev/badge/github.com/boxo/ipld/car.svg)](https://pkg.go.dev/github.com/boxo/ipld/car) + +> Work with car (Content addressed ARchive) files! + +This is a Go implementation of the [CAR specifications](https://ipld.io/specs/transport/car/), both [CARv1](https://ipld.io/specs/transport/car/carv1/) and [CARv2](https://ipld.io/specs/transport/car/carv2/). + +Matching the two format versions, there are two major module versions: + +* [`go-car/v2`](v2/) is geared towards reading and writing CARv2 files, and also + supports consuming CARv1 files and using CAR files as an IPFS blockstore. +* `go-car`, in the root directory, only supports reading and writing CARv1 files. + +Most users should use v2, especially for new software, since the v2 API transparently supports both CAR formats. + +# **Unstable** + +This package will be refactored and thinned out soon. You can find an +implementation that supports indexes and go-ipld-prime at [`ipld/go-car`](https://github.com/ipld/go-car). + +## Usage / Installation + +This repository provides a `car` binary that can be used for creating, extracting, and working with car files. + +To install the latest version of `car`, run: +```shell script +go install github.com/boxo/ipld/car/cmd/car@latest +``` + +More information about this binary is available in [`cmd/car`](cmd/car/). + + +## Features + +[CARv2](v2) features: +* [Generate index](https://pkg.go.dev/github.com/boxo/ipld/car/v2#GenerateIndex) from an existing CARv1 file. +* [Wrap](https://pkg.go.dev/github.com/boxo/ipld/car/v2#WrapV1) CARv1 files into a CARv2 with automatic index generation. +* Random access to blocks in a CAR file given their CID via the [Read-Only blockstore](https://pkg.go.dev/github.com/boxo/ipld/car/v2/blockstore#NewReadOnly) API, with transparent support for both CARv1 and CARv2. +* Write CARv2 files via the [Read-Write blockstore](https://pkg.go.dev/github.com/boxo/ipld/car/v2/blockstore#OpenReadWrite) API, with support for appending blocks to an existing CARv2 file and resumption from a partially written CARv2 file. +* Individual access to the [inner CARv1 data payload](https://pkg.go.dev/github.com/boxo/ipld/car/v2#Reader.DataReader) and [index](https://pkg.go.dev/github.com/boxo/ipld/car/v2#Reader.IndexReader) of a CARv2 file via the `Reader` API. + + +## API Documentation + +See docs on [pkg.go.dev](https://pkg.go.dev/github.com/boxo/ipld/car).
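+ +For a quick orientation, here is a minimal sketch of a CARv1 round trip with this root package. It is modeled on the `TestRoundtrip` test in this change and assumes the `github.com/ipfs/boxo` import paths used by those test files (the badge URLs above differ); the in-memory DAG service and blockstore come from go-merkledag's test helpers rather than a production datastore. + +```go +package main + +import ( + "bytes" + "context" + "fmt" + + car "github.com/ipfs/boxo/ipld/car" + "github.com/ipfs/boxo/ipld/merkledag" + dstest "github.com/ipfs/boxo/ipld/merkledag/test" + cid "github.com/ipfs/go-cid" +) + +func main() { + ctx := context.Background() + + // Assemble a tiny DAG in an in-memory DAG service (test helper). + dserv := dstest.Mock() + leaf := merkledag.NewRawNode([]byte("hello")) + root := &merkledag.ProtoNode{} + if err := root.AddNodeLink("leaf", leaf); err != nil { + panic(err) + } + if err := dserv.Add(ctx, leaf); err != nil { + panic(err) + } + if err := dserv.Add(ctx, root); err != nil { + panic(err) + } + + // Write a CARv1 containing the root and everything reachable from it. + var buf bytes.Buffer + if err := car.WriteCar(ctx, dserv, []cid.Cid{root.Cid()}, &buf); err != nil { + panic(err) + } + + // Load the archive back into a fresh blockstore and inspect the header. + bserv := dstest.Bserv() + ch, err := car.LoadCar(ctx, bserv.Blockstore(), &buf) + if err != nil { + panic(err) + } + fmt.Println("version:", ch.Version, "roots:", ch.Roots) +} +``` + +`LoadCar` batches blocks via `PutMany` when the destination store supports it, so the same pattern holds for larger archives.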
+ +## Examples + +Here is a short list of other examples from the documentation: + +* [Wrap an existing CARv1 file into an indexed CARv2 file](https://pkg.go.dev/github.com/boxo/ipld/car/v2#example-WrapV1File) +* [Open read-only blockstore from a CAR file](https://pkg.go.dev/github.com/boxo/ipld/car/v2/blockstore#example-OpenReadOnly) +* [Open read-write blockstore from a CAR file](https://pkg.go.dev/github.com/boxo/ipld/car/v2/blockstore#example-OpenReadWrite) +* [Read the index from an existing CARv2 file](https://pkg.go.dev/github.com/boxo/ipld/car/v2/index#example-ReadFrom) +* [Extract the index from a CARv2 file and store it as a separate file](https://pkg.go.dev/github.com/boxo/ipld/car/v2/index#example-WriteTo) + +## Contribute + +PRs are welcome! + +When editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +Apache-2.0/MIT © Protocol Labs diff --git a/ipld/car/car.go b/ipld/car/car.go new file mode 100644 index 0000000000..c4cfb2477d --- /dev/null +++ b/ipld/car/car.go @@ -0,0 +1,223 @@ +package car + +import ( + "bufio" + "context" + "fmt" + "io" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/ipld/merkledag" + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + format "github.com/ipfs/go-ipld-format" + + util "github.com/ipfs/boxo/ipld/car/util" +) + +func init() { + cbor.RegisterCborType(CarHeader{}) +} + +type Store interface { + Put(context.Context, blocks.Block) error +} + +type ReadStore interface { + Get(context.Context, cid.Cid) (blocks.Block, error) +} + +type CarHeader struct { + Roots []cid.Cid + Version uint64 +} + +type carWriter struct { + ds format.NodeGetter + w io.Writer + walk WalkFunc +} + +type WalkFunc func(format.Node) ([]*format.Link, error) + +func WriteCar(ctx context.Context, ds format.NodeGetter, roots []cid.Cid, w io.Writer, options ...merkledag.WalkOption) error { + return WriteCarWithWalker(ctx, ds, roots, w, DefaultWalkFunc, options...)
+} + +func WriteCarWithWalker(ctx context.Context, ds format.NodeGetter, roots []cid.Cid, w io.Writer, walk WalkFunc, options ...merkledag.WalkOption) error { + h := &CarHeader{ + Roots: roots, + Version: 1, + } + + if err := WriteHeader(h, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + cw := &carWriter{ds: ds, w: w, walk: walk} + seen := cid.NewSet() + for _, r := range roots { + if err := merkledag.Walk(ctx, cw.enumGetLinks, r, seen.Visit, options...); err != nil { + return err + } + } + return nil +} + +func DefaultWalkFunc(nd format.Node) ([]*format.Link, error) { + return nd.Links(), nil +} + +// ReadHeader reads and decodes the CBOR header from the front of a CAR stream. +func ReadHeader(br *bufio.Reader) (*CarHeader, error) { + hb, err := util.LdRead(br) + if err != nil { + return nil, err + } + + var ch CarHeader + if err := cbor.DecodeInto(hb, &ch); err != nil { + return nil, fmt.Errorf("invalid header: %v", err) + } + + return &ch, nil +} + +// WriteHeader encodes the header as CBOR and writes it as a length-prefixed section. +func WriteHeader(h *CarHeader, w io.Writer) error { + hb, err := cbor.DumpObject(h) + if err != nil { + return err + } + + return util.LdWrite(w, hb) +} + +// HeaderSize returns the number of bytes the encoded header occupies on the wire. +func HeaderSize(h *CarHeader) (uint64, error) { + hb, err := cbor.DumpObject(h) + if err != nil { + return 0, err + } + + return util.LdSize(hb), nil +} + +func (cw *carWriter) enumGetLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { + nd, err := cw.ds.Get(ctx, c) + if err != nil { + return nil, err + } + + if err := cw.writeNode(ctx, nd); err != nil { + return nil, err + } + + return cw.walk(nd) +} + +func (cw *carWriter) writeNode(ctx context.Context, nd format.Node) error { + return util.LdWrite(cw.w, nd.Cid().Bytes(), nd.RawData()) +} + +type CarReader struct { + br *bufio.Reader + Header *CarHeader +} + +func NewCarReader(r io.Reader) (*CarReader, error) { + br := bufio.NewReader(r) + ch, err := ReadHeader(br) + if err != nil { + return nil, err + } + + if ch.Version != 1 { + return nil, fmt.Errorf("invalid car version: %d", ch.Version) + } + + if len(ch.Roots) == 0 { + return nil, fmt.Errorf("empty car, no roots") + } + + return &CarReader{ + br: br, + Header: ch, + }, nil +} + +// Next returns the next block in the CAR, verifying that the data matches its CID. +func (cr *CarReader) Next() (blocks.Block, error) { + c, data, err := util.ReadNode(cr.br) + if err != nil { + return nil, err + } + + hashed, err := c.Prefix().Sum(data) + if err != nil { + return nil, err + } + + if !hashed.Equals(c) { + return nil, fmt.Errorf("mismatch in content integrity, expected: %s, got: %s", c, hashed) + } + + return blocks.NewBlockWithCid(data, c) +} + +type batchStore interface { + PutMany(context.Context, []blocks.Block) error +} + +// LoadCar reads a CARv1 from r and puts every block into s, returning the +// decoded header. Batched puts are used when s supports PutMany. +func LoadCar(ctx context.Context, s Store, r io.Reader) (*CarHeader, error) { + cr, err := NewCarReader(r) + if err != nil { + return nil, err + } + + if bs, ok := s.(batchStore); ok { + return loadCarFast(ctx, bs, cr) + } + + return loadCarSlow(ctx, s, cr) +} + +func loadCarFast(ctx context.Context, s batchStore, cr *CarReader) (*CarHeader, error) { + var buf []blocks.Block + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + if len(buf) > 0 { + if err := s.PutMany(ctx, buf); err != nil { + return nil, err + } + } + return cr.Header, nil + } + return nil, err + } + + buf = append(buf, blk) + + // Flush in batches of ~1000 blocks to bound memory use. + if len(buf) > 1000 { + if err := s.PutMany(ctx, buf); err != nil { + return nil, err + } + buf = buf[:0] + } + } +} + +func loadCarSlow(ctx context.Context, s Store, cr *CarReader) (*CarHeader, error) { + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + return cr.Header, nil + } + return nil, err + } + + if err := s.Put(ctx, blk); err != nil { +
return nil, err + } + } +} diff --git a/ipld/car/car_test.go b/ipld/car/car_test.go new file mode 100644 index 0000000000..3b1a8faa27 --- /dev/null +++ b/ipld/car/car_test.go @@ -0,0 +1,229 @@ +package car_test + +import ( + "bytes" + "context" + "encoding/hex" + "io" + "strings" + "testing" + + car "github.com/ipfs/boxo/ipld/car" + "github.com/ipfs/boxo/ipld/merkledag" + dstest "github.com/ipfs/boxo/ipld/merkledag/test" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" +) + +func assertAddNodes(t *testing.T, ds format.DAGService, nds ...format.Node) { + for _, nd := range nds { + if err := ds.Add(context.Background(), nd); err != nil { + t.Fatal(err) + } + } +} + +func TestRoundtrip(t *testing.T) { + ctx := context.Background() + dserv := dstest.Mock() + a := merkledag.NewRawNode([]byte("aaaa")) + b := merkledag.NewRawNode([]byte("bbbb")) + c := merkledag.NewRawNode([]byte("cccc")) + + nd1 := &merkledag.ProtoNode{} + nd1.AddNodeLink("cat", a) + + nd2 := &merkledag.ProtoNode{} + nd2.AddNodeLink("first", nd1) + nd2.AddNodeLink("dog", b) + + nd3 := &merkledag.ProtoNode{} + nd3.AddNodeLink("second", nd2) + nd3.AddNodeLink("bear", c) + + assertAddNodes(t, dserv, a, b, c, nd1, nd2, nd3) + + buf := new(bytes.Buffer) + if err := car.WriteCar(context.Background(), dserv, []cid.Cid{nd3.Cid()}, buf); err != nil { + t.Fatal(err) + } + + bserv := dstest.Bserv() + ch, err := car.LoadCar(ctx, bserv.Blockstore(), buf) + if err != nil { + t.Fatal(err) + } + + if len(ch.Roots) != 1 { + t.Fatal("should have one root") + } + + if !ch.Roots[0].Equals(nd3.Cid()) { + t.Fatal("got wrong cid") + } + + bs := bserv.Blockstore() + for _, nd := range []format.Node{a, b, c, nd1, nd2, nd3} { + has, err := bs.Has(ctx, nd.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("should have cid in blockstore") + } + } +} + +// fixture is a clean single-block, single-root CAR +const fixtureStr = "3aa265726f6f747381d82a58250001711220151fe9e73c6267a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80b6776657273696f6e012c01711220151fe9e73c6267a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80ba165646f646779f5" + +func TestEOFHandling(t *testing.T) { + fixture, err := hex.DecodeString(fixtureStr) + if err != nil { + t.Fatal(err) + } + + load := func(t *testing.T, byts []byte) *car.CarReader { + cr, err := car.NewCarReader(bytes.NewReader(byts)) + if err != nil { + t.Fatal(err) + } + + blk, err := cr.Next() + if err != nil { + t.Fatal(err) + } + if blk.Cid().String() != "bafyreiavd7u6opdcm6tqmddpnrgmvfb4enxuwglhenejmchnwqvixd5ibm" { + t.Fatal("unexpected CID") + } + + return cr + } + + t.Run("CleanEOF", func(t *testing.T) { + cr := load(t, fixture) + + blk, err := cr.Next() + if err != io.EOF { + t.Fatal("Didn't get expected EOF") + } + if blk != nil { + t.Fatal("EOF returned unexpected block") + } + }) + + t.Run("BadVarint", func(t *testing.T) { + fixtureBadVarint := append(fixture, 160) + cr := load(t, fixtureBadVarint) + + blk, err := cr.Next() + if err != io.ErrUnexpectedEOF { + t.Fatal("Didn't get unexpected EOF") + } + if blk != nil { + t.Fatal("EOF returned unexpected block") + } + }) + + t.Run("TruncatedBlock", func(t *testing.T) { + fixtureTruncatedBlock := append(fixture, 100, 0, 0) + cr := load(t, fixtureTruncatedBlock) + + blk, err := cr.Next() + if err != io.ErrUnexpectedEOF { + t.Fatal("Didn't get unexpected EOF") + } + if blk != nil { + t.Fatal("EOF returned unexpected block") + } + }) +} + +func TestBadHeaders(t *testing.T) { + testCases := []struct { + name string + hex string + errStr
string // either the whole error string + errPfx string // or just the prefix + }{ + { + "{version:2}", + "0aa16776657273696f6e02", + "invalid car version: 2", + "", + }, + { + // an unfortunate error because we don't use a pointer + "{roots:[baeaaaa3bmjrq]}", + "13a165726f6f747381d82a480001000003616263", + "invalid car version: 0", + "", + }, { + "{version:\"1\",roots:[baeaaaa3bmjrq]}", + "1da265726f6f747381d82a4800010000036162636776657273696f6e6131", + "", "invalid header: ", + }, { + "{version:1}", + "0aa16776657273696f6e01", + "empty car, no roots", + "", + }, { + "{version:1,roots:{cid:baeaaaa3bmjrq}}", + "20a265726f6f7473a163636964d82a4800010000036162636776657273696f6e01", + "", + "invalid header: ", + }, { + "{version:1,roots:[baeaaaa3bmjrq],blip:true}", + "22a364626c6970f565726f6f747381d82a4800010000036162636776657273696f6e01", + "", + "invalid header: ", + }, { + "[1,[]]", + "03820180", + "", + "invalid header: ", + }, { + // this is an unfortunate error, it'd be nice to catch it better but it's + // very unlikely we'd ever see this in practice + "null", + "01f6", + "", + "invalid car version: 0", + }, + } + + makeCar := func(t *testing.T, byts string) error { + fixture, err := hex.DecodeString(byts) + if err != nil { + t.Fatal(err) + } + _, err = car.NewCarReader(bytes.NewReader(fixture)) + return err + } + + t.Run("Sanity check {version:1,roots:[baeaaaa3bmjrq]}", func(t *testing.T) { + err := makeCar(t, "1ca265726f6f747381d82a4800010000036162636776657273696f6e01") + if err != nil { + t.Fatal(err) + } + }) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := makeCar(t, tc.hex) + if err == nil { + t.Fatal("expected error from bad header, didn't get one") + } + if tc.errStr != "" { + if err.Error() != tc.errStr { + t.Fatalf("bad error: %v", err) + } + } else { + if !strings.HasPrefix(err.Error(), tc.errPfx) { + t.Fatalf("bad error: %v", err) + } + } + }) + } +} diff --git a/ipld/car/fuzz_test.go b/ipld/car/fuzz_test.go new file mode 100644 index 0000000000..4f4a3f55cf --- /dev/null +++ b/ipld/car/fuzz_test.go @@ -0,0 +1,34 @@ +//go:build go1.18 + +package car_test + +import ( + "bytes" + "encoding/hex" + "io" + "testing" + + car "github.com/ipfs/boxo/ipld/car" +) + +func FuzzCarReader(f *testing.F) { + fixture, err := hex.DecodeString(fixtureStr) + if err != nil { + f.Fatal(err) + } + f.Add(fixture) + + f.Fuzz(func(t *testing.T, data []byte) { + r, err := car.NewCarReader(bytes.NewReader(data)) + if err != nil { + return + } + + for { + _, err = r.Next() + if err == io.EOF { + return + } + } + }) +} diff --git a/ipld/car/options.go b/ipld/car/options.go new file mode 100644 index 0000000000..e317f9cc9e --- /dev/null +++ b/ipld/car/options.go @@ -0,0 +1,51 @@ +package car + +import "math" + +// options holds the configured options after applying a number of +// Option funcs. +type options struct { + TraverseLinksOnlyOnce bool + MaxTraversalLinks uint64 +} + +// Option describes an option which affects behavior when +// interacting with the interface. +type Option func(*options) + +// TraverseLinksOnlyOnce prevents the traversal engine from visiting +// the same links more than once. +// +// This can be an efficient strategy for an exhaustive selector where it's known +// that repeat visits won't impact the completeness of execution. However, it +// should be used with caution with most other selectors, as repeat visits of +// links for different reasons during selector execution can be valid and +// necessary to perform a full traversal.
+func TraverseLinksOnlyOnce() Option { + return func(sco *options) { + sco.TraverseLinksOnlyOnce = true + } +} + +// MaxTraversalLinks changes the allowed number of links a selector traversal +// can execute before failing. +// +// Note that setting this option may cause an error to be returned from selector +// execution when building a SelectiveCar. +func MaxTraversalLinks(maxTraversalLinks uint64) Option { + return func(sco *options) { + sco.MaxTraversalLinks = maxTraversalLinks + } +} + +// applyOptions applies given opts and returns the resulting options. +func applyOptions(opt ...Option) options { + opts := options{ + TraverseLinksOnlyOnce: false, // default: recurse until exhausted + MaxTraversalLinks: math.MaxInt64, // default: traverse all + } + for _, o := range opt { + o(&opts) + } + return opts +} diff --git a/ipld/car/options_test.go b/ipld/car/options_test.go new file mode 100644 index 0000000000..250c672037 --- /dev/null +++ b/ipld/car/options_test.go @@ -0,0 +1,27 @@ +package car + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestApplyOptions_SetsExpectedDefaults(t *testing.T) { + require.Equal(t, options{ + MaxTraversalLinks: math.MaxInt64, + TraverseLinksOnlyOnce: false, + }, applyOptions()) +} + +func TestApplyOptions_AppliesOptions(t *testing.T) { + require.Equal(t, + options{ + MaxTraversalLinks: 123, + TraverseLinksOnlyOnce: true, + }, + applyOptions( + MaxTraversalLinks(123), + TraverseLinksOnlyOnce(), + )) +} diff --git a/ipld/car/selectivecar.go b/ipld/car/selectivecar.go new file mode 100644 index 0000000000..d4dbf11fed --- /dev/null +++ b/ipld/car/selectivecar.go @@ -0,0 +1,289 @@ +package car + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + + util "github.com/ipfs/boxo/ipld/car/util" + cid "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" + + // The dag-pb and raw codecs are necessary for unixfs. + dagpb "github.com/ipld/go-codec-dagpb" + _ "github.com/ipld/go-ipld-prime/codec/raw" +) + +// Dag is a root/selector combo to put into a car +type Dag struct { + Root cid.Cid + Selector ipld.Node +} + +// Block is all information and metadata about a block that is part of a car file +type Block struct { + BlockCID cid.Cid + Data []byte + Offset uint64 + Size uint64 +} + +// SelectiveCar is a car file based on root + selector combos instead of just +// a single root and complete dag walk +type SelectiveCar struct { + ctx context.Context + dags []Dag + store ReadStore + opts options +} + +// OnCarHeaderFunc is called during traversal when the header is created +type OnCarHeaderFunc func(CarHeader) error + +// OnNewCarBlockFunc is called during traversal when a new unique block is encountered +type OnNewCarBlockFunc func(Block) error + +// SelectiveCarPrepared is a SelectiveCar that has already been traversed, such that it +// can be written more quickly with Dump.
It also contains metadata already collected about +// the Car file, like the size and number of blocks that go into it +type SelectiveCarPrepared struct { + SelectiveCar + size uint64 + header CarHeader + cids []cid.Cid + userOnNewCarBlocks []OnNewCarBlockFunc +} + +// NewSelectiveCar creates a new SelectiveCar for the given car file based on +// a block store and a set of root+selector pairs +func NewSelectiveCar(ctx context.Context, store ReadStore, dags []Dag, opts ...Option) SelectiveCar { + return SelectiveCar{ + ctx: ctx, + store: store, + dags: dags, + opts: applyOptions(opts...), + } +} + +func (sc SelectiveCar) traverse(onCarHeader OnCarHeaderFunc, onNewCarBlock OnNewCarBlockFunc) (uint64, error) { + traverser := &selectiveCarTraverser{onCarHeader, onNewCarBlock, 0, cid.NewSet(), sc, cidlink.DefaultLinkSystem()} + traverser.lsys.StorageReadOpener = traverser.loader + return traverser.traverse() +} + +// Prepare traverses a car file and collects data on what is about to be written, but +// does not actually write the file +func (sc SelectiveCar) Prepare(userOnNewCarBlocks ...OnNewCarBlockFunc) (SelectiveCarPrepared, error) { + var header CarHeader + var cids []cid.Cid + + onCarHeader := func(h CarHeader) error { + header = h + return nil + } + onNewCarBlock := func(block Block) error { + cids = append(cids, block.BlockCID) + return nil + } + size, err := sc.traverse(onCarHeader, onNewCarBlock) + if err != nil { + return SelectiveCarPrepared{}, err + } + return SelectiveCarPrepared{sc, size, header, cids, userOnNewCarBlocks}, nil +} + +func (sc SelectiveCar) Write(w io.Writer, userOnNewCarBlocks ...OnNewCarBlockFunc) error { + onCarHeader := func(h CarHeader) error { + if err := WriteHeader(&h, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + return nil + } + onNewCarBlock := func(block Block) error { + err := util.LdWrite(w, block.BlockCID.Bytes(), block.Data) + if err != nil { + return err + } + for _, userOnNewCarBlock := range userOnNewCarBlocks { + err := userOnNewCarBlock(block) + if err != nil { + return err + } + } + return nil + } + _, err := sc.traverse(onCarHeader, onNewCarBlock) + return err +} + +// Size returns the total size in bytes of the car file that will be written +func (sc SelectiveCarPrepared) Size() uint64 { + return sc.size +} + +// Header returns the header for the car file that will be written +func (sc SelectiveCarPrepared) Header() CarHeader { + return sc.header +} + +// Cids returns the list of unique block cids that will be written to the car file +func (sc SelectiveCarPrepared) Cids() []cid.Cid { + return sc.cids +} + +// Dump writes the car file as quickly as possible based on information already +// collected +func (sc SelectiveCarPrepared) Dump(ctx context.Context, w io.Writer) error { + offset, err := HeaderSize(&sc.header) + if err != nil { + return fmt.Errorf("failed to size car header: %s", err) + } + if err := WriteHeader(&sc.header, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + for _, c := range sc.cids { + blk, err := sc.store.Get(ctx, c) + if err != nil { + return err + } + raw := blk.RawData() + size := util.LdSize(c.Bytes(), raw) + err = util.LdWrite(w, c.Bytes(), raw) + if err != nil { + return err + } + for _, userOnNewCarBlock := range sc.userOnNewCarBlocks { + err := userOnNewCarBlock(Block{ + BlockCID: c, + Data: raw, + Offset: offset, + Size: size, + }) + if err != nil { + return err + } + } + offset += size + } + return nil +} + +type selectiveCarTraverser struct { +
onCarHeader OnCarHeaderFunc + onNewCarBlock OnNewCarBlockFunc + offset uint64 + cidSet *cid.Set + sc SelectiveCar + lsys ipld.LinkSystem +} + +func (sct *selectiveCarTraverser) traverse() (uint64, error) { + err := sct.traverseHeader() + if err != nil { + return 0, err + } + err = sct.traverseBlocks() + if err != nil { + return 0, err + } + return sct.offset, nil +} + +func (sct *selectiveCarTraverser) traverseHeader() error { + roots := make([]cid.Cid, 0, len(sct.sc.dags)) + for _, carDag := range sct.sc.dags { + roots = append(roots, carDag.Root) + } + + header := CarHeader{ + Roots: roots, + Version: 1, + } + + size, err := HeaderSize(&header) + if err != nil { + return err + } + + sct.offset += size + + return sct.onCarHeader(header) +} + +func (sct *selectiveCarTraverser) loader(ctx ipld.LinkContext, lnk ipld.Link) (io.Reader, error) { + cl, ok := lnk.(cidlink.Link) + if !ok { + return nil, errors.New("incorrect link type") + } + c := cl.Cid + blk, err := sct.sc.store.Get(ctx.Ctx, c) + if err != nil { + return nil, err + } + raw := blk.RawData() + if !sct.cidSet.Has(c) { + sct.cidSet.Add(c) + size := util.LdSize(c.Bytes(), raw) + err := sct.onNewCarBlock(Block{ + BlockCID: c, + Data: raw, + Offset: sct.offset, + Size: size, + }) + if err != nil { + return nil, err + } + sct.offset += size + } + return bytes.NewReader(raw), nil +} + +func (sct *selectiveCarTraverser) traverseBlocks() error { + nsc := func(lnk ipld.Link, lctx ipld.LinkContext) (ipld.NodePrototype, error) { + // We can decode all nodes into basicnode's Any, except for + // dagpb nodes, which must explicitly use the PBNode prototype. + if lnk, ok := lnk.(cidlink.Link); ok && lnk.Cid.Prefix().Codec == 0x70 { + return dagpb.Type.PBNode, nil + } + return basicnode.Prototype.Any, nil + } + + for _, carDag := range sct.sc.dags { + parsed, err := selector.ParseSelector(carDag.Selector) + if err != nil { + return err + } + lnk := cidlink.Link{Cid: carDag.Root} + ns, _ := nsc(lnk, ipld.LinkContext{}) // nsc won't error + nd, err := sct.lsys.Load(ipld.LinkContext{Ctx: sct.sc.ctx}, lnk, ns) + if err != nil { + return err + } + prog := traversal.Progress{ + Cfg: &traversal.Config{ + Ctx: sct.sc.ctx, + LinkSystem: sct.lsys, + LinkTargetNodePrototypeChooser: nsc, + LinkVisitOnlyOnce: sct.sc.opts.TraverseLinksOnlyOnce, + }, + } + if sct.sc.opts.MaxTraversalLinks < math.MaxInt64 { + prog.Budget = &traversal.Budget{ + NodeBudget: math.MaxInt64, + LinkBudget: int64(sct.sc.opts.MaxTraversalLinks), + } + } + err = prog.WalkAdv(nd, parsed, func(traversal.Progress, ipld.Node, traversal.VisitReason) error { return nil }) + if err != nil { + return err + } + } + return nil +} diff --git a/ipld/car/selectivecar_test.go b/ipld/car/selectivecar_test.go new file mode 100644 index 0000000000..a49b99cdec --- /dev/null +++ b/ipld/car/selectivecar_test.go @@ -0,0 +1,228 @@ +package car_test + +import ( + "bytes" + "context" + "testing" + + blocks "github.com/ipfs/boxo/blocks" + car "github.com/ipfs/boxo/ipld/car" + "github.com/ipfs/boxo/ipld/merkledag" + dstest "github.com/ipfs/boxo/ipld/merkledag/test" + cid "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/stretchr/testify/require" +) + +func TestRoundtripSelective(t *testing.T) { + ctx := context.Background() + sourceBserv := 
dstest.Bserv()
+ sourceBs := sourceBserv.Blockstore()
+ dserv := merkledag.NewDAGService(sourceBserv)
+ a := merkledag.NewRawNode([]byte("aaaa"))
+ b := merkledag.NewRawNode([]byte("bbbb"))
+ c := merkledag.NewRawNode([]byte("cccc"))
+
+ nd1 := &merkledag.ProtoNode{}
+ nd1.AddNodeLink("cat", a)
+
+ nd2 := &merkledag.ProtoNode{}
+ nd2.AddNodeLink("first", nd1)
+ nd2.AddNodeLink("dog", b)
+ nd2.AddNodeLink("repeat", nd1)
+
+ nd3 := &merkledag.ProtoNode{}
+ nd3.AddNodeLink("second", nd2)
+ nd3.AddNodeLink("bear", c)
+
+ assertAddNodes(t, dserv, a, b, c, nd1, nd2, nd3)
+
+ ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
+
+ // the graph assembled above looks as follows, in order:
+ // nd3 -> [c, nd2 -> [nd1 -> a, b, nd1 -> a]]
+ // this selector starts at nd3, and traverses a link at index 1 (nd2, the second link, zero indexed)
+ // it then recursively traverses all of its children
+ // the only node skipped is 'c' -- the link at index 0 immediately below nd3
+ // the purpose is simply to show we are not writing the entire merkledag underneath
+ // nd3
+ selector := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) {
+ efsb.Insert("Links",
+ ssb.ExploreIndex(1, ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge()))))
+ }).Node()
+
+ sc := car.NewSelectiveCar(context.Background(), sourceBs, []car.Dag{{Root: nd3.Cid(), Selector: selector}})
+
+ // write car in one step
+ buf := new(bytes.Buffer)
+ blockCount := 0
+ var oneStepBlocks []car.Block
+ err := sc.Write(buf, func(block car.Block) error {
+ oneStepBlocks = append(oneStepBlocks, block)
+ blockCount++
+ return nil
+ })
+ require.Equal(t, blockCount, 5)
+ require.NoError(t, err)
+
+ // create a new builder for two-step write
+ sc2 := car.NewSelectiveCar(context.Background(), sourceBs, []car.Dag{{Root: nd3.Cid(), Selector: selector}})
+
+ // write car in two steps
+ var twoStepBlocks []car.Block
+ scp, err := sc2.Prepare(func(block car.Block) error {
+ twoStepBlocks = append(twoStepBlocks, block)
+ return nil
+ })
+ require.NoError(t, err)
+ buf2 := new(bytes.Buffer)
+ err = scp.Dump(ctx, buf2)
+ require.NoError(t, err)
+
+ // verify the preparation step correctly assessed length and blocks
+ require.Equal(t, scp.Size(), uint64(buf.Len()))
+ require.Equal(t, len(scp.Cids()), blockCount)
+
+ // verify equal data written by both methods
+ require.Equal(t, buf.Bytes(), buf2.Bytes())
+
+ // verify equal blocks were passed to user block hook funcs
+ require.Equal(t, oneStepBlocks, twoStepBlocks)
+
+ // read out the car and verify contents
+ bserv := dstest.Bserv()
+ ch, err := car.LoadCar(ctx, bserv.Blockstore(), buf)
+ require.NoError(t, err)
+ require.Equal(t, len(ch.Roots), 1)
+
+ require.True(t, ch.Roots[0].Equals(nd3.Cid()))
+
+ bs := bserv.Blockstore()
+ for _, nd := range []format.Node{a, b, nd1, nd2, nd3} {
+ has, err := bs.Has(ctx, nd.Cid())
+ require.NoError(t, err)
+ require.True(t, has)
+ }
+
+ for _, nd := range []format.Node{c} {
+ has, err := bs.Has(ctx, nd.Cid())
+ require.NoError(t, err)
+ require.False(t, has)
+ }
+}
+
+func TestNoLinkRepeatSelective(t *testing.T) {
+ sourceBserv := dstest.Bserv()
+ sourceBs := countingReadStore{bs: sourceBserv.Blockstore()}
+ dserv := merkledag.NewDAGService(sourceBserv)
+ a := merkledag.NewRawNode([]byte("aaaa"))
+ b := merkledag.NewRawNode([]byte("bbbb"))
+ c := merkledag.NewRawNode([]byte("cccc"))
+
+ nd1 := &merkledag.ProtoNode{}
+ nd1.AddNodeLink("cat", a)
+
+ nd2 := &merkledag.ProtoNode{}
+ nd2.AddNodeLink("first", nd1)
+ 
nd2.AddNodeLink("dog", b) + nd2.AddNodeLink("repeat", nd1) + + nd3 := &merkledag.ProtoNode{} + nd3.AddNodeLink("second", nd2) + nd3.AddNodeLink("bear", c) + nd3.AddNodeLink("bearagain1", c) + nd3.AddNodeLink("bearagain2", c) + nd3.AddNodeLink("bearagain3", c) + + assertAddNodes(t, dserv, a, b, c, nd1, nd2, nd3) + + t.Run("TraverseLinksOnlyOnce off", func(t *testing.T) { + sourceBs.count = 0 + sc := car.NewSelectiveCar(context.Background(), + &sourceBs, + []car.Dag{{Root: nd3.Cid(), Selector: selectorparse.CommonSelector_ExploreAllRecursively}}, + ) + + buf := new(bytes.Buffer) + blockCount := 0 + err := sc.Write(buf, func(block car.Block) error { + blockCount++ + return nil + }) + require.Equal(t, blockCount, 6) + require.Equal(t, sourceBs.count, 11) // with TraverseLinksOnlyOnce off, we expect repeat block visits because our DAG has repeat links + require.NoError(t, err) + }) + + t.Run("TraverseLinksOnlyOnce on", func(t *testing.T) { + sourceBs.count = 0 + + sc := car.NewSelectiveCar(context.Background(), + &sourceBs, + []car.Dag{{Root: nd3.Cid(), Selector: selectorparse.CommonSelector_ExploreAllRecursively}}, + car.TraverseLinksOnlyOnce(), + ) + + buf := new(bytes.Buffer) + blockCount := 0 + err := sc.Write(buf, func(block car.Block) error { + blockCount++ + return nil + }) + require.Equal(t, blockCount, 6) + require.Equal(t, sourceBs.count, 6) // only 6 blocks to load, no duplicate loading expected + require.NoError(t, err) + }) +} + +func TestLinkLimitSelective(t *testing.T) { + sourceBserv := dstest.Bserv() + sourceBs := sourceBserv.Blockstore() + dserv := merkledag.NewDAGService(sourceBserv) + a := merkledag.NewRawNode([]byte("aaaa")) + b := merkledag.NewRawNode([]byte("bbbb")) + c := merkledag.NewRawNode([]byte("cccc")) + + nd1 := &merkledag.ProtoNode{} + nd1.AddNodeLink("cat", a) + + nd2 := &merkledag.ProtoNode{} + nd2.AddNodeLink("first", nd1) + nd2.AddNodeLink("dog", b) + nd2.AddNodeLink("repeat", nd1) + + nd3 := &merkledag.ProtoNode{} + nd3.AddNodeLink("second", nd2) + nd3.AddNodeLink("bear", c) + + assertAddNodes(t, dserv, a, b, c, nd1, nd2, nd3) + + sc := car.NewSelectiveCar(context.Background(), + sourceBs, + []car.Dag{{Root: nd3.Cid(), Selector: selectorparse.CommonSelector_ExploreAllRecursively}}, + car.MaxTraversalLinks(2)) + + buf := new(bytes.Buffer) + blockCount := 0 + err := sc.Write(buf, func(block car.Block) error { + blockCount++ + return nil + }) + require.Equal(t, blockCount, 3) // root + 2 + require.Error(t, err) + require.Regexp(t, "^traversal budget exceeded: budget for links reached zero while on path .*", err) +} + +type countingReadStore struct { + bs car.ReadStore + count int +} + +func (rs *countingReadStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + rs.count++ + return rs.bs.Get(ctx, c) +} diff --git a/ipld/car/testdata/fuzz/FuzzCarReader/21a90a70853c333c6b9ddc133bae39a14164874ed8abdee1f6a6795311a0e546 b/ipld/car/testdata/fuzz/FuzzCarReader/21a90a70853c333c6b9ddc133bae39a14164874ed8abdee1f6a6795311a0e546 new file mode 100644 index 0000000000..a7ab1d519c --- /dev/null +++ b/ipld/car/testdata/fuzz/FuzzCarReader/21a90a70853c333c6b9ddc133bae39a14164874ed8abdee1f6a6795311a0e546 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xe0\xe0\xe0\xe0\xa7\x06\folLʔ<#oK\x19g#H\x96\b\xed\xb4*\x8b\x8f\xa8\vgversion\x19") diff --git a/ipld/car/testdata/fuzz/FuzzCarReader/5857e57e4072c6b0d8684030cc13b5570849c89b3dbf5bc0152abc66c9642f3e b/ipld/car/testdata/fuzz/FuzzCarReader/5857e57e4072c6b0d8684030cc13b5570849c89b3dbf5bc0152abc66c9642f3e new file mode 100644 
index 0000000000..3e680cf60e --- /dev/null +++ b/ipld/car/testdata/fuzz/FuzzCarReader/5857e57e4072c6b0d8684030cc13b5570849c89b3dbf5bc0152abc66c9642f3e @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte(":\xa2eroots\x81\xd80X%\x00\x0100 00000000000000000000000000000000gversion\x01\x010") diff --git a/ipld/car/util/util.go b/ipld/car/util/util.go new file mode 100644 index 0000000000..af2f38e420 --- /dev/null +++ b/ipld/car/util/util.go @@ -0,0 +1,136 @@ +package util + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +// MaxAllowedSectionSize dictates the maximum number of bytes that a CARv1 header +// or section is allowed to occupy without causing a decode to error. +// This cannot be supplied as an option, only adjusted as a global. You should +// use v2#NewReader instead since it allows for options to be passed in. +var MaxAllowedSectionSize uint = 32 << 20 // 32MiB + +var cidv0Pref = []byte{0x12, 0x20} + +type BytesReader interface { + io.Reader + io.ByteReader +} + +// Deprecated: ReadCid shouldn't be used directly, use CidFromReader from go-cid +func ReadCid(buf []byte) (cid.Cid, int, error) { + if len(buf) >= 2 && bytes.Equal(buf[:2], cidv0Pref) { + i := 34 + if len(buf) < i { + i = len(buf) + } + c, err := cid.Cast(buf[:i]) + return c, i, err + } + + br := bytes.NewReader(buf) + + // assume cidv1 + vers, err := binary.ReadUvarint(br) + if err != nil { + return cid.Cid{}, 0, err + } + + // TODO: the go-cid package allows version 0 here as well + if vers != 1 { + return cid.Cid{}, 0, fmt.Errorf("invalid cid version number") + } + + codec, err := binary.ReadUvarint(br) + if err != nil { + return cid.Cid{}, 0, err + } + + mhr := mh.NewReader(br) + h, err := mhr.ReadMultihash() + if err != nil { + return cid.Cid{}, 0, err + } + + return cid.NewCidV1(codec, h), len(buf) - br.Len(), nil +} + +func ReadNode(br *bufio.Reader) (cid.Cid, []byte, error) { + data, err := LdRead(br) + if err != nil { + return cid.Cid{}, nil, err + } + + n, c, err := cid.CidFromReader(bytes.NewReader(data)) + if err != nil { + return cid.Cid{}, nil, err + } + + return c, data[n:], nil +} + +func LdWrite(w io.Writer, d ...[]byte) error { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + + buf := make([]byte, 8) + n := binary.PutUvarint(buf, sum) + _, err := w.Write(buf[:n]) + if err != nil { + return err + } + + for _, s := range d { + _, err = w.Write(s) + if err != nil { + return err + } + } + + return nil +} + +func LdSize(d ...[]byte) uint64 { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + buf := make([]byte, 8) + n := binary.PutUvarint(buf, sum) + return sum + uint64(n) +} + +func LdRead(r *bufio.Reader) ([]byte, error) { + if _, err := r.Peek(1); err != nil { // no more blocks, likely clean io.EOF + return nil, err + } + + l, err := binary.ReadUvarint(r) + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF // don't silently pretend this is a clean EOF + } + return nil, err + } + + if l > uint64(MaxAllowedSectionSize) { // Don't OOM + return nil, errors.New("malformed car; header is bigger than util.MaxAllowedSectionSize") + } + + buf := make([]byte, l) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + + return buf, nil +} diff --git a/ipld/car/util/util_test.go b/ipld/car/util/util_test.go new file mode 100644 index 0000000000..4708f3f316 --- /dev/null +++ b/ipld/car/util/util_test.go @@ -0,0 +1,27 @@ +package util_test + 
+import ( + "bytes" + crand "crypto/rand" + "math/rand" + "testing" + + "github.com/ipfs/boxo/ipld/car/util" + "github.com/stretchr/testify/require" +) + +func TestLdSize(t *testing.T) { + for i := 0; i < 5; i++ { + var buf bytes.Buffer + data := make([][]byte, 5) + for j := 0; j < 5; j++ { + data[j] = make([]byte, rand.Intn(30)) + _, err := crand.Read(data[j]) + require.NoError(t, err) + } + size := util.LdSize(data...) + err := util.LdWrite(&buf, data...) + require.NoError(t, err) + require.Equal(t, uint64(len(buf.Bytes())), size) + } +} diff --git a/ipld/car/v2/LICENSE.md b/ipld/car/v2/LICENSE.md new file mode 100644 index 0000000000..2fa16a1537 --- /dev/null +++ b/ipld/car/v2/LICENSE.md @@ -0,0 +1,229 @@ +The contents of this repository are Copyright (c) corresponding authors and +contributors, licensed under the `Permissive License Stack` meaning either of: + +- Apache-2.0 Software License: https://www.apache.org/licenses/LICENSE-2.0 + ([...4tr2kfsq](https://dweb.link/ipfs/bafkreiankqxazcae4onkp436wag2lj3ccso4nawxqkkfckd6cg4tr2kfsq)) + +- MIT Software License: https://opensource.org/licenses/MIT + ([...vljevcba](https://dweb.link/ipfs/bafkreiepofszg4gfe2gzuhojmksgemsub2h4uy2gewdnr35kswvljevcba)) + +You may not use the contents of this repository except in compliance +with one of the listed Licenses. For an extended clarification of the +intent behind the choice of Licensing please refer to +https://protocol.ai/blog/announcing-the-permissive-license-stack/ + +Unless required by applicable law or agreed to in writing, software +distributed under the terms listed in this notice is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See each License for the specific language +governing permissions and limitations under that License. + + +`SPDX-License-Identifier: Apache-2.0 OR MIT` + +Verbatim copies of both licenses are included below: + +
Apache-2.0 Software License + +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS +``` +
+ +
MIT Software License + +``` +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +``` +
diff --git a/ipld/car/v2/bench_test.go b/ipld/car/v2/bench_test.go new file mode 100644 index 0000000000..89a579df58 --- /dev/null +++ b/ipld/car/v2/bench_test.go @@ -0,0 +1,203 @@ +package car_test + +import ( + "context" + "io" + "math/rand" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-cid" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" +) + +// BenchmarkReadBlocks instantiates a BlockReader, and iterates over all blocks. +// It essentially looks at the contents of any CARv1 or CARv2 file. +// Note that this also uses internal carv1.ReadHeader underneath. +func BenchmarkReadBlocks(b *testing.B) { + path := "testdata/sample-wrapped-v2.car" + + info, err := os.Stat(path) + if err != nil { + b.Fatal(err) + } + b.SetBytes(info.Size()) + b.ReportAllocs() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + r, err := os.Open("testdata/sample-wrapped-v2.car") + if err != nil { + b.Fatal(err) + } + br, err := carv2.NewBlockReader(r) + if err != nil { + b.Fatal(err) + } + for { + _, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + b.Fatal(err) + } + } + + if err := r.Close(); err != nil { + b.Fatal(err) + } + } + }) +} + +// BenchmarkExtractV1File extracts inner CARv1 payload from a sample CARv2 file using ExtractV1File. +func BenchmarkExtractV1File(b *testing.B) { + path := filepath.Join(b.TempDir(), "bench-large-v2.car") + generateRandomCarV2File(b, path, 10<<20) // 10 MiB + defer os.Remove(path) + + info, err := os.Stat(path) + if err != nil { + b.Fatal(err) + } + b.SetBytes(info.Size()) + b.ReportAllocs() + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + dstPath := filepath.Join(b.TempDir(), "destination.car") + for pb.Next() { + err = carv2.ExtractV1File(path, dstPath) + if err != nil { + b.Fatal(err) + } + _ = os.Remove(dstPath) + } + }) +} + +// BenchmarkExtractV1UsingReader extracts inner CARv1 payload from a sample CARv2 file using Reader +// API. This benchmark is implemented to be used as a comparison in conjunction with +// BenchmarkExtractV1File. +func BenchmarkExtractV1UsingReader(b *testing.B) { + path := filepath.Join(b.TempDir(), "bench-large-v2.car") + generateRandomCarV2File(b, path, 10<<20) // 10 MiB + defer os.Remove(path) + + info, err := os.Stat(path) + if err != nil { + b.Fatal(err) + } + b.SetBytes(info.Size()) + b.ReportAllocs() + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + dstPath := filepath.Join(b.TempDir(), "destination.car") + for pb.Next() { + dst, err := os.Create(dstPath) + if err != nil { + b.Fatal(err) + } + reader, err := carv2.OpenReader(path) + if err != nil { + b.Fatal(err) + } + dr, err := reader.DataReader() + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(dst, dr) + if err != nil { + b.Fatal(err) + } + if err := dst.Close(); err != nil { + b.Fatal(err) + } + } + }) +} + +// BenchmarkReader_InspectWithBlockValidation benchmarks Reader.Inspect with block hash validation +// for a randomly generated CARv2 file of size 10 MiB. 
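+//
+// In essence, the measured call is (a sketch; see benchmarkInspect below):
+//
+//	reader, _ := carv2.OpenReader(path)
+//	defer reader.Close()
+//	_, _ = reader.Inspect(true) // true enables block hash validation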
+func BenchmarkReader_InspectWithBlockValidation(b *testing.B) {
+ path := filepath.Join(b.TempDir(), "bench-large-v2.car")
+ generateRandomCarV2File(b, path, 10<<20) // 10 MiB
+ defer os.Remove(path)
+
+ info, err := os.Stat(path)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(info.Size())
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ benchmarkInspect(b, path, true)
+ }
+ })
+}
+
+// BenchmarkReader_InspectWithoutBlockValidation benchmarks Reader.Inspect without block hash
+// validation for a randomly generated CARv2 file of size 10 MiB.
+func BenchmarkReader_InspectWithoutBlockValidation(b *testing.B) {
+ path := filepath.Join(b.TempDir(), "bench-large-v2.car")
+ generateRandomCarV2File(b, path, 10<<20) // 10 MiB
+ defer os.Remove(path)
+
+ info, err := os.Stat(path)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(info.Size())
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ benchmarkInspect(b, path, false)
+ }
+ })
+}
+
+func benchmarkInspect(b *testing.B, path string, validateBlockHash bool) {
+ reader, err := carv2.OpenReader(path)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Close the reader at the end of each call to avoid leaking file handles
+ // across benchmark iterations.
+ defer reader.Close()
+ if _, err := reader.Inspect(validateBlockHash); err != nil {
+ b.Fatal(err)
+ }
+}
+
+func generateRandomCarV2File(b *testing.B, path string, minTotalBlockSize int) {
+ // Use fixed RNG for determinism across benchmarks.
+ rng := rand.New(rand.NewSource(1413))
+ bs, err := blockstore.OpenReadWrite(path, []cid.Cid{})
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Finalize once writing is done. Note the error check above must come first;
+ // deferring before it would call Finalize on a nil blockstore.
+ defer func() {
+ if err := bs.Finalize(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+ buf := make([]byte, 32<<10) // 32 KiB
+ var totalBlockSize int
+ for totalBlockSize < minTotalBlockSize {
+ size, err := rng.Read(buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ blk := merkledag.NewRawNode(buf)
+ if err := bs.Put(context.TODO(), blk); err != nil {
+ b.Fatal(err)
+ }
+ totalBlockSize += size
+ }
+}
diff --git a/ipld/car/v2/block_reader.go b/ipld/car/v2/block_reader.go
new file mode 100644
index 0000000000..367df743ff
--- /dev/null
+++ b/ipld/car/v2/block_reader.go
@@ -0,0 +1,221 @@
+package car
+
+import (
+ "fmt"
+ "io"
+
+ blocks "github.com/ipfs/boxo/blocks"
+ "github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+ "github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util"
+ internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io"
+ "github.com/ipfs/go-cid"
+ "github.com/multiformats/go-varint"
+)
+
+// BlockReader facilitates iteration over CAR blocks for both CARv1 and CARv2.
+// See NewBlockReader.
+type BlockReader struct {
+ // The detected version of the CAR payload.
+ Version uint64
+ // The roots of the CAR payload. May be empty.
+ Roots []cid.Cid
+
+ // Used internally only, by BlockReader.Next during iteration over blocks.
+ r io.Reader
+ offset uint64
+ readerSize int64
+ opts Options
+}
+
+// NewBlockReader instantiates a new BlockReader facilitating iteration over blocks in a CARv1 or
+// CARv2 payload. Upon instantiation, the version is automatically detected and exposed via
+// BlockReader.Version. The root CIDs of the CAR payload are exposed via BlockReader.Roots.
+//
+// See BlockReader.Next
+func NewBlockReader(r io.Reader, opts ...Option) (*BlockReader, error) {
+ options := ApplyOptions(opts...)
+
+ // Read CARv1 header or CARv2 pragma.
+ // Either one parses as a valid CARv1 header, so it is read as such.
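+ // (The CARv2 pragma is a fixed 11-byte sequence that decodes as a
+ // CARv1-style header carrying version 2, which is how the format is
+ // detected below.)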
+ pragmaOrV1Header, err := carv1.ReadHeader(r, options.MaxAllowedHeaderSize)
+ if err != nil {
+ return nil, err
+ }
+
+ // Populate the block reader version and options.
+ br := &BlockReader{
+ Version: pragmaOrV1Header.Version,
+ opts: options,
+ }
+
+ // Expect either version 1 or 2.
+ switch br.Version {
+ case 1:
+ // If version is 1, r represents a CARv1.
+ // Simply populate br.Roots and br.r without modifying r.
+ br.Roots = pragmaOrV1Header.Roots
+ br.r = r
+ br.readerSize = -1
+ br.offset, _ = carv1.HeaderSize(pragmaOrV1Header)
+ case 2:
+ // If the version is 2:
+ // 1. Read the CARv2-specific header to locate the inner CARv1 data payload offset and size.
+ // 2. Skip to the beginning of the inner CARv1 data payload.
+ // 3. Re-initialize r as a LimitReader, limited to the size of the inner CARv1 payload.
+ // 4. Read the header of the inner CARv1 data payload via r to populate br.Roots.
+
+ // Read CARv2-specific header.
+ v2h := Header{}
+ if _, err := v2h.ReadFrom(r); err != nil {
+ return nil, err
+ }
+
+ // Skip to the beginning of the inner CARv1 data payload.
+ // Note, at this point the pragma and CARv1 header have been read.
+ // An io.ReadSeeker is opportunistically constructed from the given io.Reader r.
+ // The constructor does not take an initial offset, so we use Seek with io.SeekCurrent to
+ // fast-forward to the beginning of the data payload by subtracting the pragma and header size from
+ // dataOffset.
+ rs := internalio.ToByteReadSeeker(r)
+ if _, err := rs.Seek(int64(v2h.DataOffset)-PragmaSize-HeaderSize, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ br.offset = uint64(v2h.DataOffset)
+ br.readerSize = int64(v2h.DataOffset + v2h.DataSize)
+
+ // Set br.r to a LimitReader reading from r limited to dataSize.
+ br.r = io.LimitReader(r, int64(v2h.DataSize))
+
+ // Populate br.Roots by reading the inner CARv1 data payload header.
+ header, err := carv1.ReadHeader(br.r, options.MaxAllowedHeaderSize)
+ if err != nil {
+ return nil, err
+ }
+ // Assert that the data payload header version is exactly 1, i.e. that the inner payload is a CARv1.
+ if header.Version != 1 {
+ return nil, fmt.Errorf("invalid data payload header version; expected 1, got %v", header.Version)
+ }
+ br.Roots = header.Roots
+ default:
+ // Otherwise, error out with invalid version since only versions 1 or 2 are expected.
+ return nil, fmt.Errorf("invalid car version: %d", br.Version)
+ }
+ return br, nil
+}
+
+// Next iterates over blocks in the underlying CAR payload with an io.EOF error indicating the end
+// is reached. Note, this function is forward-only; once the end has been reached it will always
+// return io.EOF.
+//
+// When the payload represents a CARv1, BlockReader.Next simply iterates over blocks until it
+// reaches the end of the underlying io.Reader stream.
+//
+// As for a CARv2 payload, the underlying io.Reader is read only up to the end of the last block.
+// Note, when the ZeroLengthSectionAsEOF option is enabled, io.EOF is returned
+// immediately upon encountering a zero-length section without reading any further bytes from the
+// underlying io.Reader.
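+//
+// A minimal read loop, as a sketch (it mirrors the usage in bench_test.go):
+//
+//	br, err := NewBlockReader(r)
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		blk, err := br.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// ... use blk
+//	}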
+func (br *BlockReader) Next() (blocks.Block, error) { + c, data, err := util.ReadNode(br.r, br.opts.ZeroLengthSectionAsEOF, br.opts.MaxAllowedSectionSize) + if err != nil { + return nil, err + } + + if !br.opts.TrustedCAR { + hashed, err := c.Prefix().Sum(data) + if err != nil { + return nil, err + } + + if !hashed.Equals(c) { + return nil, fmt.Errorf("mismatch in content integrity, expected: %s, got: %s", c, hashed) + } + } + + ss := uint64(c.ByteLen()) + uint64(len(data)) + br.offset += uint64(varint.UvarintSize(ss)) + ss + return blocks.NewBlockWithCid(data, c) +} + +type BlockMetadata struct { + cid.Cid + Offset uint64 + Size uint64 +} + +// SkipNext jumps over the next block, returning metadata about what it is (the CID, offset, and size). +// Like Next it will return an io.EOF once it has reached the end. +// +// If the underlying reader used by the BlockReader is actually a ReadSeeker, this method will attempt to +// seek over the underlying data rather than reading it into memory. +func (br *BlockReader) SkipNext() (*BlockMetadata, error) { + sctSize, err := util.LdReadSize(br.r, br.opts.ZeroLengthSectionAsEOF, br.opts.MaxAllowedSectionSize) + if err != nil { + return nil, err + } + + if sctSize == 0 { + _, _, err := cid.CidFromBytes([]byte{}) + return nil, err + } + + cidSize, c, err := cid.CidFromReader(io.LimitReader(br.r, int64(sctSize))) + if err != nil { + return nil, err + } + + blkSize := sctSize - uint64(cidSize) + if brs, ok := br.r.(io.ReadSeeker); ok { + // carv1 and we don't know the size, so work it out and cache it + if br.readerSize == -1 { + cur, err := brs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + end, err := brs.Seek(0, io.SeekEnd) + if err != nil { + return nil, err + } + br.readerSize = end + if _, err = brs.Seek(cur, io.SeekStart); err != nil { + return nil, err + } + } + // seek. + finalOffset, err := brs.Seek(int64(blkSize), io.SeekCurrent) + if err != nil { + return nil, err + } + if finalOffset != int64(br.offset)+int64(sctSize)+int64(varint.UvarintSize(sctSize)) { + return nil, fmt.Errorf("unexpected length") + } + if finalOffset > br.readerSize { + return nil, io.ErrUnexpectedEOF + } + br.offset = uint64(finalOffset) + return &BlockMetadata{ + c, + uint64(finalOffset) - sctSize - uint64(varint.UvarintSize(sctSize)), + blkSize, + }, nil + } + + // read to end. 
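+ // The underlying reader is not seekable on this path, so advance past the
+ // block payload by copying exactly blkSize bytes to io.Discard.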
+ readCnt, err := io.CopyN(io.Discard, br.r, int64(blkSize)) + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + return nil, err + } + if readCnt != int64(blkSize) { + return nil, fmt.Errorf("unexpected length") + } + origOffset := br.offset + br.offset += uint64(varint.UvarintSize(sctSize)) + sctSize + + return &BlockMetadata{ + c, + origOffset, + blkSize, + }, nil +} diff --git a/ipld/car/v2/block_reader_test.go b/ipld/car/v2/block_reader_test.go new file mode 100644 index 0000000000..8a65d0c72a --- /dev/null +++ b/ipld/car/v2/block_reader_test.go @@ -0,0 +1,237 @@ +package car_test + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "testing" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" + "github.com/stretchr/testify/require" +) + +func TestBlockReaderFailsOnUnknownVersion(t *testing.T) { + r := requireReaderFromPath(t, "testdata/sample-rootless-v42.car") + _, err := carv2.NewBlockReader(r) + require.EqualError(t, err, "invalid car version: 42") +} + +func TestBlockReaderFailsOnCorruptPragma(t *testing.T) { + r := requireReaderFromPath(t, "testdata/sample-corrupt-pragma.car") + _, err := carv2.NewBlockReader(r) + require.EqualError(t, err, "unexpected EOF") +} + +func TestBlockReader_WithCarV1Consistency(t *testing.T) { + tests := []struct { + name string + path string + zerLenAsEOF bool + wantVersion uint64 + }{ + { + name: "CarV1WithoutZeroLengthSection", + path: "testdata/sample-v1.car", + wantVersion: 1, + }, + { + name: "CarV1WithZeroLenSection", + path: "testdata/sample-v1-with-zero-len-section.car", + zerLenAsEOF: true, + wantVersion: 1, + }, + { + name: "AnotherCarV1WithZeroLenSection", + path: "testdata/sample-v1-with-zero-len-section2.car", + zerLenAsEOF: true, + wantVersion: 1, + }, + { + name: "CarV1WithZeroLenSectionWithoutOption", + path: "testdata/sample-v1-with-zero-len-section.car", + wantVersion: 1, + }, + { + name: "AnotherCarV1WithZeroLenSectionWithoutOption", + path: "testdata/sample-v1-with-zero-len-section2.car", + wantVersion: 1, + }, + { + name: "CorruptCarV1", + path: "testdata/sample-v1-tailing-corrupt-section.car", + wantVersion: 1, + }, + { + name: "CarV2WrappingV1", + path: "testdata/sample-wrapped-v2.car", + wantVersion: 2, + }, + { + name: "CarV2ProducedByBlockstore", + path: "testdata/sample-rw-bs-v2.car", + wantVersion: 2, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := requireReaderFromPath(t, tt.path) + subject, err := carv2.NewBlockReader(r, carv2.ZeroLengthSectionAsEOF(tt.zerLenAsEOF)) + require.NoError(t, err) + + require.Equal(t, tt.wantVersion, subject.Version) + + var wantReader *carv1.CarReader + switch tt.wantVersion { + case 1: + wantReader = requireNewCarV1ReaderFromV1File(t, tt.path, tt.zerLenAsEOF) + case 2: + wantReader = requireNewCarV1ReaderFromV2File(t, tt.path, tt.zerLenAsEOF) + default: + require.Failf(t, "invalid test-case", "unknown wantVersion %v", tt.wantVersion) + } + require.Equal(t, wantReader.Header.Roots, subject.Roots) + + for { + gotBlock, gotErr := subject.Next() + wantBlock, wantErr := wantReader.Next() + require.Equal(t, wantBlock, gotBlock) + require.Equal(t, wantErr, gotErr) + if gotErr == io.EOF { + break + } + } + }) + t.Run(tt.name+"-skipping-reads", func(t *testing.T) { + r := requireReaderFromPath(t, tt.path) + subject, err := carv2.NewBlockReader(r, 
carv2.ZeroLengthSectionAsEOF(tt.zerLenAsEOF))
+ require.NoError(t, err)
+
+ require.Equal(t, tt.wantVersion, subject.Version)
+
+ var wantReader *carv1.CarReader
+ switch tt.wantVersion {
+ case 1:
+ wantReader = requireNewCarV1ReaderFromV1File(t, tt.path, tt.zerLenAsEOF)
+ case 2:
+ wantReader = requireNewCarV1ReaderFromV2File(t, tt.path, tt.zerLenAsEOF)
+ default:
+ require.Failf(t, "invalid test-case", "unknown wantVersion %v", tt.wantVersion)
+ }
+ require.Equal(t, wantReader.Header.Roots, subject.Roots)
+
+ for {
+ gotBlock, gotErr := subject.SkipNext()
+ wantBlock, wantErr := wantReader.Next()
+ if wantErr != nil && gotErr == nil {
+ fmt.Printf("want was %+v\n", wantReader)
+ fmt.Printf("want was err, got was %+v / %d\n", gotBlock, gotBlock.Size)
+ }
+ require.Equal(t, wantErr, gotErr)
+ if gotErr == io.EOF {
+ break
+ }
+ if gotErr == nil {
+ require.Equal(t, wantBlock.Cid(), gotBlock.Cid)
+ require.Equal(t, uint64(len(wantBlock.RawData())), gotBlock.Size)
+ }
+ }
+ })
+ }
+}
+
+func TestMaxSectionLength(t *testing.T) {
+ // headerHex is the zero-roots CARv1 header
+ const headerHex = "11a265726f6f7473806776657273696f6e01"
+ headerBytes, _ := hex.DecodeString(headerHex)
+ // 8 MiB block of zeros
+ block := make([]byte, 8<<20)
+ // CID for that block
+ pfx := cid.NewPrefixV1(cid.Raw, mh.SHA2_256)
+ cid, err := pfx.Sum(block)
+ require.NoError(t, err)
+
+ // construct CAR
+ var buf bytes.Buffer
+ buf.Write(headerBytes)
+ buf.Write(varint.ToUvarint(uint64(len(cid.Bytes()) + len(block))))
+ buf.Write(cid.Bytes())
+ buf.Write(block)
+
+ // try to read it
+ car, err := carv2.NewBlockReader(bytes.NewReader(buf.Bytes()))
+ require.NoError(t, err)
+ // error should occur on first section read
+ _, err = car.Next()
+ require.EqualError(t, err, "invalid section data, length of read beyond allowable maximum")
+
+ // successful read by expanding the max section size
+ car, err = carv2.NewBlockReader(bytes.NewReader(buf.Bytes()), carv2.MaxAllowedSectionSize((8<<20)+40))
+ require.NoError(t, err)
+ // can now read the block and get our 8 MiB zeroed byte array
+ readBlock, err := car.Next()
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(block, readBlock.RawData()))
+}
+
+func TestTrustedCAR(t *testing.T) {
+ // headerHex is the zero-roots CARv1 header
+ const headerHex = "11a265726f6f7473806776657273696f6e01"
+ headerBytes, _ := hex.DecodeString(headerHex)
+ // block of zeros
+ block := make([]byte, 5)
+ // CID for that block
+ pfx := cid.NewPrefixV1(cid.Raw, mh.SHA2_256)
+ cid, err := pfx.Sum(block)
+ require.NoError(t, err)
+
+ // Modify the block so it won't match the CID anymore
+ block[2] = 0xFF
+ // construct CAR
+ var buf bytes.Buffer
+ buf.Write(headerBytes)
+ buf.Write(varint.ToUvarint(uint64(len(cid.Bytes()) + len(block))))
+ buf.Write(cid.Bytes())
+ buf.Write(block)
+
+ // try to read it as trusted
+ car, err := carv2.NewBlockReader(bytes.NewReader(buf.Bytes()), carv2.WithTrustedCAR(true))
+ require.NoError(t, err)
+ _, err = car.Next()
+ require.NoError(t, err)
+
+ // Try to read it as untrusted - should fail
+ car, err = carv2.NewBlockReader(bytes.NewReader(buf.Bytes()), carv2.WithTrustedCAR(false))
+ require.NoError(t, err)
+ // error should occur on first section read
+ _, err = car.Next()
+ require.EqualError(t, err, "mismatch in content integrity, expected: bafkreieikviivlpbn3cxhuq6njef37ikoysaqxa2cs26zxleqxpay2bzuq, got: bafkreidgklrppelx4fxcsna7cxvo3g7ayedfojkqeuus6kz6e4hy7gukmy")
+}
+
+func TestMaxHeaderLength(t *testing.T) {
+ // headerHex is a 5-root CARv1 header
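+ // (a two-byte varint length prefix, then a DAG-CBOR map holding a
+ // five-element roots array and version 1)
+ 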
const headerHex = "de01a265726f6f747385d82a58250001711220785197229dc8bb1152945da58e2348f7e279eeded06cc2ca736d0e879858b501d82a58250001711220785197229dc8bb1152945da58e2348f7e279eeded06cc2ca736d0e879858b501d82a58250001711220785197229dc8bb1152945da58e2348f7e279eeded06cc2ca736d0e879858b501d82a58250001711220785197229dc8bb1152945da58e2348f7e279eeded06cc2ca736d0e879858b501d82a58250001711220785197229dc8bb1152945da58e2348f7e279eeded06cc2ca736d0e879858b5016776657273696f6e01" + headerBytes, _ := hex.DecodeString(headerHex) + c, _ := cid.Decode("bafyreidykglsfhoixmivffc5uwhcgshx4j465xwqntbmu43nb2dzqwfvae") + + // successful read + car, err := carv2.NewBlockReader(bytes.NewReader(headerBytes)) + require.NoError(t, err) + require.ElementsMatch(t, []cid.Cid{c, c, c, c, c}, car.Roots) + + // unsuccessful read, low allowable max header length (length - 3 because there are 2 bytes in the length varint prefix) + _, err = carv2.NewBlockReader(bytes.NewReader(headerBytes), carv2.MaxAllowedHeaderSize(uint64(len(headerBytes)-3))) + require.EqualError(t, err, "invalid header data, length of read beyond allowable maximum") +} + +func requireReaderFromPath(t *testing.T, path string) io.Reader { + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + return f +} diff --git a/ipld/car/v2/blockstore/bench_test.go b/ipld/car/v2/blockstore/bench_test.go new file mode 100644 index 0000000000..8c83b4f19e --- /dev/null +++ b/ipld/car/v2/blockstore/bench_test.go @@ -0,0 +1,80 @@ +package blockstore_test + +import ( + "context" + "io" + mathrand "math/rand" + "os" + "testing" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/go-cid" +) + +// BenchmarkOpenReadOnlyV1 opens a read-only blockstore, +// and retrieves all blocks in a shuffled order. +// Note that this benchmark includes generating an index, +// since the input file is a CARv1. +func BenchmarkOpenReadOnlyV1(b *testing.B) { + path := "../testdata/sample-v1.car" + f, err := os.Open("../testdata/sample-v1.car") + if err != nil { + b.Fatal(err) + } + defer func() { + if err := f.Close(); err != nil { + b.Fatal(err) + } + }() + info, err := os.Stat(path) + if err != nil { + b.Fatal(err) + } + b.SetBytes(info.Size()) + b.ReportAllocs() + + var shuffledCIDs []cid.Cid + br, err := carv2.NewBlockReader(f) + if err != nil { + b.Fatal(err) + } + for { + block, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + b.Fatal(err) + } + shuffledCIDs = append(shuffledCIDs, block.Cid()) + } + + // The shuffling needs to be deterministic, + // for the sake of stable benchmark results. + // Any source number works as long as it's fixed. + rnd := mathrand.New(mathrand.NewSource(123456)) + rnd.Shuffle(len(shuffledCIDs), func(i, j int) { + shuffledCIDs[i], shuffledCIDs[j] = shuffledCIDs[j], shuffledCIDs[i] + }) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + bs, err := blockstore.OpenReadOnly(path) + if err != nil { + b.Fatal(err) + } + + for _, c := range shuffledCIDs { + _, err := bs.Get(context.TODO(), c) + if err != nil { + b.Fatal(err) + } + } + + if err := bs.Close(); err != nil { + b.Fatal(err) + } + } + }) +} diff --git a/ipld/car/v2/blockstore/doc.go b/ipld/car/v2/blockstore/doc.go new file mode 100644 index 0000000000..4aa4cfdfc9 --- /dev/null +++ b/ipld/car/v2/blockstore/doc.go @@ -0,0 +1,28 @@ +// Package blockstore implements the IPFS blockstore interface backed by a CAR file. 
+// This package provides two flavours of blockstore: ReadOnly and ReadWrite.
+//
+// The ReadOnly blockstore provides read-only random access over a given data payload either in
+// unindexed CARv1 format or indexed/unindexed v2 format:
+// - NewReadOnly can be used to instantiate a new read-only blockstore for a given CARv1
+// or CARv2 data payload with an optional index override.
+// - OpenReadOnly can be used to instantiate a new read-only blockstore for a given CARv1
+// or CARv2 file with automatic index generation if the index is not present.
+//
+// The ReadWrite blockstore allows writing and reading of the blocks concurrently. The user of this
+// blockstore is responsible for calling ReadWrite.Finalize when finished writing blocks.
+// Upon finalization, the instance can no longer be used for reading or writing blocks and will
+// error if used. To continue reading the blocks users are encouraged to use a ReadOnly blockstore
+// instantiated from the same file path using OpenReadOnly.
+// A user may resume reading/writing from files produced by an instance of the ReadWrite blockstore. The
+// resumption is attempted automatically, if the path passed to OpenReadWrite exists.
+//
+// Note that the blockstore implementations in this package behave similarly to the IPFS IdStore wrapper
+// when given CIDs with multihash.IDENTITY code.
+// More specifically, for CIDs with multihash.IDENTITY code:
+// * blockstore.Has will always return true.
+// * blockstore.Get will always succeed, returning the multihash digest of the given CID.
+// * blockstore.GetSize will always succeed, returning the multihash digest length of the given CID.
+// * blockstore.Put and blockstore.PutMany will always succeed without performing any operation unless car.StoreIdentityCIDs is enabled.
+//
+// See: https://pkg.go.dev/github.com/ipfs/go-ipfs-blockstore#NewIdStore
+package blockstore
diff --git a/ipld/car/v2/blockstore/example_test.go b/ipld/car/v2/blockstore/example_test.go
new file mode 100644
index 0000000000..46f1781265
--- /dev/null
+++ b/ipld/car/v2/blockstore/example_test.go
@@ -0,0 +1,159 @@
+package blockstore_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ blocks "github.com/ipfs/boxo/blocks"
+ carv2 "github.com/ipfs/boxo/ipld/car/v2"
+ "github.com/ipfs/boxo/ipld/car/v2/blockstore"
+ "github.com/ipfs/boxo/ipld/merkledag"
+ "github.com/ipfs/go-cid"
+)
+
+const cidPrintCount = 5
+
+// ExampleOpenReadOnly opens a read-only blockstore from a CARv1 file, and prints its root CIDs
+// along with the mapping from CID to raw data size for the first five sections in the CAR file.
+func ExampleOpenReadOnly() {
+ // Open a new ReadOnly blockstore from a CARv1 file.
+ // Note, `OpenReadOnly` accepts both CARv1 and CARv2 formats and transparently generates an index
+ // in the background if necessary.
+ // This instance sets the ZeroLengthSectionAsEOF option to treat zero-sized sections in the file as EOF.
+ robs, err := blockstore.OpenReadOnly("../testdata/sample-v1.car",
+ blockstore.UseWholeCIDs(true),
+ carv2.ZeroLengthSectionAsEOF(true),
+ )
+ if err != nil {
+ panic(err)
+ }
+ defer robs.Close()
+
+ // Print root CIDs.
+ roots, err := robs.Roots()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Contains %v root CID(s):\n", len(roots))
+ for _, r := range roots {
+ fmt.Printf("\t%v\n", r)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Print the raw data size for the first 5 CIDs in the CAR file.
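+ // AllKeysChan streams CIDs asynchronously; cancelling the context (as the
+ // loop below does once cidPrintCount keys have been printed) stops the
+ // stream early.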
+ keysChan, err := robs.AllKeysChan(ctx)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("List of first %v CIDs and their raw data size:\n", cidPrintCount)
+ i := 1
+ for k := range keysChan {
+ if i > cidPrintCount {
+ cancel()
+ break
+ }
+ size, err := robs.GetSize(context.TODO(), k)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("\t%v -> %v bytes\n", k, size)
+ i++
+ }
+
+ // Output:
+ // Contains 1 root CID(s):
+ // bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy
+ // List of first 5 CIDs and their raw data size:
+ // bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy -> 821 bytes
+ // bafy2bzaceaycv7jhaegckatnncu5yugzkrnzeqsppzegufr35lroxxnsnpspu -> 1053 bytes
+ // bafy2bzaceb62wdepofqu34afqhbcn4a7jziwblt2ih5hhqqm6zitd3qpzhdp4 -> 1094 bytes
+ // bafy2bzaceb3utcspm5jqcdqpih3ztbaztv7yunzkiyfq7up7xmokpxemwgu5u -> 1051 bytes
+ // bafy2bzacedjwekyjresrwjqj4n2r5bnuuu3klncgjo2r3slsp6wgqb37sz4ck -> 821 bytes
+}
+
+// ExampleOpenReadWrite creates a read-write blockstore, puts blocks into it, and then
+// resumes it from the same file to read and write additional blocks.
+func ExampleOpenReadWrite() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ thisBlock := merkledag.NewRawNode([]byte("fish")).Block
+ thatBlock := merkledag.NewRawNode([]byte("lobster")).Block
+ andTheOtherBlock := merkledag.NewRawNode([]byte("barreleye")).Block
+
+ tdir, err := os.MkdirTemp(os.TempDir(), "example-*")
+ if err != nil {
+ panic(err)
+ }
+ dst := filepath.Join(tdir, "sample-rw-bs-v2.car")
+ roots := []cid.Cid{thisBlock.Cid(), thatBlock.Cid(), andTheOtherBlock.Cid()}
+
+ rwbs, err := blockstore.OpenReadWrite(dst, roots, carv2.UseDataPadding(1413), carv2.UseIndexPadding(42))
+ if err != nil {
+ panic(err)
+ }
+
+ // Put all blocks onto the blockstore.
+ blocks := []blocks.Block{thisBlock, thatBlock}
+ if err := rwbs.PutMany(ctx, blocks); err != nil {
+ panic(err)
+ }
+ fmt.Printf("Successfully wrote %v blocks into the blockstore.\n", len(blocks))
+
+ // Any blocks put can be read back using the same blockstore instance.
+ block, err := rwbs.Get(ctx, thatBlock.Cid())
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Read back block just put with raw value of `%v`.\n", string(block.RawData()))
+
+ // Finalize the blockstore to flush out the index and make a complete CARv2.
+ if err := rwbs.Finalize(); err != nil {
+ panic(err)
+ }
+
+ // Resume from the same file to add more blocks.
+ // Note the UseDataPadding and roots must match the values passed to the blockstore instance
+ // that created the original file. Otherwise, we cannot resume from the same file.
+ resumedRwbos, err := blockstore.OpenReadWrite(dst, roots, carv2.UseDataPadding(1413))
+ if err != nil {
+ panic(err)
+ }
+
+ // Put another block, appending it to the set of blocks that were written previously.
+ if err := resumedRwbos.Put(ctx, andTheOtherBlock); err != nil {
+ panic(err)
+ }
+
+ // Read back the block put before resumption.
+ // Blocks previously put are present.
+ block, err = resumedRwbos.Get(ctx, thatBlock.Cid())
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Resumed blockstore contains blocks put previously with raw value of `%v`.\n", string(block.RawData()))
+
+ // Read back the additional block put after resumption.
+ // Blocks put after resumption are also present.
+ block, err = resumedRwbos.Get(ctx, andTheOtherBlock.Cid())
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("It also contains the block put after resumption with raw value of `%v`.\n", string(block.RawData()))
+
+ // Finalize the blockstore to flush out the index and make a complete CARv2.
+ // Note, Finalize must be called on an open ReadWrite blockstore to flush out a complete CARv2.
+ if err := resumedRwbos.Finalize(); err != nil {
+ panic(err)
+ }
+
+ // Output:
+ // Successfully wrote 2 blocks into the blockstore.
+ // Read back block just put with raw value of `lobster`.
+ // Resumed blockstore contains blocks put previously with raw value of `lobster`.
+ // It also contains the block put after resumption with raw value of `barreleye`.
+}
diff --git a/ipld/car/v2/blockstore/readonly.go b/ipld/car/v2/blockstore/readonly.go
new file mode 100644
index 0000000000..7c6d7f2ea2
--- /dev/null
+++ b/ipld/car/v2/blockstore/readonly.go
@@ -0,0 +1,474 @@
+package blockstore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ blocks "github.com/ipfs/boxo/blocks"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ carv2 "github.com/ipfs/boxo/ipld/car/v2"
+ "github.com/ipfs/boxo/ipld/car/v2/index"
+ "github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+ internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io"
+ "github.com/ipfs/boxo/ipld/car/v2/internal/store"
+ "github.com/ipfs/go-cid"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/multiformats/go-varint"
+ "golang.org/x/exp/mmap"
+)
+
+var _ blockstore.Blockstore = (*ReadOnly)(nil)
+
+var (
+ errZeroLengthSection = fmt.Errorf("zero-length carv2 section not allowed by default; see WithZeroLengthSectionAsEOF option")
+ errReadOnly = fmt.Errorf("called write method on a read-only carv2 blockstore")
+ errClosed = fmt.Errorf("cannot use a carv2 blockstore after closing")
+)
+
+// ReadOnly provides a read-only CAR Block Store.
+type ReadOnly struct {
+ // mu allows ReadWrite to be safe for concurrent use.
+ // It's in ReadOnly so that read operations also grab read locks,
+ // given that ReadWrite embeds ReadOnly for methods like Get and Has.
+ //
+ // The main fields guarded by the mutex are the index and the underlying writers.
+ // For simplicity, the entirety of the blockstore methods grab the mutex.
+ mu sync.RWMutex
+
+ // When true, the blockstore has been closed via Close, Discard, or
+ // Finalize, and must not be used. Any further blockstore method calls
+ // will return errClosed to avoid panics or broken behavior.
+ closed bool
+
+ // The backing containing the data payload in CARv1 format.
+ backing io.ReaderAt
+
+ // The CARv1 content index.
+ idx index.Index
+
+ // If we called carv2.NewReaderMmap, remember to close it too.
+ carv2Closer io.Closer
+
+ opts carv2.Options
+}
+
+type contextKey string
+
+const asyncErrHandlerKey contextKey = "asyncErrorHandlerKey"
+
+var UseWholeCIDs = carv2.UseWholeCIDs
+
+// NewReadOnly creates a new ReadOnly blockstore from the backing with an optional index, idx.
+// This function accepts both CARv1 and CARv2 backing.
+// The blockstore is instantiated with the given index if it is not nil.
+//
+// Otherwise:
+// * For a CARv1 backing an index is generated.
+// * For a CARv2 backing an index is only generated if Header.HasIndex returns false.
+//
+// There is no need to call ReadOnly.Close on instances returned by this function.
+func NewReadOnly(backing io.ReaderAt, idx index.Index, opts ...carv2.Option) (*ReadOnly, error) {
+ b := &ReadOnly{
+ opts: carv2.ApplyOptions(opts...),
+ }
+
+ version, err := readVersion(backing, opts...)
+ if err != nil {
+ return nil, err
+ }
+ switch version {
+ case 1:
+ if idx == nil {
+ if idx, err = generateIndex(backing, opts...); err != nil {
+ return nil, err
+ }
+ }
+ b.backing = backing
+ b.idx = idx
+ return b, nil
+ case 2:
+ v2r, err := carv2.NewReader(backing, opts...)
+ if err != nil {
+ return nil, err
+ }
+ if idx == nil {
+ if v2r.Header.HasIndex() {
+ ir, err := v2r.IndexReader()
+ if err != nil {
+ return nil, err
+ }
+ idx, err = index.ReadFrom(ir)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ dr, err := v2r.DataReader()
+ if err != nil {
+ return nil, err
+ }
+ if idx, err = generateIndex(dr, opts...); err != nil {
+ return nil, err
+ }
+ }
+ }
+ b.backing, err = v2r.DataReader()
+ if err != nil {
+ return nil, err
+ }
+ b.idx = idx
+ return b, nil
+ default:
+ return nil, fmt.Errorf("unsupported car version: %v", version)
+ }
+}
+
+func readVersion(at io.ReaderAt, opts ...carv2.Option) (uint64, error) {
+ var rr io.Reader
+ switch r := at.(type) {
+ case io.Reader:
+ rr = r
+ default:
+ var err error
+ rr, err = internalio.NewOffsetReadSeeker(r, 0)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return carv2.ReadVersion(rr, opts...)
+}
+
+func generateIndex(at io.ReaderAt, opts ...carv2.Option) (index.Index, error) {
+ var rs io.ReadSeeker
+ switch r := at.(type) {
+ case io.ReadSeeker:
+ rs = r
+ // The version may have been read from the given io.ReaderAt; therefore move back to the beginning.
+ if _, err := rs.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ default:
+ var err error
+ rs, err = internalio.NewOffsetReadSeeker(r, 0)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Note, we do not set any write options so that all write options fall back onto defaults.
+ return carv2.GenerateIndex(rs, opts...)
+}
+
+// OpenReadOnly opens a read-only blockstore from a CAR file (either v1 or v2), generating an index if it does not exist.
+// Note, an index generated this way is ephemeral and only stored in memory.
+// See car.GenerateIndex and Index.Attach for persisting an index onto a CAR file.
+func OpenReadOnly(path string, opts ...carv2.Option) (*ReadOnly, error) {
+ f, err := mmap.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ robs, err := NewReadOnly(f, nil, opts...)
+ if err != nil {
+ return nil, err
+ }
+ robs.carv2Closer = f
+
+ return robs, nil
+}
+
+// Index gives direct access to the index.
+// You should never add records on your own there.
+func (b *ReadOnly) Index() index.Index {
+ return b.idx
+}
+
+// DeleteBlock is unsupported and always errors.
+func (b *ReadOnly) DeleteBlock(_ context.Context, _ cid.Cid) error {
+ return errReadOnly
+}
+
+// Has indicates if the store contains a block that corresponds to the given key.
+// This function always returns true for any given key with multihash.IDENTITY
+// code unless the StoreIdentityCIDs option is on, in which case it will defer
+// to the index to check for the existence of the block; the index may or may
+// not contain identity CIDs included in this CAR, depending on whether
+// StoreIdentityCIDs was on when the index was created. If the CAR is a CARv1
+// and StoreIdentityCIDs is on, then the index will contain identity CIDs and
+// this will always return true.
+func (b *ReadOnly) Has(ctx context.Context, key cid.Cid) (bool, error) {
+ if !b.opts.StoreIdentityCIDs {
+ // If we don't store identity CIDs then we can return them straight away as if they are here,
+ // otherwise we need to check for their existence.
+		// Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
+		if _, ok, err := store.IsIdentity(key); err != nil {
+			return false, err
+		} else if ok {
+			return true, nil
+		}
+	}
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	if b.closed {
+		return false, errClosed
+	}
+
+	_, _, size, err := store.FindCid(
+		b.backing,
+		b.idx,
+		key,
+		b.opts.BlockstoreUseWholeCIDs,
+		b.opts.ZeroLengthSectionAsEOF,
+		b.opts.MaxAllowedSectionSize,
+		false,
+	)
+	if errors.Is(err, index.ErrNotFound) {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	return size > -1, nil
+}
+
+// Get gets a block corresponding to the given key.
+// This function always returns the block for any given key with
+// multihash.IDENTITY code unless the StoreIdentityCIDs option is on, in which
+// case it will defer to the index to check for the existence of the block; the
+// index may or may not contain identity CIDs included in this CAR, depending on
+// whether StoreIdentityCIDs was on when the index was created. If the CAR is a
+// CARv1 and StoreIdentityCIDs is on, then the index will contain identity CIDs
+// and this will always return the block.
+func (b *ReadOnly) Get(ctx context.Context, key cid.Cid) (blocks.Block, error) {
+	if !b.opts.StoreIdentityCIDs {
+		// If identity CIDs are not stored, we can synthesize the block straight away
+		// from the key itself; otherwise we defer to the index to check for its existence.
+		// Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
+		if digest, ok, err := store.IsIdentity(key); err != nil {
+			return nil, err
+		} else if ok {
+			return blocks.NewBlockWithCid(digest, key)
+		}
+	}
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	if b.closed {
+		return nil, errClosed
+	}
+
+	data, _, _, err := store.FindCid(
+		b.backing,
+		b.idx,
+		key,
+		b.opts.BlockstoreUseWholeCIDs,
+		b.opts.ZeroLengthSectionAsEOF,
+		b.opts.MaxAllowedSectionSize,
+		true,
+	)
+	if errors.Is(err, index.ErrNotFound) {
+		return nil, format.ErrNotFound{Cid: key}
+	} else if err != nil {
+		return nil, err
+	}
+	return blocks.NewBlockWithCid(data, key)
+}
+
+// GetSize gets the size of an item corresponding to the given key.
+func (b *ReadOnly) GetSize(ctx context.Context, key cid.Cid) (int, error) {
+	// Check if the given CID has multihash.IDENTITY code
+	// Note, we do this without locking, since there is no shared information to lock for in order to perform the check.
+	if digest, ok, err := store.IsIdentity(key); err != nil {
+		return 0, err
+	} else if ok {
+		return len(digest), nil
+	}
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	if b.closed {
+		return 0, errClosed
+	}
+
+	_, _, size, err := store.FindCid(
+		b.backing,
+		b.idx,
+		key,
+		b.opts.BlockstoreUseWholeCIDs,
+		b.opts.ZeroLengthSectionAsEOF,
+		b.opts.MaxAllowedSectionSize,
+		false,
+	)
+	if errors.Is(err, index.ErrNotFound) {
+		return -1, format.ErrNotFound{Cid: key}
+	} else if err != nil {
+		return -1, err
+	}
+	return size, nil
+}
+
+// Put is not supported and always returns an error.
+func (b *ReadOnly) Put(context.Context, blocks.Block) error {
+	return errReadOnly
+}
+
+// PutMany is not supported and always returns an error.
+func (b *ReadOnly) PutMany(context.Context, []blocks.Block) error {
+	return errReadOnly
+}
+
+// WithAsyncErrorHandler returns a context with async error handling set to the given errHandler.
+// Any errors that occur during asynchronous operations of AllKeysChan will be passed to the given
+// handler.
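+//
+// A minimal usage sketch (illustrative only; assumes bs is a *ReadOnly):
+//
+//	ctx := WithAsyncErrorHandler(context.Background(), func(err error) {
+//		log.Printf("AllKeysChan failed mid-stream: %v", err)
+//	})
+//	ch, err := bs.AllKeysChan(ctx)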
+func WithAsyncErrorHandler(ctx context.Context, errHandler func(error)) context.Context {
+	return context.WithValue(ctx, asyncErrHandlerKey, errHandler)
+}
+
+// AllKeysChan returns the list of keys in the CAR data payload.
+// If the ctx is constructed using WithAsyncErrorHandler any errors that occur during asynchronous
+// retrieval of CIDs will be passed to the error handler function set in context.
+// Otherwise, errors will terminate the asynchronous operation silently.
+//
+// See WithAsyncErrorHandler
+func (b *ReadOnly) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	// We release the lock when the channel-sending goroutine stops.
+	// Note that we can't use a deferred unlock here,
+	// because if we return a nil error,
+	// we only want to unlock once the async goroutine has stopped.
+	b.mu.RLock()
+
+	if b.closed {
+		b.mu.RUnlock() // don't hold the mutex forever
+		return nil, errClosed
+	}
+
+	// TODO we may use this walk for populating the index, and we need to be able to iterate keys in this way somewhere for index generation. In general though, when it's asked for all keys from a blockstore with an index, we should iterate through the index when possible rather than doing linear reads through the full car.
+	rdr, err := internalio.NewOffsetReadSeeker(b.backing, 0)
+	if err != nil {
+		b.mu.RUnlock() // don't hold the mutex forever
+		return nil, err
+	}
+	header, err := carv1.ReadHeader(rdr, b.opts.MaxAllowedHeaderSize)
+	if err != nil {
+		b.mu.RUnlock() // don't hold the mutex forever
+		return nil, fmt.Errorf("error reading car header: %w", err)
+	}
+	headerSize, err := carv1.HeaderSize(header)
+	if err != nil {
+		b.mu.RUnlock() // don't hold the mutex forever
+		return nil, err
+	}
+
+	// TODO: document this choice of 5, or use simpler buffering like 0 or 1.
+	ch := make(chan cid.Cid, 5)
+
+	// Seek to the end of header.
+	if _, err = rdr.Seek(int64(headerSize), io.SeekStart); err != nil {
+		b.mu.RUnlock() // don't hold the mutex forever
+		return nil, err
+	}
+
+	go func() {
+		defer b.mu.RUnlock()
+		defer close(ch)
+
+		for {
+			length, err := varint.ReadUvarint(rdr)
+			if err != nil {
+				if err != io.EOF {
+					maybeReportError(ctx, err)
+				}
+				return
+			}
+
+			// Null padding; by default it's an error.
+			if length == 0 {
+				if b.opts.ZeroLengthSectionAsEOF {
+					break
+				} else {
+					maybeReportError(ctx, errZeroLengthSection)
+					return
+				}
+			}
+
+			thisItemForNxt, err := rdr.Seek(0, io.SeekCurrent)
+			if err != nil {
+				maybeReportError(ctx, err)
+				return
+			}
+			_, c, err := cid.CidFromReader(rdr)
+			if err != nil {
+				maybeReportError(ctx, err)
+				return
+			}
+			if _, err := rdr.Seek(thisItemForNxt+int64(length), io.SeekStart); err != nil {
+				maybeReportError(ctx, err)
+				return
+			}
+
+			// If we're just using multihashes, flatten to the "raw" codec.
+			if !b.opts.BlockstoreUseWholeCIDs {
+				c = cid.NewCidV1(cid.Raw, c.Hash())
+			}
+
+			select {
+			case ch <- c:
+			case <-ctx.Done():
+				maybeReportError(ctx, ctx.Err())
+				return
+			}
+		}
+	}()
+	return ch, nil
+}
+
+// maybeReportError checks if an error handler is present in the context under the key
+// asyncErrHandlerKey and, if present, passes the error to it.
+func maybeReportError(ctx context.Context, err error) {
+	value := ctx.Value(asyncErrHandlerKey)
+	if eh, _ := value.(func(error)); eh != nil {
+		eh(err)
+	}
+}
+
+// HashOnRead is currently unimplemented; hashing on reads never happens.
+func (b *ReadOnly) HashOnRead(bool) {
+	// TODO: implement before the final release?
+}
+
+// Roots returns the root CIDs of the backing CAR.
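+// Note that the CARv1 header is re-read from the backing on every call rather
+// than cached at construction time.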
+func (b *ReadOnly) Roots() ([]cid.Cid, error) { + ors, err := internalio.NewOffsetReadSeeker(b.backing, 0) + if err != nil { + return nil, err + } + header, err := carv1.ReadHeader(ors, b.opts.MaxAllowedHeaderSize) + if err != nil { + return nil, fmt.Errorf("error reading car header: %w", err) + } + return header.Roots, nil +} + +// Close closes the underlying reader if it was opened by OpenReadOnly. +// After this call, the blockstore can no longer be used. +// +// Note that this call may block if any blockstore operations are currently in +// progress, including an AllKeysChan that hasn't been fully consumed or cancelled. +func (b *ReadOnly) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.closeWithoutMutex() +} + +func (b *ReadOnly) closeWithoutMutex() error { + b.closed = true + if b.carv2Closer != nil { + return b.carv2Closer.Close() + } + return nil +} diff --git a/ipld/car/v2/blockstore/readonly_test.go b/ipld/car/v2/blockstore/readonly_test.go new file mode 100644 index 0000000000..5d018b0318 --- /dev/null +++ b/ipld/car/v2/blockstore/readonly_test.go @@ -0,0 +1,396 @@ +package blockstore + +import ( + "bytes" + "context" + "io" + "os" + "testing" + "time" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/boxo/ipld/car/v2/internal/store" +) + +func TestReadOnlyGetReturnsBlockstoreNotFoundWhenCidDoesNotExist(t *testing.T) { + subject, err := OpenReadOnly("../testdata/sample-v1.car") + require.NoError(t, err) + nonExistingKey := merkledag.NewRawNode([]byte("lobstermuncher")).Block.Cid() + + // Assert blockstore API returns blockstore.ErrNotFound + gotBlock, err := subject.Get(context.TODO(), nonExistingKey) + require.IsType(t, format.ErrNotFound{}, err) + require.Nil(t, gotBlock) +} + +func TestReadOnly(t *testing.T) { + tests := []struct { + name string + v1OrV2path string + opts []carv2.Option + noIdCids bool + }{ + { + "OpenedWithCarV1", + "../testdata/sample-v1.car", + []carv2.Option{UseWholeCIDs(true), carv2.StoreIdentityCIDs(true)}, + // index is made, but identity CIDs are included so they'll be found + false, + }, + { + "OpenedWithCarV1_NoIdentityCID", + "../testdata/sample-v1.car", + []carv2.Option{UseWholeCIDs(true)}, + // index is made, identity CIDs are not included, but we always short-circuit when StoreIdentityCIDs(false) + false, + }, + { + "OpenedWithCarV2", + "../testdata/sample-wrapped-v2.car", + []carv2.Option{UseWholeCIDs(true), carv2.StoreIdentityCIDs(true)}, + // index already exists, but was made without identity CIDs, but opening with StoreIdentityCIDs(true) means we check the index + true, + }, + { + "OpenedWithCarV2_NoIdentityCID", + "../testdata/sample-wrapped-v2.car", + []carv2.Option{UseWholeCIDs(true)}, + // index already exists, it was made without identity CIDs, but we always short-circuit when StoreIdentityCIDs(false) + false, + }, + { + "OpenedWithCarV1ZeroLenSection", + "../testdata/sample-v1-with-zero-len-section.car", + []carv2.Option{UseWholeCIDs(true), carv2.ZeroLengthSectionAsEOF(true)}, + false, + }, + { + "OpenedWithAnotherCarV1ZeroLenSection", + "../testdata/sample-v1-with-zero-len-section2.car", + []carv2.Option{UseWholeCIDs(true), 
carv2.ZeroLengthSectionAsEOF(true)},
+			false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.TODO()
+
+			subject, err := OpenReadOnly(tt.v1OrV2path, tt.opts...)
+			require.NoError(t, err)
+			t.Cleanup(func() { require.NoError(t, subject.Close()) })
+
+			f, err := os.Open(tt.v1OrV2path)
+			require.NoError(t, err)
+			t.Cleanup(func() { require.NoError(t, f.Close()) })
+
+			reader, err := carv2.NewBlockReader(f, tt.opts...)
+			require.NoError(t, err)
+
+			// Assert roots match v1 payload.
+			wantRoots := reader.Roots
+			gotRoots, err := subject.Roots()
+			require.NoError(t, err)
+			require.Equal(t, wantRoots, gotRoots)
+
+			var wantCids []cid.Cid
+			for {
+				wantBlock, err := reader.Next()
+				if err == io.EOF {
+					break
+				}
+				require.NoError(t, err)
+
+				key := wantBlock.Cid()
+				wantCids = append(wantCids, key)
+
+				// Assert blockstore contains key.
+				has, err := subject.Has(ctx, key)
+				require.NoError(t, err)
+				if key.Prefix().MhType == uint64(multicodec.Identity) && tt.noIdCids {
+					// fixture wasn't made with StoreIdentityCIDs, but we opened it with StoreIdentityCIDs,
+					// so they aren't there to find
+					require.False(t, has)
+				} else {
+					require.True(t, has)
+				}
+
+				// Assert size matches block raw data length.
+				gotSize, err := subject.GetSize(ctx, key)
+				wantSize := len(wantBlock.RawData())
+				require.NoError(t, err)
+				require.Equal(t, wantSize, gotSize)
+
+				// Assert block itself matches v1 payload block.
+				if has {
+					gotBlock, err := subject.Get(ctx, key)
+					require.NoError(t, err)
+					require.Equal(t, wantBlock, gotBlock)
+				}
+
+				// Assert write operations error
+				require.Error(t, subject.Put(ctx, wantBlock))
+				require.Error(t, subject.PutMany(ctx, []blocks.Block{wantBlock}))
+				require.Error(t, subject.DeleteBlock(ctx, key))
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
+			defer cancel()
+
+			// Assert all cids in blockstore match v1 payload CIDs.
+			allKeysChan, err := subject.AllKeysChan(ctx)
+			require.NoError(t, err)
+			var gotCids []cid.Cid
+			for gotKey := range allKeysChan {
+				gotCids = append(gotCids, gotKey)
+			}
+			require.Equal(t, wantCids, gotCids)
+		})
+	}
+}
+
+func TestNewReadOnlyFailsOnUnknownVersion(t *testing.T) {
+	f, err := os.Open("../testdata/sample-rootless-v42.car")
+	require.NoError(t, err)
+	t.Cleanup(func() { f.Close() })
+	subject, err := NewReadOnly(f, nil)
+	require.EqualError(t, err, "unsupported car version: 42")
+	require.Nil(t, subject)
+}
+
+func TestReadOnlyAllKeysChanErrHandlerCalledOnTimeout(t *testing.T) {
+	expiredCtx, cancel := context.WithTimeout(context.Background(), -time.Millisecond)
+	t.Cleanup(cancel)
+
+	subject, err := OpenReadOnly("../testdata/sample-v1.car")
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, subject.Close()) })
+
+	// Make a channel to be able to select and block on until error handler is called.
+	errHandlerCalled := make(chan interface{})
+	expiredErrHandlingCtx := WithAsyncErrorHandler(expiredCtx, func(err error) {
+		defer close(errHandlerCalled)
+		require.EqualError(t, err, "context deadline exceeded")
+	})
+	_, err = subject.AllKeysChan(expiredErrHandlingCtx)
+	require.NoError(t, err)
+
+	// Assert error handler was called with required condition, waiting at most 3 seconds.
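+	// (The handler closes errHandlerCalled on success, which unblocks the first
+	// select case below; the time.After case fails the test instead.)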
+ select { + case <-errHandlerCalled: + break + case <-time.After(time.Second * 3): + require.Fail(t, "error handler was not called within expected time window") + } +} + +func TestReadOnlyAllKeysChanErrHandlerNeverCalled(t *testing.T) { + tests := []struct { + name string + path string + errHandler func(err error) + wantCIDs []cid.Cid + }{ + { + "ReadingValidCarV1ReturnsNoErrors", + "../testdata/sample-v1.car", + func(err error) { + require.Fail(t, "unexpected call", "error handler called unexpectedly with err: %v", err) + }, + listCids(t, newV1ReaderFromV1File(t, "../testdata/sample-v1.car", false)), + }, + { + "ReadingValidCarV2ReturnsNoErrors", + "../testdata/sample-wrapped-v2.car", + func(err error) { + require.Fail(t, "unexpected call", "error handler called unexpectedly with err: %v", err) + }, + listCids(t, newV1ReaderFromV2File(t, "../testdata/sample-wrapped-v2.car", false)), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + subject, err := OpenReadOnly(tt.path, UseWholeCIDs(true)) + require.NoError(t, err) + ctx := WithAsyncErrorHandler(context.Background(), tt.errHandler) + keysChan, err := subject.AllKeysChan(ctx) + require.NoError(t, err) + var gotCids []cid.Cid + for k := range keysChan { + gotCids = append(gotCids, k) + } + require.Equal(t, tt.wantCIDs, gotCids) + }) + } +} + +func listCids(t *testing.T, v1r *carv1.CarReader) (cids []cid.Cid) { + for { + block, err := v1r.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + cids = append(cids, block.Cid()) + } + return +} + +func newV1ReaderFromV1File(t *testing.T, carv1Path string, zeroLenSectionAsEOF bool) *carv1.CarReader { + f, err := os.Open(carv1Path) + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + v1r, err := newV1Reader(f, zeroLenSectionAsEOF) + require.NoError(t, err) + return v1r +} + +func newV1ReaderFromV2File(t *testing.T, carv2Path string, zeroLenSectionAsEOF bool) *carv1.CarReader { + f, err := os.Open(carv2Path) + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + v2r, err := carv2.NewReader(f) + require.NoError(t, err) + dr, err := v2r.DataReader() + require.NoError(t, err) + v1r, err := newV1Reader(dr, zeroLenSectionAsEOF) + require.NoError(t, err) + return v1r +} + +func newV1Reader(r io.Reader, zeroLenSectionAsEOF bool) (*carv1.CarReader, error) { + if zeroLenSectionAsEOF { + return carv1.NewCarReaderWithZeroLengthSectionAsEOF(r) + } + return carv1.NewCarReader(r) +} + +func TestReadOnlyErrorAfterClose(t *testing.T) { + bs, err := OpenReadOnly("../testdata/sample-v1.car") + ctx := context.TODO() + require.NoError(t, err) + + roots, err := bs.Roots() + require.NoError(t, err) + _, err = bs.Has(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.Get(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + _, err = bs.AllKeysChan(ctx) + require.NoError(t, err) + cancel() // to stop the AllKeysChan goroutine + + bs.Close() + + _, err = bs.Roots() + require.Error(t, err) + _, err = bs.Has(ctx, roots[0]) + require.Error(t, err) + _, err = bs.Get(ctx, roots[0]) + require.Error(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.Error(t, err) + _, err = bs.AllKeysChan(ctx) + require.Error(t, err) + + // TODO: test that closing blocks if an AllKeysChan operation is + // in progress. 
+}
+
+func TestNewReadOnly_CarV1WithoutIndexWorksAsExpected(t *testing.T) {
+	carV1Bytes, err := os.ReadFile("../testdata/sample-v1.car")
+	require.NoError(t, err)
+
+	reader := bytes.NewReader(carV1Bytes)
+	v1r, err := carv1.NewCarReader(reader)
+	require.NoError(t, err)
+	require.Equal(t, uint64(1), v1r.Header.Version)
+
+	// Pick the first block in CARv1 as candidate to check `Get` works.
+	wantBlock, err := v1r.Next()
+	require.NoError(t, err)
+
+	// Seek back to the beginning of the CARv1 payload.
+	_, err = reader.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+
+	subject, err := NewReadOnly(reader, nil, UseWholeCIDs(true))
+	require.NoError(t, err)
+
+	// Require that the block is found via the ReadOnly API and contents are as expected.
+	gotBlock, err := subject.Get(context.TODO(), wantBlock.Cid())
+	require.NoError(t, err)
+	require.Equal(t, wantBlock, gotBlock)
+}
+
+func TestReadOnlyIndex(t *testing.T) {
+	tests := []struct {
+		name     string
+		path     string
+		wantCIDs []cid.Cid
+	}{
+		{
+			"IndexCarV1",
+			"../testdata/sample-v1.car",
+			listCids(t, newV1ReaderFromV1File(t, "../testdata/sample-v1.car", false)),
+		},
+		{
+			"IndexCarV2",
+			"../testdata/sample-wrapped-v2.car",
+			listCids(t, newV1ReaderFromV2File(t, "../testdata/sample-wrapped-v2.car", false)),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			subject, err := OpenReadOnly(tt.path, UseWholeCIDs(true))
+			require.NoError(t, err)
+
+			idx := subject.Index()
+
+			for _, c := range tt.wantCIDs {
+				_, isIdentity, err := store.IsIdentity(c)
+				require.NoError(t, err)
+				if isIdentity {
+					// the index doesn't hold identity CIDs
+					continue
+				}
+				_, err = index.GetFirst(idx, c)
+				require.NoError(t, err)
+			}
+
+			if idx, ok := idx.(index.IterableIndex); ok {
+				expected := make([]multihash.Multihash, 0, len(tt.wantCIDs))
+				for _, c := range tt.wantCIDs {
+					_, isIdentity, err := store.IsIdentity(c)
+					require.NoError(t, err)
+					if isIdentity {
+						// the index doesn't hold identity CIDs
+						continue
+					}
+					expected = append(expected, c.Hash())
+				}
+
+				var got []multihash.Multihash
+				err = idx.ForEach(func(m multihash.Multihash, u uint64) error {
+					got = append(got, m)
+					return nil
+				})
+				require.NoError(t, err)
+				require.ElementsMatch(t, expected, got)
+			}
+		})
+	}
+}
diff --git a/ipld/car/v2/blockstore/readwrite.go b/ipld/car/v2/blockstore/readwrite.go new file mode 100644 index 0000000000..918921ec93 --- /dev/null +++ b/ipld/car/v2/blockstore/readwrite.go @@ -0,0 +1,389 @@
+package blockstore
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	"github.com/ipfs/go-cid"
+
+	carv2 "github.com/ipfs/boxo/ipld/car/v2"
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util"
+	internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/store"
+)
+
+var _ blockstore.Blockstore = (*ReadWrite)(nil)
+
+var (
+	errFinalized = fmt.Errorf("cannot write in a carv2 blockstore after finalize")
+)
+
+// ReadWrite implements a blockstore that stores blocks in CARv2 format.
+// Blocks put into the blockstore can be read back once they are successfully written.
+// This implementation is preferable for a write-heavy workload.
+// The blocks are written immediately on Put and PutMany calls, while the index is stored in memory
+// and updated incrementally.
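+//
+// A minimal write path (sketch; error handling elided and names illustrative):
+//
+//	bs, _ := OpenReadWrite("example.car", roots)
+//	_ = bs.Put(ctx, blk)
+//	_ = bs.Finalize()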
+//
+// The Finalize function must be called once putting blocks is finished.
+// Upon calling Finalize, the header is finalized and the index is written out.
+// Once finalized, all read and write calls to this blockstore will result in errors.
+type ReadWrite struct {
+	ronly ReadOnly
+
+	f          *os.File
+	dataWriter *internalio.OffsetWriteSeeker
+	idx        *store.InsertionIndex
+	header     carv2.Header
+
+	finalized bool // also protected by ronly.mu
+
+	opts carv2.Options
+}
+
+var WriteAsCarV1 = carv2.WriteAsCarV1
+var AllowDuplicatePuts = carv2.AllowDuplicatePuts
+
+// OpenReadWrite creates a new ReadWrite at the given path with a provided set of root CIDs and options.
+//
+// ReadWrite.Finalize must be called once putting and reading blocks are no longer needed.
+// Upon calling ReadWrite.Finalize the CARv2 header and index are written out onto the file and the
+// backing file is closed. Once finalized, all read and write calls to this blockstore will result
+// in errors. Note, ReadWrite.Finalize must be called on an open instance regardless of whether any
+// blocks were put or not.
+//
+// If a file at given path does not exist, the instantiation will write car.Pragma and data payload
+// header (i.e. the inner CARv1 header) onto the file before returning.
+//
+// When the given path already exists, the blockstore will attempt to resume from it.
+// On resumption the existing data sections in file are re-indexed, allowing the caller to continue
+// putting any remaining blocks without having to re-ingest blocks for which previous ReadWrite.Put
+// returned successfully.
+//
+// Resumption only works on files that were created by a previous instance of a ReadWrite
+// blockstore. This means a file created as a result of a successful call to OpenReadWrite can be
+// resumed from as long as write operations such as ReadWrite.Put, ReadWrite.PutMany returned
+// successfully. On resumption the roots argument and WithDataPadding option must match the
+// previous instantiation of ReadWrite blockstore that created the file. More explicitly, the file
+// resuming from must:
+// 1. start with a complete CARv2 car.Pragma.
+// 2. contain a complete CARv1 data header with root CIDs matching the CIDs passed to the
+// constructor, starting at offset optionally padded by WithDataPadding, followed by zero or
+// more complete data sections. If any corrupt data sections are present, the resumption will fail.
+// Note, if set previously, the blockstore must use the same WithDataPadding option as before,
+// since this option is used to locate the CARv1 data payload.
+//
+// Note, resumption should be used with WithCidDeduplication, so that blocks that are successfully
+// written into the file are not re-written, unless the user explicitly wants duplicate blocks.
+//
+// Resuming from finalized files is allowed. However, resumption will regenerate the index
+// regardless by scanning every existing block in the file.
+func OpenReadWrite(path string, roots []cid.Cid, opts ...carv2.Option) (*ReadWrite, error) {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o666) // TODO: Should the user be able to configure FileMode permissions?
+	if err != nil {
+		return nil, fmt.Errorf("could not open read/write file: %w", err)
+	}
+	// If construction of blockstore fails, make sure to close off the open file.
+	defer func() {
+		if err != nil {
+			f.Close()
+		}
+	}()
+	rwbs, err := OpenReadWriteFile(f, roots, opts...)
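+	// Any error below aborts construction; the deferred Close above ensures the
+	// opened file does not leak.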
+	if err != nil {
+		return nil, err
+	}
+	// close the file when finalizing
+	rwbs.ronly.carv2Closer = rwbs.f
+	return rwbs, nil
+}
+
+// OpenReadWriteFile is similar to OpenReadWrite but lets you control the file lifecycle.
+// You are responsible for closing the given file.
+func OpenReadWriteFile(f *os.File, roots []cid.Cid, opts ...carv2.Option) (*ReadWrite, error) {
+	stat, err := f.Stat()
+	if err != nil {
+		// Note, we should not get an os.ErrNotExist here because the flags used to open the file include os.O_CREATE
+		return nil, err
+	}
+	// Try and resume by default if the file size is non-zero.
+	resume := stat.Size() != 0
+
+	// Instantiate block store.
+	// Set the header field before applying options, since padding options may modify the header.
+	rwbs := &ReadWrite{
+		f:         f,
+		idx:       store.NewInsertionIndex(),
+		header:    carv2.NewHeader(0),
+		opts:      carv2.ApplyOptions(opts...),
+		finalized: false,
+	}
+	rwbs.ronly.opts = rwbs.opts
+
+	if p := rwbs.opts.DataPadding; p > 0 {
+		rwbs.header = rwbs.header.WithDataPadding(p)
+	}
+	if p := rwbs.opts.IndexPadding; p > 0 {
+		rwbs.header = rwbs.header.WithIndexPadding(p)
+	}
+
+	offset := int64(rwbs.header.DataOffset)
+	if rwbs.opts.WriteAsCarV1 {
+		offset = 0
+	}
+	rwbs.dataWriter = internalio.NewOffsetWriter(rwbs.f, offset)
+	var v1r internalio.ReadSeekerAt
+	v1r, err = internalio.NewOffsetReadSeeker(rwbs.f, offset)
+	if err != nil {
+		return nil, err
+	}
+	rwbs.ronly.backing = v1r
+	rwbs.ronly.idx = rwbs.idx
+
+	if resume {
+		if err = store.ResumableVersion(f, rwbs.opts.WriteAsCarV1); err != nil {
+			return nil, err
+		}
+		if err = store.Resume(
+			f,
+			rwbs.ronly.backing,
+			rwbs.dataWriter,
+			rwbs.idx,
+			roots,
+			rwbs.header.DataOffset,
+			rwbs.opts.WriteAsCarV1,
+			rwbs.opts.MaxAllowedHeaderSize,
+			rwbs.opts.ZeroLengthSectionAsEOF,
+		); err != nil {
+			return nil, err
+		}
+	} else {
+		if err = rwbs.initWithRoots(!rwbs.opts.WriteAsCarV1, roots); err != nil {
+			return nil, err
+		}
+	}
+
+	return rwbs, nil
+}
+
+func (b *ReadWrite) initWithRoots(v2 bool, roots []cid.Cid) error {
+	if v2 {
+		if _, err := b.f.WriteAt(carv2.Pragma, 0); err != nil {
+			return err
+		}
+	}
+	return carv1.WriteHeader(&carv1.CarHeader{Roots: roots, Version: 1}, b.dataWriter)
+}
+
+// Index gives direct access to the index.
+// You should never add records on your own there.
+func (b *ReadWrite) Index() index.Index {
+	return b.idx
+}
+
+// Put puts a given block to the underlying datastore
+func (b *ReadWrite) Put(ctx context.Context, blk blocks.Block) error {
+	// PutMany already checks b.ronly.closed.
+	return b.PutMany(ctx, []blocks.Block{blk})
+}
+
+// PutMany puts a slice of blocks at the same time using batching
+// capabilities of the underlying datastore whenever possible.
+func (b *ReadWrite) PutMany(ctx context.Context, blks []blocks.Block) error {
+	b.ronly.mu.Lock()
+	defer b.ronly.mu.Unlock()
+
+	if b.ronly.closed {
+		return errClosed
+	}
+	if b.finalized {
+		return errFinalized
+	}
+
+	for _, bl := range blks {
+		c := bl.Cid()
+
+		if should, err := store.ShouldPut(
+			b.idx,
+			c,
+			b.opts.MaxIndexCidSize,
+			b.opts.StoreIdentityCIDs,
+			b.opts.BlockstoreAllowDuplicatePuts,
+			b.opts.BlockstoreUseWholeCIDs,
+		); err != nil {
+			return err
+		} else if !should {
+			continue
+		}
+
+		n := uint64(b.dataWriter.Position())
+		if err := util.LdWrite(b.dataWriter, c.Bytes(), bl.RawData()); err != nil {
+			return err
+		}
+		b.idx.InsertNoReplace(c, n)
+	}
+	return nil
+}
+
+// Discard closes this blockstore without finalizing its header and index.
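+// The on-disk result is an unfinalized file that a subsequent OpenReadWrite
+// call can resume from.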
+// After this call, the blockstore can no longer be used.
+//
+// Note that this call may block if any blockstore operations are currently in
+// progress, including an AllKeysChan that hasn't been fully consumed or cancelled.
+func (b *ReadWrite) Discard() {
+	// Same semantics as ReadOnly.Close, including allowing duplicate calls.
+	// The only difference is that our method is called Discard,
+	// to further clarify that we're not properly finalizing and writing a
+	// CARv2 file.
+	b.ronly.Close()
+}
+
+// Finalize finalizes this blockstore by writing the CARv2 header, along with a flattened index
+// for more efficient subsequent reads.
+// This is equivalent to calling FinalizeReadOnly and Close.
+// After this call, the blockstore can no longer be used.
+func (b *ReadWrite) Finalize() error {
+	b.ronly.mu.Lock()
+	defer b.ronly.mu.Unlock()
+
+	for _, err := range []error{b.finalizeReadOnlyWithoutMutex(), b.closeWithoutMutex()} {
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// FinalizeReadOnly finalizes this blockstore by writing the CARv2 header, along with a flattened
+// index for more efficient subsequent reads, but keeps it open read-only.
+// This call should be complemented later by a call to Close.
+func (b *ReadWrite) FinalizeReadOnly() error {
+	b.ronly.mu.Lock()
+	defer b.ronly.mu.Unlock()
+
+	return b.finalizeReadOnlyWithoutMutex()
+}
+
+func (b *ReadWrite) finalizeReadOnlyWithoutMutex() error {
+	if b.opts.WriteAsCarV1 {
+		// all blocks are already properly written to the CARv1 inner container and there's
+		// no additional finalization required at the end of the file for a complete v1
+		b.finalized = true
+		return nil
+	}
+
+	if b.ronly.closed {
+		// Allow duplicate Finalize calls, just like Close.
+		// Still error, just like ReadOnly.Close; it should be discarded.
+		return fmt.Errorf("called Finalize or FinalizeReadOnly on a closed blockstore")
+	}
+	if b.finalized {
+		return fmt.Errorf("called Finalize or FinalizeReadOnly on an already finalized blockstore")
+	}
+
+	b.finalized = true
+
+	return store.Finalize(b.f, b.header, b.idx, uint64(b.dataWriter.Position()), b.opts.StoreIdentityCIDs, b.opts.IndexCodec)
+}
+
+// Close closes the blockstore.
+// After this call, the blockstore can no longer be used.
+func (b *ReadWrite) Close() error {
+	b.ronly.mu.Lock()
+	defer b.ronly.mu.Unlock()
+
+	return b.closeWithoutMutex()
+}
+
+func (b *ReadWrite) closeWithoutMutex() error {
+	if !b.opts.WriteAsCarV1 && !b.finalized {
+		return fmt.Errorf("called Close without FinalizeReadOnly first")
+	}
+	if b.ronly.closed {
+		// Allow duplicate Close calls
+		// Still error, just like ReadOnly.Close; it should be discarded.
+ return fmt.Errorf("called Close on a closed blockstore") + } + + if err := b.ronly.closeWithoutMutex(); err != nil { + return err + } + return nil +} + +func (b *ReadWrite) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + + b.ronly.mu.Lock() + defer b.ronly.mu.Unlock() + + if b.ronly.closed { + return nil, errClosed + } + + out := make(chan cid.Cid) + + go func() { + defer close(out) + err := b.idx.ForEachCid(func(c cid.Cid, _ uint64) error { + if !b.opts.BlockstoreUseWholeCIDs { + c = cid.NewCidV1(cid.Raw, c.Hash()) + } + select { + case out <- c: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + if err != nil { + maybeReportError(ctx, err) + } + }() + + return out, nil +} + +func (b *ReadWrite) Has(ctx context.Context, key cid.Cid) (bool, error) { + b.ronly.mu.Lock() + defer b.ronly.mu.Unlock() + + if b.ronly.closed { + return false, errClosed + } + + return store.Has( + b.idx, + key, + b.opts.MaxIndexCidSize, + b.opts.StoreIdentityCIDs, + b.opts.BlockstoreAllowDuplicatePuts, + b.opts.BlockstoreUseWholeCIDs, + ) +} + +func (b *ReadWrite) Get(ctx context.Context, key cid.Cid) (blocks.Block, error) { + return b.ronly.Get(ctx, key) +} + +func (b *ReadWrite) GetSize(ctx context.Context, key cid.Cid) (int, error) { + return b.ronly.GetSize(ctx, key) +} + +func (b *ReadWrite) DeleteBlock(_ context.Context, _ cid.Cid) error { + return fmt.Errorf("ReadWrite blockstore does not support deleting blocks") +} + +func (b *ReadWrite) HashOnRead(enable bool) { + b.ronly.HashOnRead(enable) +} + +func (b *ReadWrite) Roots() ([]cid.Cid, error) { + return b.ronly.Roots() +} diff --git a/ipld/car/v2/blockstore/readwrite_test.go b/ipld/car/v2/blockstore/readwrite_test.go new file mode 100644 index 0000000000..d024c33afd --- /dev/null +++ b/ipld/car/v2/blockstore/readwrite_test.go @@ -0,0 +1,1177 @@ +package blockstore_test + +import ( + "context" + "crypto/sha512" + "fmt" + "io" + "math/rand" + "os" + "path" + "path/filepath" + "sync" + "testing" + "time" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + format "github.com/ipfs/go-ipld-format" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" +) + +var ( + rng = rand.New(rand.NewSource(1413)) + oneTestBlockWithCidV1 = merkledag.NewRawNode([]byte("fish")).Block + anotherTestBlockWithCidV0 = blocks.NewBlock([]byte("barreleye")) +) + +func TestReadWriteGetReturnsBlockstoreNotFoundWhenCidDoesNotExist(t *testing.T) { + path := filepath.Join(t.TempDir(), "readwrite-err-not-found.car") + subject, err := blockstore.OpenReadWrite(path, []cid.Cid{}) + t.Cleanup(func() { subject.Finalize() }) + require.NoError(t, err) + nonExistingKey := merkledag.NewRawNode([]byte("undadasea")).Block.Cid() + + // Assert blockstore API returns blockstore.ErrNotFound + gotBlock, err := subject.Get(context.TODO(), nonExistingKey) + require.IsType(t, format.ErrNotFound{}, err) + require.Nil(t, gotBlock) +} + +func TestBlockstore(t *testing.T) { + originalCARv1Path := "../testdata/sample-v1.car" + originalCARv1ComparePath := "../testdata/sample-v1-noidentity.car" + originalCARv1ComparePathStat, err := 
os.Stat(originalCARv1ComparePath) + require.NoError(t, err) + + variants := []struct { + name string + options []carv2.Option + expectedV1StartOffset int64 + }{ + // no options, expect a standard CARv2 with the noidentity inner CARv1 + {"noopt_carv2", []carv2.Option{}, int64(carv2.PragmaSize + carv2.HeaderSize)}, + // option to only write as a CARv1, expect the noidentity inner CARv1 + {"carv1", []carv2.Option{blockstore.WriteAsCarV1(true)}, int64(0)}, + } + + for _, variant := range variants { + t.Run(variant.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + f, err := os.Open(originalCARv1Path) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, f.Close()) }) + r, err := carv1.NewCarReader(f) + require.NoError(t, err) + + path := filepath.Join(t.TempDir(), fmt.Sprintf("readwrite_%s.car", variant.name)) + ingester, err := blockstore.OpenReadWrite(path, r.Header.Roots, variant.options...) + require.NoError(t, err) + t.Cleanup(func() { ingester.Finalize() }) + + cids := make([]cid.Cid, 0) + var idCidCount int + for { + b, err := r.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + err = ingester.Put(ctx, b) + require.NoError(t, err) + cids = append(cids, b.Cid()) + + // try reading a random one: + candidate := cids[rng.Intn(len(cids))] + if has, err := ingester.Has(ctx, candidate); !has || err != nil { + t.Fatalf("expected to find %s but didn't: %s", candidate, err) + } + + dmh, err := multihash.Decode(b.Cid().Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + idCidCount++ + } + } + + for _, c := range cids { + b, err := ingester.Get(ctx, c) + require.NoError(t, err) + if !b.Cid().Equals(c) { + t.Fatal("wrong item returned") + } + } + + err = ingester.Finalize() + require.NoError(t, err) + robs, err := blockstore.OpenReadOnly(path) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, robs.Close()) }) + + allKeysCh, err := robs.AllKeysChan(ctx) + require.NoError(t, err) + numKeysCh := 0 + for c := range allKeysCh { + b, err := robs.Get(ctx, c) + require.NoError(t, err) + if !b.Cid().Equals(c) { + t.Fatal("wrong item returned") + } + numKeysCh++ + } + expectedCidCount := len(cids) - idCidCount + require.Equal(t, expectedCidCount, numKeysCh, "AllKeysChan returned an unexpected amount of keys; expected %v but got %v", expectedCidCount, numKeysCh) + + for _, c := range cids { + b, err := robs.Get(ctx, c) + require.NoError(t, err) + if !b.Cid().Equals(c) { + t.Fatal("wrong item returned") + } + } + + wrote, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, wrote.Close()) }) + _, err = wrote.Seek(variant.expectedV1StartOffset, io.SeekStart) + require.NoError(t, err) + hasher := sha512.New() + gotWritten, err := io.Copy(hasher, io.LimitReader(wrote, originalCARv1ComparePathStat.Size())) + require.NoError(t, err) + gotSum := hasher.Sum(nil) + + hasher.Reset() + originalCarV1, err := os.Open(originalCARv1ComparePath) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, originalCarV1.Close()) }) + wantWritten, err := io.Copy(hasher, originalCarV1) + require.NoError(t, err) + wantSum := hasher.Sum(nil) + + require.Equal(t, wantWritten, gotWritten) + require.Equal(t, wantSum, gotSum) + }) + } +} + +func TestBlockstorePutSameHashes(t *testing.T) { + tdir := t.TempDir() + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // This blockstore allows duplicate puts, + // and identifies 
by multihash as per the default.
+	wbsAllowDups, err := blockstore.OpenReadWrite(
+		filepath.Join(tdir, "readwrite-allowdup.car"), nil,
+		blockstore.AllowDuplicatePuts(true),
+	)
+	require.NoError(t, err)
+	t.Cleanup(func() { wbsAllowDups.Finalize() })
+
+	// This blockstore deduplicates puts by CID.
+	wbsByCID, err := blockstore.OpenReadWrite(
+		filepath.Join(tdir, "readwrite-dedup-wholecid.car"), nil,
+		blockstore.UseWholeCIDs(true),
+	)
+	require.NoError(t, err)
+	t.Cleanup(func() { wbsByCID.Finalize() })
+
+	// This blockstore deduplicates puts by multihash.
+	wbsByHash, err := blockstore.OpenReadWrite(
+		filepath.Join(tdir, "readwrite-dedup-hash.car"), nil,
+	)
+	require.NoError(t, err)
+	t.Cleanup(func() { wbsByHash.Finalize() })
+
+	var blockList []blocks.Block
+
+	appendBlock := func(data []byte, version, codec uint64) {
+		c, err := cid.Prefix{
+			Version:  version,
+			Codec:    codec,
+			MhType:   multihash.SHA2_256,
+			MhLength: -1,
+		}.Sum(data)
+		require.NoError(t, err)
+
+		block, err := blocks.NewBlockWithCid(data, c)
+		require.NoError(t, err)
+
+		blockList = append(blockList, block)
+	}
+
+	// Two raw blocks, meaning we have two unique multihashes.
+	// However, we have multiple CIDs for each multihash.
+	// We also have two duplicate CIDs.
+	data1 := []byte("foo bar")
+	appendBlock(data1, 0, cid.DagProtobuf)
+	appendBlock(data1, 1, cid.DagProtobuf)
+	appendBlock(data1, 1, cid.DagCBOR)
+	appendBlock(data1, 1, cid.DagCBOR) // duplicate CID
+
+	data2 := []byte("foo bar baz")
+	appendBlock(data2, 0, cid.DagProtobuf)
+	appendBlock(data2, 1, cid.DagProtobuf)
+	appendBlock(data2, 1, cid.DagProtobuf) // duplicate CID
+	appendBlock(data2, 1, cid.DagCBOR)
+
+	countBlocks := func(bs *blockstore.ReadWrite) int {
+		ch, err := bs.AllKeysChan(context.Background())
+		require.NoError(t, err)
+
+		n := 0
+		for c := range ch {
+			if c.Prefix().Codec == cid.Raw {
+				if bs == wbsByCID {
+					t.Error("expected blockstore with UseWholeCIDs to not flatten on AllKeysChan")
+				}
+			} else {
+				if bs != wbsByCID {
+					t.Error("expected blockstore without UseWholeCIDs to flatten on AllKeysChan")
+				}
+			}
+			n++
+		}
+		return n
+	}
+
+	putBlockList := func(bs *blockstore.ReadWrite) {
+		for i, block := range blockList {
+			// Has should never error here.
+			// The first block should be missing.
+			// Others might not, given the duplicate hashes.
+			has, err := bs.Has(ctx, block.Cid())
+			require.NoError(t, err)
+			if i == 0 {
+				require.False(t, has)
+			}
+
+			err = bs.Put(ctx, block)
+			require.NoError(t, err)
+
+			// Has, Get, and GetSize need to work right after a Put.
+			has, err = bs.Has(ctx, block.Cid())
+			require.NoError(t, err)
+			require.True(t, has)
+
+			got, err := bs.Get(ctx, block.Cid())
+			require.NoError(t, err)
+			require.Equal(t, block.Cid(), got.Cid())
+			require.Equal(t, block.RawData(), got.RawData())
+
+			size, err := bs.GetSize(ctx, block.Cid())
+			require.NoError(t, err)
+			require.Equal(t, len(block.RawData()), size)
+		}
+	}
+
+	putBlockList(wbsAllowDups)
+	require.Equal(t, len(blockList), countBlocks(wbsAllowDups))
+
+	err = wbsAllowDups.Finalize()
+	require.NoError(t, err)
+
+	// Put the same list of blocks to the blockstore that
+	// deduplicates by CID.
+	// We should end up with two fewer blocks,
+	// as two are entire CID duplicates.
+	putBlockList(wbsByCID)
+	require.Equal(t, len(blockList)-2, countBlocks(wbsByCID))
+
+	err = wbsByCID.Finalize()
+	require.NoError(t, err)
+
+	// Put the same list of blocks to the blockstore that
+	// deduplicates by multihash.
+ // We should end up with just two blocks, + // as the original set of blocks only has two distinct multihashes. + putBlockList(wbsByHash) + require.Equal(t, 2, countBlocks(wbsByHash)) + + err = wbsByHash.Finalize() + require.NoError(t, err) +} + +func TestBlockstoreConcurrentUse(t *testing.T) { + wbs, err := blockstore.OpenReadWrite(filepath.Join(t.TempDir(), "readwrite.car"), nil) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + require.NoError(t, err) + t.Cleanup(func() { wbs.Finalize() }) + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + data := []byte(fmt.Sprintf("data-%d", i)) + + wg.Add(1) + go func() { + defer wg.Done() + + c, err := cid.Prefix{ + Version: 1, + Codec: cid.Raw, + MhType: multihash.SHA2_256, + MhLength: -1, + }.Sum(data) + require.NoError(t, err) + + block, err := blocks.NewBlockWithCid(data, c) + require.NoError(t, err) + + has, err := wbs.Has(ctx, block.Cid()) + require.NoError(t, err) + require.False(t, has) + + err = wbs.Put(ctx, block) + require.NoError(t, err) + + got, err := wbs.Get(ctx, block.Cid()) + require.NoError(t, err) + require.Equal(t, data, got.RawData()) + }() + } + wg.Wait() +} + +type bufferReaderAt []byte + +func (b bufferReaderAt) ReadAt(p []byte, off int64) (int, error) { + if off >= int64(len(b)) { + return 0, io.EOF + } + return copy(p, b[off:]), nil +} + +func TestBlockstoreNullPadding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + paddedV1, err := os.ReadFile("../testdata/sample-v1-with-zero-len-section.car") + require.NoError(t, err) + + rbs, err := blockstore.NewReadOnly(bufferReaderAt(paddedV1), nil, + carv2.ZeroLengthSectionAsEOF(true)) + require.NoError(t, err) + + roots, err := rbs.Roots() + require.NoError(t, err) + + has, err := rbs.Has(ctx, roots[0]) + require.NoError(t, err) + require.True(t, has) + + allKeysCh, err := rbs.AllKeysChan(ctx) + require.NoError(t, err) + for c := range allKeysCh { + b, err := rbs.Get(ctx, c) + require.NoError(t, err) + if !b.Cid().Equals(c) { + t.Fatal("wrong item returned") + } + } +} + +func TestBlockstoreResumption(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + v1f, err := os.Open("../testdata/sample-v1.car") + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, v1f.Close()) }) + r, err := carv1.NewCarReader(v1f) + require.NoError(t, err) + + path := filepath.Join(t.TempDir(), "readwrite-resume.car") + // Create an incomplete CARv2 file with no blocks put. + subject, err := blockstore.OpenReadWrite(path, r.Header.Roots, + blockstore.UseWholeCIDs(true)) + require.NoError(t, err) + + // For each block resume on the same file, putting blocks one at a time. + var wantBlockCountSoFar, idCidCount int + wantBlocks := make(map[cid.Cid]blocks.Block) + for { + b, err := r.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + wantBlockCountSoFar++ + wantBlocks[b.Cid()] = b + + dmh, err := multihash.Decode(b.Cid().Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + idCidCount++ + } + + // 30% chance of subject failing; more concretely: re-instantiating blockstore with the same + // file without calling Finalize. The higher this percentage the slower the test runs + // considering the number of blocks in the original CARv1 test payload. + resume := rng.Float32() <= 0.3 + // If testing resume case, then flip a coin to decide whether to finalize before blockstore + // re-instantiation or not. 
Note, both cases should work for resumption since we do not
+		// limit resumption to unfinalized files.
+		finalizeBeforeResumption := rng.Float32() <= 0.5
+		if resume {
+			if finalizeBeforeResumption {
+				require.NoError(t, subject.Finalize())
+			} else {
+				// Close off the open file and re-instantiate a new subject with resumption enabled.
+				// Note, we don't have to close the file for resumption to work.
+				// We do this to avoid resource leaks during testing.
+				subject.Discard()
+			}
+			subject, err = blockstore.OpenReadWrite(path, r.Header.Roots,
+				blockstore.UseWholeCIDs(true))
+			require.NoError(t, err)
+		}
+		require.NoError(t, subject.Put(ctx, b))
+
+		// With 10% chance test read operations on a resumed read-write blockstore.
+		// We don't test on every put to reduce test runtime.
+		testRead := rng.Float32() <= 0.1
+		if testRead {
+			// Assert read operations on the read-write blockstore are as expected when resumed from an
+			// existing file
+			var gotBlockCountSoFar int
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			t.Cleanup(cancel)
+			keysChan, err := subject.AllKeysChan(ctx)
+			require.NoError(t, err)
+			for k := range keysChan {
+				has, err := subject.Has(ctx, k)
+				require.NoError(t, err)
+				require.True(t, has)
+				gotBlock, err := subject.Get(ctx, k)
+				require.NoError(t, err)
+				require.Equal(t, wantBlocks[k], gotBlock)
+				gotBlockCountSoFar++
+			}
+			// Assert the number of blocks in the file is as expected, calculated via AllKeysChan
+			require.Equal(t, wantBlockCountSoFar-idCidCount, gotBlockCountSoFar)
+		}
+	}
+	subject.Discard()
+
+	// Finalize the blockstore to complete the partially written CARv2 file.
+	subject, err = blockstore.OpenReadWrite(path, r.Header.Roots,
+		blockstore.UseWholeCIDs(true))
+	require.NoError(t, err)
+	require.NoError(t, subject.Finalize())
+
+	// Assert the resumed-from file is a valid CARv2 with an index.
+	v2f, err := os.Open(path)
+	require.NoError(t, err)
+	t.Cleanup(func() { assert.NoError(t, v2f.Close()) })
+	v2r, err := carv2.NewReader(v2f)
+	require.NoError(t, err)
+	require.True(t, v2r.Header.HasIndex())
+
+	// Assert CARv1 payload in file matches the original CARv1 payload.
+	_, err = v1f.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+	wantPayloadReader, err := carv1.NewCarReader(v1f)
+	require.NoError(t, err)
+
+	dr, err := v2r.DataReader()
+	require.NoError(t, err)
+	gotPayloadReader, err := carv1.NewCarReader(dr)
+	require.NoError(t, err)
+
+	require.Equal(t, wantPayloadReader.Header, gotPayloadReader.Header)
+	for {
+		wantNextBlock, wantErr := wantPayloadReader.Next()
+		if wantErr == io.EOF {
+			gotNextBlock, gotErr := gotPayloadReader.Next()
+			require.Equal(t, wantErr, gotErr)
+			require.Nil(t, gotNextBlock)
+			break
+		}
+		require.NoError(t, wantErr)
+
+		dmh, err := multihash.Decode(wantNextBlock.Cid().Hash())
+		require.NoError(t, err)
+		if dmh.Code == multihash.IDENTITY {
+			continue
+		}
+
+		gotNextBlock, gotErr := gotPayloadReader.Next()
+		require.NoError(t, gotErr)
+		require.Equal(t, wantNextBlock, gotNextBlock)
+	}
+
+	// Assert the index in the resumed-from file is identical to an index generated from the data payload portion of the generated CARv2 file.
+ _, err = v1f.Seek(0, io.SeekStart) + require.NoError(t, err) + ir, err := v2r.IndexReader() + require.NoError(t, err) + gotIdx, err := index.ReadFrom(ir) + require.NoError(t, err) + dr, err = v2r.DataReader() + require.NoError(t, err) + wantIdx, err := carv2.GenerateIndex(dr) + require.NoError(t, err) + require.Equal(t, wantIdx, gotIdx) +} + +func TestBlockstoreResumptionIsSupportedOnFinalizedFile(t *testing.T) { + path := filepath.Join(t.TempDir(), "readwrite-resume-finalized.car") + // Create an incomplete CARv2 file with no blocks put. + subject, err := blockstore.OpenReadWrite(path, []cid.Cid{}) + require.NoError(t, err) + require.NoError(t, subject.Finalize()) + subject, err = blockstore.OpenReadWrite(path, []cid.Cid{}) + require.NoError(t, err) + t.Cleanup(func() { subject.Finalize() }) +} + +func TestReadWritePanicsOnlyWhenFinalized(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + oneTestBlockCid := oneTestBlockWithCidV1.Cid() + anotherTestBlockCid := anotherTestBlockWithCidV0.Cid() + wantRoots := []cid.Cid{oneTestBlockCid, anotherTestBlockCid} + path := filepath.Join(t.TempDir(), "readwrite-finalized-panic.car") + + subject, err := blockstore.OpenReadWrite(path, wantRoots) + require.NoError(t, err) + + require.NoError(t, subject.Put(ctx, oneTestBlockWithCidV1)) + require.NoError(t, subject.Put(ctx, anotherTestBlockWithCidV0)) + + gotBlock, err := subject.Get(ctx, oneTestBlockCid) + require.NoError(t, err) + require.Equal(t, oneTestBlockWithCidV1, gotBlock) + + gotSize, err := subject.GetSize(ctx, oneTestBlockCid) + require.NoError(t, err) + require.Equal(t, len(oneTestBlockWithCidV1.RawData()), gotSize) + + gotRoots, err := subject.Roots() + require.NoError(t, err) + require.Equal(t, wantRoots, gotRoots) + + has, err := subject.Has(ctx, oneTestBlockCid) + require.NoError(t, err) + require.True(t, has) + + subject.HashOnRead(true) + // Delete should always error regardless of finalize + require.Error(t, subject.DeleteBlock(ctx, oneTestBlockCid)) + + require.NoError(t, subject.Finalize()) + require.Error(t, subject.Finalize()) + + _, err = subject.Get(ctx, oneTestBlockCid) + require.Error(t, err) + _, err = subject.GetSize(ctx, anotherTestBlockCid) + require.Error(t, err) + _, err = subject.Has(ctx, anotherTestBlockCid) + require.Error(t, err) + + require.Error(t, subject.Put(ctx, oneTestBlockWithCidV1)) + require.Error(t, subject.PutMany(ctx, []blocks.Block{anotherTestBlockWithCidV0})) + _, err = subject.AllKeysChan(context.Background()) + require.Error(t, err) + require.Error(t, subject.DeleteBlock(ctx, oneTestBlockCid)) +} + +func TestReadWriteWithPaddingWorksAsExpected(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + oneTestBlockCid := oneTestBlockWithCidV1.Cid() + anotherTestBlockCid := anotherTestBlockWithCidV0.Cid() + WantRoots := []cid.Cid{oneTestBlockCid, anotherTestBlockCid} + path := filepath.Join(t.TempDir(), "readwrite-with-padding.car") + + wantCarV1Padding := uint64(1413) + wantIndexPadding := uint64(1314) + subject, err := blockstore.OpenReadWrite( + path, + WantRoots, + carv2.UseDataPadding(wantCarV1Padding), + carv2.UseIndexPadding(wantIndexPadding)) + require.NoError(t, err) + require.NoError(t, subject.Put(ctx, oneTestBlockWithCidV1)) + require.NoError(t, subject.Put(ctx, anotherTestBlockWithCidV0)) + require.NoError(t, subject.Finalize()) + + // Assert CARv2 header contains right offsets. 
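+	// DataOffset should equal pragma size + header size + the requested data
+	// padding; IndexOffset additionally accounts for the data size and the
+	// requested index padding.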
+ gotCarV2, err := carv2.OpenReader(path) + t.Cleanup(func() { gotCarV2.Close() }) + require.NoError(t, err) + wantCarV1Offset := carv2.PragmaSize + carv2.HeaderSize + wantCarV1Padding + wantIndexOffset := wantCarV1Offset + gotCarV2.Header.DataSize + wantIndexPadding + require.Equal(t, wantCarV1Offset, gotCarV2.Header.DataOffset) + require.Equal(t, wantIndexOffset, gotCarV2.Header.IndexOffset) + require.NoError(t, gotCarV2.Close()) + + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + + // Assert reading CARv1 directly at offset and size is as expected. + gotCarV1, err := carv1.NewCarReader(io.NewSectionReader(f, int64(wantCarV1Offset), int64(gotCarV2.Header.DataSize))) + require.NoError(t, err) + require.Equal(t, WantRoots, gotCarV1.Header.Roots) + gotOneBlock, err := gotCarV1.Next() + require.NoError(t, err) + require.Equal(t, oneTestBlockWithCidV1, gotOneBlock) + gotAnotherBlock, err := gotCarV1.Next() + require.NoError(t, err) + require.Equal(t, anotherTestBlockWithCidV0, gotAnotherBlock) + _, err = gotCarV1.Next() + require.Equal(t, io.EOF, err) + + // Assert reading index directly from file is parsable and has expected CIDs. + stat, err := f.Stat() + require.NoError(t, err) + indexSize := stat.Size() - int64(wantIndexOffset) + gotIdx, err := index.ReadFrom(io.NewSectionReader(f, int64(wantIndexOffset), indexSize)) + require.NoError(t, err) + _, err = index.GetFirst(gotIdx, oneTestBlockCid) + require.NoError(t, err) + _, err = index.GetFirst(gotIdx, anotherTestBlockCid) + require.NoError(t, err) +} + +func TestReadWriteResumptionFromNonV2FileIsError(t *testing.T) { + tmpPath := requireTmpCopy(t, "../testdata/sample-rootless-v42.car") + subject, err := blockstore.OpenReadWrite(tmpPath, []cid.Cid{}) + require.EqualError(t, err, "cannot resume on CAR file with version 42") + require.Nil(t, subject) +} + +func TestReadWriteResumptionMismatchingRootsIsError(t *testing.T) { + tmpPath := requireTmpCopy(t, "../testdata/sample-wrapped-v2.car") + + origContent, err := os.ReadFile(tmpPath) + require.NoError(t, err) + + badRoot, err := cid.NewPrefixV1(cid.Raw, multihash.SHA2_256).Sum([]byte("bad root")) + require.NoError(t, err) + + subject, err := blockstore.OpenReadWrite(tmpPath, []cid.Cid{badRoot}) + require.EqualError(t, err, "cannot resume on file with mismatching data header") + require.Nil(t, subject) + + newContent, err := os.ReadFile(tmpPath) + require.NoError(t, err) + + // Expect the bad file to be left untouched; check the size first. + // If the sizes mismatch, printing a huge diff would not help us. 
+ require.Equal(t, len(origContent), len(newContent)) + require.Equal(t, origContent, newContent) +} + +func requireTmpCopy(t *testing.T, src string) string { + srcF, err := os.Open(src) + require.NoError(t, err) + defer func() { require.NoError(t, srcF.Close()) }() + stats, err := srcF.Stat() + require.NoError(t, err) + + dst := filepath.Join(t.TempDir(), stats.Name()) + dstF, err := os.Create(dst) + require.NoError(t, err) + defer func() { require.NoError(t, dstF.Close()) }() + + _, err = io.Copy(dstF, srcF) + require.NoError(t, err) + return dst +} + +func TestReadWriteResumptionFromFileWithDifferentCarV1PaddingIsError(t *testing.T) { + oneTestBlockCid := oneTestBlockWithCidV1.Cid() + WantRoots := []cid.Cid{oneTestBlockCid} + path := filepath.Join(t.TempDir(), "readwrite-resume-with-padding.car") + + subject, err := blockstore.OpenReadWrite( + path, + WantRoots, + carv2.UseDataPadding(1413)) + require.NoError(t, err) + require.NoError(t, subject.Put(context.TODO(), oneTestBlockWithCidV1)) + require.NoError(t, subject.Finalize()) + + resumingSubject, err := blockstore.OpenReadWrite( + path, + WantRoots, + carv2.UseDataPadding(1314)) + require.EqualError(t, err, "cannot resume from file with mismatched CARv1 offset; "+ + "`WithDataPadding` option must match the padding on file. "+ + "Expected padding value of 1413 but got 1314") + require.Nil(t, resumingSubject) +} + +func TestReadWriteErrorAfterClose(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + root := blocks.NewBlock([]byte("foo")) + for _, closeMethod := range []func(*blockstore.ReadWrite){ + (*blockstore.ReadWrite).Discard, + func(bs *blockstore.ReadWrite) { bs.Finalize() }, + } { + path := filepath.Join(t.TempDir(), "readwrite.car") + bs, err := blockstore.OpenReadWrite(path, []cid.Cid{root.Cid()}) + require.NoError(t, err) + + err = bs.Put(ctx, root) + require.NoError(t, err) + + roots, err := bs.Roots() + require.NoError(t, err) + _, err = bs.Has(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.Get(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + _, err = bs.AllKeysChan(ctx) + require.NoError(t, err) + cancel() // to stop the AllKeysChan goroutine + + closeMethod(bs) + + _, err = bs.Roots() + require.Error(t, err) + _, err = bs.Has(ctx, roots[0]) + require.Error(t, err) + _, err = bs.Get(ctx, roots[0]) + require.Error(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.Error(t, err) + _, err = bs.AllKeysChan(ctx) + require.Error(t, err) + + err = bs.Put(ctx, root) + require.Error(t, err) + + // TODO: test that closing blocks if an AllKeysChan operation is + // in progress. 
+ } +} + +func TestOpenReadWrite_WritesIdentityCIDsWhenOptionIsEnabled(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + path := filepath.Join(t.TempDir(), "readwrite-with-id-enabled.car") + subject, err := blockstore.OpenReadWrite(path, []cid.Cid{}, carv2.StoreIdentityCIDs(true)) + require.NoError(t, err) + + data := []byte("fish") + idmh, err := multihash.Sum(data, multihash.IDENTITY, -1) + require.NoError(t, err) + idCid := cid.NewCidV1(uint64(multicodec.Raw), idmh) + + idBlock, err := blocks.NewBlockWithCid(data, idCid) + require.NoError(t, err) + err = subject.Put(ctx, idBlock) + require.NoError(t, err) + + has, err := subject.Has(ctx, idCid) + require.NoError(t, err) + require.True(t, has) + + gotBlock, err := subject.Get(ctx, idCid) + require.NoError(t, err) + require.Equal(t, idBlock, gotBlock) + + keysChan, err := subject.AllKeysChan(context.Background()) + require.NoError(t, err) + var i int + for c := range keysChan { + i++ + require.Equal(t, idCid, c) + } + require.Equal(t, 1, i) + + err = subject.Finalize() + require.NoError(t, err) + + // Assert resulting CAR file indeed has the IDENTITY block. + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + + reader, err := carv2.NewBlockReader(f) + require.NoError(t, err) + + gotBlock, err = reader.Next() + require.NoError(t, err) + require.Equal(t, idBlock, gotBlock) + + next, err := reader.Next() + require.Equal(t, io.EOF, err) + require.Nil(t, next) + + // Assert the id is indexed. + r, err := carv2.OpenReader(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, r.Close()) }) + require.True(t, r.Header.HasIndex()) + + ir, err := r.IndexReader() + require.NoError(t, err) + require.NotNil(t, ir) + + gotIdx, err := index.ReadFrom(ir) + require.NoError(t, err) + + // Determine expected offset as the length of header plus one + dr, err := r.DataReader() + require.NoError(t, err) + header, err := carv1.ReadHeader(dr, carv1.DefaultMaxAllowedHeaderSize) + require.NoError(t, err) + object, err := cbor.DumpObject(header) + require.NoError(t, err) + expectedOffset := len(object) + 1 + + // Assert index is iterable and has exactly one record with expected multihash and offset. 
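+	// (expectedOffset above is the CBOR header length plus the single-byte
+	// varint that length-prefixes the CARv1 header, i.e. the start of the
+	// first and only section.)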
+	switch idx := gotIdx.(type) {
+	case index.IterableIndex:
+		var i int
+		err := idx.ForEach(func(mh multihash.Multihash, offset uint64) error {
+			i++
+			require.Equal(t, idmh, mh)
+			require.Equal(t, uint64(expectedOffset), offset)
+			return nil
+		})
+		require.NoError(t, err)
+		require.Equal(t, 1, i)
+	default:
+		require.Failf(t, "unexpected index type", "wanted %v but got %v", multicodec.CarMultihashIndexSorted, idx.Codec())
+	}
+}
+
+func TestOpenReadWrite_ErrorsWhenWritingTooLargeOfACid(t *testing.T) {
+	maxAllowedCidSize := uint64(2)
+	path := filepath.Join(t.TempDir(), "readwrite-with-id-enabled-too-large.car")
+	subject, err := blockstore.OpenReadWrite(path, []cid.Cid{}, carv2.MaxIndexCidSize(maxAllowedCidSize))
+	t.Cleanup(subject.Discard)
+	require.NoError(t, err)
+
+	data := []byte("monsterlobster")
+	mh, err := multihash.Sum(data, multihash.SHA2_256, -1)
+	require.NoError(t, err)
+	bigCid := cid.NewCidV1(uint64(multicodec.Raw), mh)
+	bigCidLen := uint64(bigCid.ByteLen())
+	require.True(t, bigCidLen > maxAllowedCidSize)
+
+	bigBlock, err := blocks.NewBlockWithCid(data, bigCid)
+	require.NoError(t, err)
+	err = subject.Put(context.TODO(), bigBlock)
+	require.Equal(t, &carv2.ErrCidTooLarge{MaxSize: maxAllowedCidSize, CurrentSize: bigCidLen}, err)
+}
+
+func TestReadWrite_ReWritingCARv1WithIdentityCidIsIdenticalToOriginalWithOptionsEnabled(t *testing.T) {
+	originalCARv1Path := "../testdata/sample-v1.car"
+	originalCarV1, err := os.Open(originalCARv1Path)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, originalCarV1.Close()) })
+
+	r, err := carv2.NewBlockReader(originalCarV1)
+	require.NoError(t, err)
+
+	path := filepath.Join(t.TempDir(), "readwrite-from-carv1-with-id-enabled.car")
+	subject, err := blockstore.OpenReadWrite(path, r.Roots, carv2.StoreIdentityCIDs(true))
+	require.NoError(t, err)
+	var idCidCount int
+	for {
+		next, err := r.Next()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+		if next.Cid().Prefix().MhType == multihash.IDENTITY {
+			idCidCount++
+		}
+		err = subject.Put(context.TODO(), next)
+		require.NoError(t, err)
+	}
+	require.NotZero(t, idCidCount)
+	err = subject.Finalize()
+	require.NoError(t, err)
+
+	v2r, err := carv2.OpenReader(path)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, v2r.Close()) })
+
+	// Assert the characteristics bit is set.
+	require.True(t, v2r.Header.Characteristics.IsFullyIndexed())
+
+	// Assert the original CARv1 and the generated inner CARv1 payload have the same SHA512 hash.
+	// Note, we hash instead of comparing bytes to avoid excessive memory usage when the sample CARv1 is large.
+ + hasher := sha512.New() + dr, err := v2r.DataReader() + require.NoError(t, err) + gotWritten, err := io.Copy(hasher, dr) + require.NoError(t, err) + gotSum := hasher.Sum(nil) + + hasher.Reset() + _, err = originalCarV1.Seek(0, io.SeekStart) + require.NoError(t, err) + wantWritten, err := io.Copy(hasher, originalCarV1) + require.NoError(t, err) + wantSum := hasher.Sum(nil) + + require.Equal(t, wantWritten, gotWritten) + require.Equal(t, wantSum, gotSum) +} + +func TestReadWriteOpenFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + dir := t.TempDir() // auto cleanup + f, err := os.CreateTemp(dir, "") + require.NoError(t, err) + + root := blocks.NewBlock([]byte("foo")) + + bs, err := blockstore.OpenReadWriteFile(f, []cid.Cid{root.Cid()}) + require.NoError(t, err) + + err = bs.Put(ctx, root) + require.NoError(t, err) + + roots, err := bs.Roots() + require.NoError(t, err) + _, err = bs.Has(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.Get(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.NoError(t, err) + + err = bs.Finalize() + require.NoError(t, err) + + _, err = f.Seek(0, 0) + require.NoError(t, err) // file should not be closed, let the caller do it + + err = f.Close() + require.NoError(t, err) +} + +func TestBlockstore_IdentityCidWithEmptyDataIsIndexed(t *testing.T) { + p := path.Join(t.TempDir(), "car-id-cid-empty.carv2") + var noData []byte + + mh, err := multihash.Sum(noData, multihash.IDENTITY, -1) + require.NoError(t, err) + w, err := blockstore.OpenReadWrite(p, nil, carv2.StoreIdentityCIDs(true)) + require.NoError(t, err) + + blk, err := blocks.NewBlockWithCid(noData, cid.NewCidV1(cid.Raw, mh)) + require.NoError(t, err) + + err = w.Put(context.TODO(), blk) + require.NoError(t, err) + require.NoError(t, w.Finalize()) + + r, err := carv2.OpenReader(p) + require.NoError(t, err) + defer func() { require.NoError(t, r.Close()) }() + + dr, err := r.DataReader() + require.NoError(t, err) + header, err := carv1.ReadHeader(dr, carv1.DefaultMaxAllowedHeaderSize) + require.NoError(t, err) + wantOffset, err := carv1.HeaderSize(header) + require.NoError(t, err) + + ir, err := r.IndexReader() + require.NoError(t, err) + idx, err := index.ReadFrom(ir) + require.NoError(t, err) + + itidx, ok := idx.(index.IterableIndex) + require.True(t, ok) + var count int + err = itidx.ForEach(func(m multihash.Multihash, u uint64) error { + dm, err := multihash.Decode(m) + require.NoError(t, err) + require.Equal(t, multicodec.Identity, multicodec.Code(dm.Code)) + require.Equal(t, 0, dm.Length) + require.Empty(t, dm.Digest) + require.Equal(t, wantOffset, u) + count++ + return nil + }) + require.NoError(t, err) + require.Equal(t, 1, count) +} + +func TestBlockstoreFinalizeReadOnly(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + root := blocks.NewBlock([]byte("foo")) + + p := filepath.Join(t.TempDir(), "readwrite.car") + bs, err := blockstore.OpenReadWrite(p, []cid.Cid{root.Cid()}) + require.NoError(t, err) + + err = bs.Put(ctx, root) + require.NoError(t, err) + + roots, err := bs.Roots() + require.NoError(t, err) + _, err = bs.Has(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.Get(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.AllKeysChan(ctx) + require.NoError(t, err) + + // soft finalize, we can still read, but not write + err = bs.FinalizeReadOnly() + 
require.NoError(t, err) + + _, err = bs.Roots() + require.NoError(t, err) + _, err = bs.Has(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.Get(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.NoError(t, err) + _, err = bs.AllKeysChan(ctx) + require.NoError(t, err) + + err = bs.Put(ctx, root) + require.Error(t, err) + + // final close, nothing works anymore + err = bs.Close() + require.NoError(t, err) + + _, err = bs.Roots() + require.Error(t, err) + _, err = bs.Has(ctx, roots[0]) + require.Error(t, err) + _, err = bs.Get(ctx, roots[0]) + require.Error(t, err) + _, err = bs.GetSize(ctx, roots[0]) + require.Error(t, err) + _, err = bs.AllKeysChan(ctx) + require.Error(t, err) + + err = bs.Put(ctx, root) + require.Error(t, err) +} + +func TestWholeCID(t *testing.T) { + for _, whole := range []bool{true, false} { + whole := whole + t.Run(fmt.Sprintf("whole=%t", whole), func(t *testing.T) { + t.Parallel() + ctx := context.Background() + path := filepath.Join(t.TempDir(), fmt.Sprintf("writable_%t.car", whole)) + rw, err := blockstore.OpenReadWrite(path, []cid.Cid{}, carv2.UseWholeCIDs(whole)) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, rw.Finalize()) }) + + require.NoError(t, rw.Put(ctx, oneTestBlockWithCidV1)) + has, err := rw.Has(ctx, oneTestBlockWithCidV1.Cid()) + require.NoError(t, err) + require.True(t, has) + + pref := oneTestBlockWithCidV1.Cid().Prefix() + pref.Codec = cid.DagCBOR + pref.Version = 1 + cpb1, err := pref.Sum(oneTestBlockWithCidV1.RawData()) + require.NoError(t, err) + + has, err = rw.Has(ctx, cpb1) + require.NoError(t, err) + require.Equal(t, has, !whole) + + require.NoError(t, rw.Put(ctx, anotherTestBlockWithCidV0)) + has, err = rw.Has(ctx, anotherTestBlockWithCidV0.Cid()) + require.NoError(t, err) + require.True(t, has) + has, err = rw.Has(ctx, cpb1) + require.NoError(t, err) + require.Equal(t, has, !whole) + + pref = anotherTestBlockWithCidV0.Cid().Prefix() + pref.Codec = cid.DagJSON + pref.Version = 1 + cpb2, err := pref.Sum(anotherTestBlockWithCidV0.RawData()) + require.NoError(t, err) + + has, err = rw.Has(ctx, cpb2) + require.NoError(t, err) + require.Equal(t, has, !whole) + has, err = rw.Has(ctx, cpb1) + require.NoError(t, err) + require.Equal(t, has, !whole) + }) + } +} + +func TestReadWriteIndex(t *testing.T) { + tmpPath := requireTmpCopy(t, "../testdata/sample-wrapped-v2.car") + + root := cid.MustParse("bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy") + subject, err := blockstore.OpenReadWrite(tmpPath, []cid.Cid{root}) + require.NoError(t, err) + + defer func() { + err = subject.Finalize() + require.NoError(t, err) + }() + + var wantCids []cid.Cid + var wantMh []multihash.Multihash + ch, err := subject.AllKeysChan(context.Background()) + require.NoError(t, err) + for c := range ch { + wantCids = append(wantCids, c) + wantMh = append(wantMh, c.Hash()) + } + + idx := subject.Index() + + for _, c := range wantCids { + _, err = index.GetFirst(idx, c) + require.NoError(t, err) + } + + if idx, ok := idx.(index.IterableIndex); ok { + var got []multihash.Multihash + err = idx.ForEach(func(m multihash.Multihash, u uint64) error { + got = append(got, m) + return nil + }) + require.NoError(t, err) + require.ElementsMatch(t, wantMh, got) + } +} diff --git a/ipld/car/v2/car.go b/ipld/car/v2/car.go new file mode 100644 index 0000000000..571eb1140b --- /dev/null +++ b/ipld/car/v2/car.go @@ -0,0 +1,193 @@ +package car + +import ( + "encoding/binary" + "fmt" + "io" +) + +const ( + // PragmaSize is 
the size of the CARv2 pragma in bytes.
+	PragmaSize = 11
+	// HeaderSize is the fixed size of the CARv2 header in bytes.
+	HeaderSize = 40
+	// CharacteristicsSize is the fixed size of the Characteristics bitfield within the CARv2 header in bytes.
+	CharacteristicsSize = 16
+)
+
+// Pragma is the pragma of a CARv2, containing the version number.
+// This is a valid CARv1 header, with a version number of 2 and no root CIDs.
+var Pragma = []byte{
+	0x0a, // varint(10), length prefix of the 10 bytes that follow
+	0xa1, // map(1)
+	0x67, // string(7)
+	0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, // "version"
+	0x02, // uint(2)
+}
+
+type (
+	// Header represents the CARv2 header/pragma.
+	Header struct {
+		// 128-bit characteristics of this CARv2 file, such as order, deduplication, etc. Reserved for future use.
+		Characteristics Characteristics
+		// The byte-offset from the beginning of the CARv2 to the first byte of the CARv1 data payload.
+		DataOffset uint64
+		// The byte-length of the CARv1 data payload.
+		DataSize uint64
+		// The byte-offset from the beginning of the CARv2 to the first byte of the index payload. This value may be 0 to indicate the absence of index data.
+		IndexOffset uint64
+	}
+	// Characteristics is a bitfield placeholder for capturing the characteristics of a CARv2 such as order and determinism.
+	Characteristics struct {
+		Hi uint64
+		Lo uint64
+	}
+)
+
+// fullyIndexedCharPos is the position of the Characteristics.Hi bit that specifies whether the index is a catalog of all CIDs or not.
+const fullyIndexedCharPos = 7 // left-most bit
+
+// WriteTo writes these characteristics to the given w.
+func (c Characteristics) WriteTo(w io.Writer) (n int64, err error) {
+	buf := make([]byte, 16)
+	binary.LittleEndian.PutUint64(buf[:8], c.Hi)
+	binary.LittleEndian.PutUint64(buf[8:], c.Lo)
+	written, err := w.Write(buf)
+	return int64(written), err
+}
+
+// ReadFrom populates these characteristics by reading CharacteristicsSize bytes from the given r.
+func (c *Characteristics) ReadFrom(r io.Reader) (int64, error) {
+	buf := make([]byte, CharacteristicsSize)
+	read, err := io.ReadFull(r, buf)
+	n := int64(read)
+	if err != nil {
+		return n, err
+	}
+	c.Hi = binary.LittleEndian.Uint64(buf[:8])
+	c.Lo = binary.LittleEndian.Uint64(buf[8:])
+	return n, nil
+}
+
+// IsFullyIndexed specifies whether the index of this CARv2 represents a catalog of all CID segments.
+// See StoreIdentityCIDs.
+func (c *Characteristics) IsFullyIndexed() bool {
+	return isBitSet(c.Hi, fullyIndexedCharPos)
+}
+
+// SetFullyIndexed sets whether this CARv2 represents a catalog of all CID segments.
+func (c *Characteristics) SetFullyIndexed(b bool) {
+	if b {
+		c.Hi = setBit(c.Hi, fullyIndexedCharPos)
+	} else {
+		c.Hi = unsetBit(c.Hi, fullyIndexedCharPos)
+	}
+}
+
+func setBit(n uint64, pos uint) uint64 {
+	n |= 1 << pos
+	return n
+}
+
+func unsetBit(n uint64, pos uint) uint64 {
+	mask := uint64(^(1 << pos))
+	n &= mask
+	return n
+}
+
+func isBitSet(n uint64, pos uint) bool {
+	bit := n & (1 << pos)
+	return bit > 0
+}
+
+// NewHeader instantiates a new CARv2 header, given the data size.
+func NewHeader(dataSize uint64) Header {
+	header := Header{
+		DataSize: dataSize,
+	}
+	header.DataOffset = PragmaSize + HeaderSize
+	header.IndexOffset = header.DataOffset + dataSize
+	return header
+}
+
+// WithIndexPadding sets the index offset from the beginning of the file for this header and returns
+// the header for convenient chained calls.
+// The index offset is calculated as the sum of PragmaSize, HeaderSize,
+// Header.DataSize, and the given padding.
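+//
+// As a worked example (illustrative, not from the original documentation): a
+// header built as NewHeader(1024).WithIndexPadding(7) places the index at
+// PragmaSize + HeaderSize + 1024 + 7 = 11 + 40 + 1024 + 7 = 1082 bytes from the
+// beginning of the file.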
+func (h Header) WithIndexPadding(padding uint64) Header {
+	h.IndexOffset = h.IndexOffset + padding
+	return h
+}
+
+// WithDataPadding sets the data payload byte-offset from the beginning of the file for this header
+// and returns the header for convenient chained calls.
+// The data offset is calculated as the sum of PragmaSize, HeaderSize and the given padding.
+// The call to this function also shifts the Header.IndexOffset forward by the given padding.
+func (h Header) WithDataPadding(padding uint64) Header {
+	h.DataOffset = PragmaSize + HeaderSize + padding
+	h.IndexOffset = h.IndexOffset + padding
+	return h
+}
+
+// WithDataSize sets the data payload size for this header and shifts the Header.IndexOffset
+// forward by the given size, returning the header for convenient chained calls.
+func (h Header) WithDataSize(size uint64) Header {
+	h.DataSize = size
+	h.IndexOffset = size + h.IndexOffset
+	return h
+}
+
+// HasIndex indicates whether the index is present.
+func (h Header) HasIndex() bool {
+	return h.IndexOffset != 0
+}
+
+// WriteTo serializes this header as bytes and writes them using the given io.Writer.
+func (h Header) WriteTo(w io.Writer) (n int64, err error) {
+	wn, err := h.Characteristics.WriteTo(w)
+	n += wn
+	if err != nil {
+		return
+	}
+	buf := make([]byte, 24)
+	binary.LittleEndian.PutUint64(buf[:8], h.DataOffset)
+	binary.LittleEndian.PutUint64(buf[8:16], h.DataSize)
+	binary.LittleEndian.PutUint64(buf[16:], h.IndexOffset)
+	written, err := w.Write(buf)
+	n += int64(written)
+	return n, err
+}
+
+// ReadFrom populates the fields of this header from the given r.
+func (h *Header) ReadFrom(r io.Reader) (int64, error) {
+	n, err := h.Characteristics.ReadFrom(r)
+	if err != nil {
+		return n, err
+	}
+	buf := make([]byte, 24)
+	read, err := io.ReadFull(r, buf)
+	n += int64(read)
+	if err != nil {
+		return n, err
+	}
+	dataOffset := binary.LittleEndian.Uint64(buf[:8])
+	dataSize := binary.LittleEndian.Uint64(buf[8:16])
+	indexOffset := binary.LittleEndian.Uint64(buf[16:])
+	// Assert the data payload offset validity.
+	// It must be at least 51 (PragmaSize + HeaderSize).
+	if int64(dataOffset) < PragmaSize+HeaderSize {
+		return n, fmt.Errorf("invalid data payload offset: %v", dataOffset)
+	}
+	// Assert the data size validity.
+	// It must be larger than zero.
+	// Technically, it should be at least 11 bytes (i.e. a valid CARv1 header with no roots) but
+	// we let further parsing of the header signal an invalid data payload.
+	if int64(dataSize) <= 0 {
+		return n, fmt.Errorf("invalid data payload size: %v", dataSize)
+	}
+	// Assert the index offset validity.
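+	// Editorial note: indexOffset is a uint64 and can never be negative; the int64
+	// cast below rejects values above math.MaxInt64, which would otherwise overflow
+	// signed seek arithmetic downstream.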
+ if int64(indexOffset) < 0 { + return n, fmt.Errorf("invalid index offset: %v", indexOffset) + } + h.DataOffset = dataOffset + h.DataSize = dataSize + h.IndexOffset = indexOffset + return n, nil +} diff --git a/ipld/car/v2/car_test.go b/ipld/car/v2/car_test.go new file mode 100644 index 0000000000..c5d35063c7 --- /dev/null +++ b/ipld/car/v2/car_test.go @@ -0,0 +1,247 @@ +package car_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/stretchr/testify/assert" +) + +func TestCarV2PragmaLength(t *testing.T) { + tests := []struct { + name string + want interface{} + got interface{} + }{ + { + "ActualSizeShouldBe11", + 11, + len(carv2.Pragma), + }, + { + "ShouldStartWithVarint(10)", + carv2.Pragma[0], + 10, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + assert.EqualValues(t, tt.want, tt.got, "CarV2Pragma got = %v, want %v", tt.got, tt.want) + }) + } +} + +func TestCarV2PragmaIsValidCarV1Header(t *testing.T) { + v1h, err := carv1.ReadHeader(bytes.NewReader(carv2.Pragma), carv1.DefaultMaxAllowedHeaderSize) + assert.NoError(t, err, "cannot decode pragma as CBOR with CARv1 header structure") + assert.Equal(t, &carv1.CarHeader{ + Roots: nil, + Version: 2, + }, v1h, "CARv2 pragma must be a valid CARv1 header") +} + +func TestHeader_WriteTo(t *testing.T) { + tests := []struct { + name string + target carv2.Header + wantWrite []byte + wantErr bool + }{ + { + "HeaderWithEmptyCharacteristicsIsWrittenAsExpected", + carv2.Header{ + Characteristics: carv2.Characteristics{}, + DataOffset: 99, + }, + []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x63, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + false, + }, + { + "NonEmptyHeaderIsWrittenAsExpected", + carv2.Header{ + Characteristics: carv2.Characteristics{ + Hi: 1001, Lo: 1002, + }, + DataOffset: 99, + DataSize: 100, + IndexOffset: 101, + }, + []byte{ + 0xe9, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0xea, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x63, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x64, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x65, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + written, err := tt.target.WriteTo(buf) + if (err != nil) != tt.wantErr { + t.Errorf("WriteTo() error = %v, wantErr %v", err, tt.wantErr) + return + } + gotWrite := buf.Bytes() + assert.Equal(t, tt.wantWrite, gotWrite, "Header.WriteTo() gotWrite = %v, wantWrite %v", gotWrite, tt.wantWrite) + assert.EqualValues(t, carv2.HeaderSize, uint64(len(gotWrite)), "WriteTo() CARv2 header length must always be %v bytes long", carv2.HeaderSize) + assert.EqualValues(t, carv2.HeaderSize, uint64(written), "WriteTo() CARv2 header byte count must always be %v bytes long", carv2.HeaderSize) + }) + } +} + +func TestHeader_ReadFrom(t *testing.T) { + tests := []struct { + name string + target []byte + wantHeader carv2.Header + wantErr bool + }{ + { + "HeaderWithEmptyCharacteristicsIsWrittenAsExpected", + []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x63, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x64, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + carv2.Header{ + Characteristics: carv2.Characteristics{}, + DataOffset: 99, + 
DataSize: 100, + }, + false, + }, + { + "NonEmptyHeaderIsWrittenAsExpected", + + []byte{ + 0xe9, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0xea, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x63, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x64, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x65, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + carv2.Header{ + Characteristics: carv2.Characteristics{ + Hi: 1001, Lo: 1002, + }, + DataOffset: 99, + DataSize: 100, + IndexOffset: 101, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotHeader := carv2.Header{} + gotRead, err := gotHeader.ReadFrom(bytes.NewReader(tt.target)) + assert.NoError(t, err) + assert.Equal(t, int64(carv2.HeaderSize), gotRead) + assert.Equal(t, tt.wantHeader, gotHeader) + }) + } +} + +func TestHeader_WithPadding(t *testing.T) { + tests := []struct { + name string + subject carv2.Header + wantCarV1Offset uint64 + wantIndexOffset uint64 + }{ + { + "WhenNoPaddingOffsetsAreSumOfSizes", + carv2.NewHeader(123), + carv2.PragmaSize + carv2.HeaderSize, + carv2.PragmaSize + carv2.HeaderSize + 123, + }, + { + "WhenOnlyPaddingCarV1BothOffsetsShift", + carv2.NewHeader(123).WithDataPadding(3), + carv2.PragmaSize + carv2.HeaderSize + 3, + carv2.PragmaSize + carv2.HeaderSize + 3 + 123, + }, + { + "WhenPaddingBothCarV1AndIndexBothOffsetsShiftWithAdditionalIndexShift", + carv2.NewHeader(123).WithDataPadding(3).WithIndexPadding(7), + carv2.PragmaSize + carv2.HeaderSize + 3, + carv2.PragmaSize + carv2.HeaderSize + 3 + 123 + 7, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.EqualValues(t, tt.wantCarV1Offset, tt.subject.DataOffset) + assert.EqualValues(t, tt.wantIndexOffset, tt.subject.IndexOffset) + }) + } +} + +func TestNewHeaderHasExpectedValues(t *testing.T) { + wantCarV1Len := uint64(1413) + want := carv2.Header{ + Characteristics: carv2.Characteristics{}, + DataOffset: carv2.PragmaSize + carv2.HeaderSize, + DataSize: wantCarV1Len, + IndexOffset: carv2.PragmaSize + carv2.HeaderSize + wantCarV1Len, + } + got := carv2.NewHeader(wantCarV1Len) + assert.Equal(t, want, got, "NewHeader got = %v, want = %v", got, want) +} + +func TestCharacteristics_StoreIdentityCIDs(t *testing.T) { + subject := carv2.Characteristics{} + require.False(t, subject.IsFullyIndexed()) + + subject.SetFullyIndexed(true) + require.True(t, subject.IsFullyIndexed()) + + var buf bytes.Buffer + written, err := subject.WriteTo(&buf) + require.NoError(t, err) + require.Equal(t, int64(16), written) + require.Equal(t, []byte{ + 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, buf.Bytes()) + + var decodedSubject carv2.Characteristics + read, err := decodedSubject.ReadFrom(&buf) + require.NoError(t, err) + require.Equal(t, int64(16), read) + require.True(t, decodedSubject.IsFullyIndexed()) + + buf.Reset() + subject.SetFullyIndexed(false) + require.False(t, subject.IsFullyIndexed()) + + written, err = subject.WriteTo(&buf) + require.NoError(t, err) + require.Equal(t, int64(16), written) + require.Equal(t, []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, buf.Bytes()) + + var decodedSubjectAgain carv2.Characteristics + read, err = decodedSubjectAgain.ReadFrom(&buf) + require.NoError(t, err) + require.Equal(t, int64(16), read) + require.False(t, decodedSubjectAgain.IsFullyIndexed()) +} diff --git a/ipld/car/v2/doc.go b/ipld/car/v2/doc.go new file mode 100644 index 0000000000..b12266649a --- /dev/null +++ b/ipld/car/v2/doc.go @@ -0,0 +1,8 @@ +// Package 
car allows inspecting and reading CAR files, +// described at https://ipld.io/specs/transport/car/. +// The entire library is geared towards the CARv2 spec, +// but many of the APIs consuming CAR files also accept CARv1. +// +// The blockstore sub-package contains an implementation of the +// go-ipfs-blockstore interface. +package car diff --git a/ipld/car/v2/errors.go b/ipld/car/v2/errors.go new file mode 100644 index 0000000000..ee89e0b25a --- /dev/null +++ b/ipld/car/v2/errors.go @@ -0,0 +1,18 @@ +package car + +import ( + "fmt" +) + +var _ (error) = (*ErrCidTooLarge)(nil) + +// ErrCidTooLarge signals that a CID is too large to include in CARv2 index. +// See: MaxIndexCidSize. +type ErrCidTooLarge struct { + MaxSize uint64 + CurrentSize uint64 +} + +func (e *ErrCidTooLarge) Error() string { + return fmt.Sprintf("cid size is larger than max allowed (%d > %d)", e.CurrentSize, e.MaxSize) +} diff --git a/ipld/car/v2/errors_test.go b/ipld/car/v2/errors_test.go new file mode 100644 index 0000000000..56e2c7a095 --- /dev/null +++ b/ipld/car/v2/errors_test.go @@ -0,0 +1,12 @@ +package car + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewErrCidTooLarge_ErrorContainsSizes(t *testing.T) { + subject := &ErrCidTooLarge{MaxSize: 1413, CurrentSize: 1414} + require.EqualError(t, subject, "cid size is larger than max allowed (1414 > 1413)") +} diff --git a/ipld/car/v2/example_test.go b/ipld/car/v2/example_test.go new file mode 100644 index 0000000000..8b9288ebb1 --- /dev/null +++ b/ipld/car/v2/example_test.go @@ -0,0 +1,133 @@ +package car_test + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" +) + +func ExampleWrapV1File() { + // We have a sample CARv1 file. + // Wrap it as-is in a CARv2, with an index. + // Writing the result to testdata allows reusing that file in other tests, + // and also helps ensure that the result is deterministic. + src := "testdata/sample-v1.car" + tdir, err := os.MkdirTemp(os.TempDir(), "example-*") + if err != nil { + panic(err) + } + dst := filepath.Join(tdir, "wrapped-v2.car") + if err := carv2.WrapV1File(src, dst); err != nil { + panic(err) + } + + // Open our new CARv2 file and show some info about it. + cr, err := carv2.OpenReader(dst) + if err != nil { + panic(err) + } + defer func() { + if err := cr.Close(); err != nil { + panic(err) + } + }() + + roots, err := cr.Roots() + if err != nil { + panic(err) + } + fmt.Println("Roots:", roots) + fmt.Println("Has index:", cr.Header.HasIndex()) + + // Verify that the CARv1 remains exactly the same. + orig, err := os.ReadFile(src) + if err != nil { + panic(err) + } + dr, err := cr.DataReader() + if err != nil { + panic(err) + } + inner, err := io.ReadAll(dr) + if err != nil { + panic(err) + } + fmt.Println("Inner CARv1 is exactly the same:", bytes.Equal(orig, inner)) + + // Verify that the CARv2 works well with its index. + bs, err := blockstore.OpenReadOnly(dst) + if err != nil { + panic(err) + } + fmt.Println(bs.Get(context.TODO(), roots[0])) + + // Output: + // Roots: [bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy] + // Has index: true + // Inner CARv1 is exactly the same: true + // [Block bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy] +} + +// ExampleNewBlockReader instantiates a new BlockReader for a CARv1 file and its wrapped CARv2 +// version. For each file, it prints the version, the root CIDs and the first five block CIDs. 
+// Note, the roots and first five block CIDs are identical in both files since both represent the +// same root CIDs and data blocks. +func ExampleNewBlockReader() { + for _, path := range []string{ + "testdata/sample-v1.car", + "testdata/sample-wrapped-v2.car", + } { + fmt.Println("File:", path) + f, err := os.Open(path) + if err != nil { + panic(err) + } + br, err := carv2.NewBlockReader(f) + if err != nil { + panic(err) + } + defer func() { + if err := f.Close(); err != nil { + panic(err) + } + }() + fmt.Println("Version:", br.Version) + fmt.Println("Roots:", br.Roots) + fmt.Println("First 5 block CIDs:") + for i := 0; i < 5; i++ { + bl, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + panic(err) + } + fmt.Printf("\t%v\n", bl.Cid()) + } + } + // Output: + // File: testdata/sample-v1.car + // Version: 1 + // Roots: [bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy] + // First 5 block CIDs: + // bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy + // bafy2bzaceaycv7jhaegckatnncu5yugzkrnzeqsppzegufr35lroxxnsnpspu + // bafy2bzaceb62wdepofqu34afqhbcn4a7jziwblt2ih5hhqqm6zitd3qpzhdp4 + // bafy2bzaceb3utcspm5jqcdqpih3ztbaztv7yunzkiyfq7up7xmokpxemwgu5u + // bafy2bzacedjwekyjresrwjqj4n2r5bnuuu3klncgjo2r3slsp6wgqb37sz4ck + // File: testdata/sample-wrapped-v2.car + // Version: 2 + // Roots: [bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy] + // First 5 block CIDs: + // bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy + // bafy2bzaceaycv7jhaegckatnncu5yugzkrnzeqsppzegufr35lroxxnsnpspu + // bafy2bzaceb62wdepofqu34afqhbcn4a7jziwblt2ih5hhqqm6zitd3qpzhdp4 + // bafy2bzaceb3utcspm5jqcdqpih3ztbaztv7yunzkiyfq7up7xmokpxemwgu5u + // bafy2bzacedjwekyjresrwjqj4n2r5bnuuu3klncgjo2r3slsp6wgqb37sz4ck +} diff --git a/ipld/car/v2/fuzz_test.go b/ipld/car/v2/fuzz_test.go new file mode 100644 index 0000000000..30ec4ff43a --- /dev/null +++ b/ipld/car/v2/fuzz_test.go @@ -0,0 +1,141 @@ +//go:build go1.18 + +package car_test + +import ( + "bytes" + "encoding/hex" + "io" + "os" + "path/filepath" + "testing" + + car "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" +) + +// v1FixtureStr is a clean carv1 single-block, single-root CAR +const v1FixtureStr = "3aa265726f6f747381d82a58250001711220151fe9e73c6267a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80b6776657273696f6e012c01711220151fe9e73c6267a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80ba165646f646779f5" + +func seedWithCarFiles(f *testing.F) { + fixture, err := hex.DecodeString(v1FixtureStr) + if err != nil { + f.Fatal(err) + } + f.Add(fixture) + files, err := filepath.Glob("testdata/*.car") + if err != nil { + f.Fatal(err) + } + for _, fname := range files { + func() { + file, err := os.Open(fname) + if err != nil { + f.Fatal(err) + } + defer file.Close() + data, err := io.ReadAll(file) + if err != nil { + f.Fatal(err) + } + f.Add(data) + }() + } +} + +func FuzzBlockReader(f *testing.F) { + seedWithCarFiles(f) + + f.Fuzz(func(t *testing.T, data []byte) { + r, err := car.NewBlockReader(bytes.NewReader(data)) + if err != nil { + return + } + + for { + _, err = r.Next() + if err == io.EOF { + return + } + } + }) +} + +func FuzzReader(f *testing.F) { + seedWithCarFiles(f) + + f.Fuzz(func(t *testing.T, data []byte) { + subject, err := car.NewReader(bytes.NewReader(data)) + if err != nil { + return + } + + subject.Roots() + _, err = subject.IndexReader() + if err != nil { + return + } + dr, err := subject.DataReader() + if err != nil { + return + } + 
car.GenerateIndex(dr)
+	})
+}
+
+func FuzzInspect(f *testing.F) {
+	seedWithCarFiles(f)
+
+	f.Fuzz(func(t *testing.T, data []byte) {
+		reader, err := car.NewReader(bytes.NewReader(data))
+		if err != nil {
+			return
+		}
+
+		// Do differential fuzzing between Inspect and the normal parser.
+		_, inspectErr := reader.Inspect(true)
+		if inspectErr == nil {
+			return
+		}
+
+		reader, err = car.NewReader(bytes.NewReader(data))
+		if err != nil {
+			t.Fatal("second NewReader on same data failed", err.Error())
+		}
+		i, err := reader.IndexReader()
+		if err != nil {
+			return
+		}
+		// FIXME: Once indexes are safe to parse, do not skip .car with index in the differential fuzzing.
+		if i == nil {
+			return
+		}
+		dr, err := reader.DataReader()
+		if err != nil {
+			return
+		}
+
+		_, err = carv1.ReadHeader(dr, carv1.DefaultMaxAllowedHeaderSize)
+		if err != nil {
+			return
+		}
+
+		blocks, err := car.NewBlockReader(dr)
+		if err != nil {
+			return
+		}
+
+		for {
+			_, err := blocks.Next()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				// caught error as expected
+				return
+			}
+		}
+
+		t.Fatal("Inspect found an error but we read this file correctly:", inspectErr.Error())
+	})
+}
diff --git a/ipld/car/v2/index/doc.go b/ipld/car/v2/index/doc.go
new file mode 100644
index 0000000000..b8062cc186
--- /dev/null
+++ b/ipld/car/v2/index/doc.go
@@ -0,0 +1,6 @@
+// Package index provides indexing functionality for a CARv1 data payload, represented as a mapping
+// of CID to offset. This can then be used to implement random access over a CARv1.
+//
+// An index can be written or read using the following static functions: index.WriteTo and
+// index.ReadFrom.
+package index
diff --git a/ipld/car/v2/index/errors.go b/ipld/car/v2/index/errors.go
new file mode 100644
index 0000000000..1a10a98467
--- /dev/null
+++ b/ipld/car/v2/index/errors.go
@@ -0,0 +1,6 @@
+package index
+
+import "errors"
+
+// ErrNotFound signals a record is not found in the index.
+var ErrNotFound = errors.New("not found")
diff --git a/ipld/car/v2/index/example_test.go b/ipld/car/v2/index/example_test.go
new file mode 100644
index 0000000000..bef2e2e01a
--- /dev/null
+++ b/ipld/car/v2/index/example_test.go
@@ -0,0 +1,113 @@
+package index_test
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+
+	carv2 "github.com/ipfs/boxo/ipld/car/v2"
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+)
+
+// ExampleReadFrom unmarshalls an index from an indexed CARv2 file, and for each root CID prints the
+// offset at which its corresponding block starts relative to the wrapped CARv1 data payload.
+func ExampleReadFrom() {
+	// Open the CARv2 file
+	cr, err := carv2.OpenReader("../testdata/sample-wrapped-v2.car")
+	if err != nil {
+		panic(err)
+	}
+	defer cr.Close()
+
+	// Get root CIDs in the CARv1 file.
+	roots, err := cr.Roots()
+	if err != nil {
+		panic(err)
+	}
+
+	// Read and unmarshall index within CARv2 file.
+	ir, err := cr.IndexReader()
+	if err != nil {
+		panic(err)
+	}
+	idx, err := index.ReadFrom(ir)
+	if err != nil {
+		panic(err)
+	}
+
+	// For each root CID print the offset relative to CARv1 data payload.
+	for _, r := range roots {
+		offset, err := index.GetFirst(idx, r)
+		if err != nil {
+			panic(err)
+		}
+		fmt.Printf("Frame with CID %v starts at offset %v relative to CARv1 data payload.\n", r, offset)
+	}
+
+	// Output:
+	// Frame with CID bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy starts at offset 61 relative to CARv1 data payload.
+}
+
+// ExampleWriteTo unmarshalls an index from an indexed CARv2 file, and stores it as a separate
+// file on disk.
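+// The example then seeks back to the start of the stored file, reads it back with
+// index.ReadFrom, and checks that the round-tripped index is deeply equal to the
+// one embedded in the CARv2 file.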
+func ExampleWriteTo() {
+	// Open the CARv2 file
+	src := "../testdata/sample-wrapped-v2.car"
+	cr, err := carv2.OpenReader(src)
+	if err != nil {
+		panic(err)
+	}
+	defer func() {
+		if err := cr.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	// Read and unmarshall index within CARv2 file.
+	ir, err := cr.IndexReader()
+	if err != nil {
+		panic(err)
+	}
+	idx, err := index.ReadFrom(ir)
+	if err != nil {
+		panic(err)
+	}
+
+	// Store the index alone onto a destination file.
+	f, err := os.CreateTemp(os.TempDir(), "example-index-*.carindex")
+	if err != nil {
+		panic(err)
+	}
+	defer func() {
+		if err := f.Close(); err != nil {
+			panic(err)
+		}
+	}()
+	_, err = index.WriteTo(idx, f)
+	if err != nil {
+		panic(err)
+	}
+
+	// Seek to the beginning of the file to read it back.
+	_, err = f.Seek(0, io.SeekStart)
+	if err != nil {
+		panic(err)
+	}
+
+	// Read and unmarshall the destination file as a separate index instance.
+	reReadIdx, err := index.ReadFrom(f)
+	if err != nil {
+		panic(err)
+	}
+
+	// Expect indices to be equal.
+	if reflect.DeepEqual(idx, reReadIdx) {
+		fmt.Printf("Saved index file matches the index embedded in CARv2 at %v.\n", src)
+	} else {
+		panic("expected to get the same index as the CARv2 file")
+	}
+
+	// Output:
+	// Saved index file matches the index embedded in CARv2 at ../testdata/sample-wrapped-v2.car.
+}
diff --git a/ipld/car/v2/index/index.go b/ipld/car/v2/index/index.go
new file mode 100644
index 0000000000..9a5e0d0841
--- /dev/null
+++ b/ipld/car/v2/index/index.go
@@ -0,0 +1,160 @@
+package index
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io"
+	"github.com/ipfs/go-cid"
+
+	"github.com/multiformats/go-multicodec"
+	"github.com/multiformats/go-multihash"
+	"github.com/multiformats/go-varint"
+)
+
+// CarIndexNone is a sentinel value used as a multicodec code for the index indicating no index.
+const CarIndexNone = 0x300000
+
+type (
+	// Record is a pre-processed record of a car item and location.
+	Record struct {
+		cid.Cid
+		Offset uint64
+	}
+
+	// Index provides an interface for looking up the byte offset of a given CID.
+	//
+	// Note that each indexing mechanism is free to match CIDs however it
+	// sees fit. For example, multicodec.CarIndexSorted only indexes
+	// multihash digests, meaning that Get and GetAll will find matching
+	// blocks even if the CID's encoding multicodec differs. Other index
+	// implementations might index the entire CID, the entire multihash, or
+	// just part of a multihash's digest.
+	//
+	// See: multicodec.CarIndexSorted, multicodec.CarMultihashIndexSorted
+	Index interface {
+		// Codec provides the multicodec code that the index implements.
+		//
+		// Note that this may return a reserved code if the index
+		// implementation is not defined in a spec.
+		Codec() multicodec.Code
+
+		// Marshal encodes the index in serial form.
+		Marshal(w io.Writer) (uint64, error)
+
+		// Unmarshal decodes the index from its serial form.
+		// Note, this function will copy the entire index into memory.
+		//
+		// Do not unmarshal index from untrusted CARv2 files. Instead, the index should be
+		// regenerated from the CARv2 data payload.
+		Unmarshal(r io.Reader) error
+
+		// Load inserts a number of records into the index.
+		// Note that Index will load all given records. Any filtering of the records such as
+		// exclusion of CIDs with multihash.IDENTITY code must occur prior to calling this function.
+		//
+		// Further, the actual information extracted and indexed from the given records entirely
+		// depends on the concrete index implementation.
+		// For example, some index implementations may only store partial multihashes.
+		Load([]Record) error
+
+		// GetAll looks up all blocks matching a given CID,
+		// calling a function for each one of their offsets.
+		//
+		// GetAll stops if the given function returns false,
+		// or there are no more offsets; whichever happens first.
+		//
+		// If no error occurred and the CID isn't indexed,
+		// meaning that no callbacks happen,
+		// ErrNotFound is returned.
+		GetAll(cid.Cid, func(uint64) bool) error
+	}
+
+	// IterableIndex is an index which supports iterating over its elements.
+	IterableIndex interface {
+		Index
+
+		// ForEach takes a callback function that will be called
+		// on each entry in the index. The arguments to the callback are
+		// the multihash of the element, and the offset in the car file
+		// where the element appears.
+		//
+		// If the callback returns a non-nil error, the iteration is aborted,
+		// and the ForEach function returns the error to the user.
+		//
+		// An index may contain multiple offsets corresponding to the same multihash, e.g. via duplicate blocks.
+		// In such cases, the given function may be called multiple times with the same multihash but different offset.
+		//
+		// The order of calls to the given function is deterministic, but entirely index-specific.
+		ForEach(func(multihash.Multihash, uint64) error) error
+	}
+)
+
+// GetFirst is a wrapper over Index.GetAll, returning the offset for the first
+// matching indexed CID.
+func GetFirst(idx Index, key cid.Cid) (uint64, error) {
+	var firstOffset uint64
+	err := idx.GetAll(key, func(offset uint64) bool {
+		firstOffset = offset
+		return false
+	})
+	return firstOffset, err
+}
+
+// New constructs a new index corresponding to the given CAR index codec.
+func New(codec multicodec.Code) (Index, error) {
+	switch codec {
+	case multicodec.CarIndexSorted:
+		return newSorted(), nil
+	case multicodec.CarMultihashIndexSorted:
+		return NewMultihashSorted(), nil
+	default:
+		return nil, fmt.Errorf("unknown index codec: %v", codec)
+	}
+}
+
+// WriteTo writes the given idx into w.
+// The written bytes include the index encoding.
+// This can then be read back using index.ReadFrom.
+func WriteTo(idx Index, w io.Writer) (uint64, error) {
+	buf := make([]byte, binary.MaxVarintLen64)
+	b := varint.PutUvarint(buf, uint64(idx.Codec()))
+	n, err := w.Write(buf[:b])
+	if err != nil {
+		return uint64(n), err
+	}
+
+	l, err := idx.Marshal(w)
+	return uint64(n) + l, err
+}
+
+// ReadFrom reads an index from r.
+// The reader decodes the index by reading the first varint to interpret the encoding.
+// Returns an error if the encoding is not known.
+//
+// Attempting to read index data from untrusted sources is not recommended.
+// Instead, the index should be regenerated from the CARv2 data payload.
+func ReadFrom(r io.Reader) (Index, error) {
+	codec, err := ReadCodec(r)
+	if err != nil {
+		return nil, err
+	}
+	idx, err := New(codec)
+	if err != nil {
+		return nil, err
+	}
+	if err := idx.Unmarshal(r); err != nil {
+		return nil, err
+	}
+	return idx, nil
+}
+
+// ReadCodec reads the codec of the index by decoding the first varint read from r.
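+// Note that ReadCodec consumes the codec varint from r, so a subsequent call to the
+// matching Index.Unmarshal expects the stream positioned just past it.
+//
+// A minimal usage sketch (illustrative, assuming data holds a serialized index):
+//
+//	codec, err := index.ReadCodec(bytes.NewReader(data))
+//	if err == nil && codec == multicodec.CarMultihashIndexSorted {
+//		// The remainder of the stream can be handed to Unmarshal.
+//	}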
+func ReadCodec(r io.Reader) (multicodec.Code, error) {
+	code, err := varint.ReadUvarint(internalio.ToByteReader(r))
+	if err != nil {
+		return 0, err
+	}
+	return multicodec.Code(code), nil
+}
diff --git a/ipld/car/v2/index/index_test.go b/ipld/car/v2/index/index_test.go
new file mode 100644
index 0000000000..6dc2a5efde
--- /dev/null
+++ b/ipld/car/v2/index/index_test.go
@@ -0,0 +1,168 @@
+package index
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util"
+	"github.com/multiformats/go-multicodec"
+	"github.com/multiformats/go-varint"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNew(t *testing.T) {
+	tests := []struct {
+		name    string
+		codec   multicodec.Code
+		want    Index
+		wantErr bool
+	}{
+		{
+			name:  "CarSortedIndexCodecIsConstructed",
+			codec: multicodec.CarIndexSorted,
+			want:  newSorted(),
+		},
+		{
+			name:    "ValidMultiCodecButUnknownToIndexIsError",
+			codec:   multicodec.Cidv1,
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := New(tt.codec)
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tt.want, got)
+			}
+		})
+	}
+}
+
+func TestReadFrom(t *testing.T) {
+	idxf, err := os.Open("../testdata/sample-index.carindex")
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, idxf.Close()) })
+
+	subject, err := ReadFrom(idxf)
+	require.NoError(t, err)
+
+	_, err = idxf.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+
+	idxf2, err := os.Open("../testdata/sample-multihash-index-sorted.carindex")
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, idxf2.Close()) })
+
+	subjectInAltFormat, err := ReadFrom(idxf2)
+	require.NoError(t, err)
+
+	crf, err := os.Open("../testdata/sample-v1.car")
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, crf.Close()) })
+	cr, err := carv1.NewCarReader(crf)
+	require.NoError(t, err)
+
+	for {
+		wantBlock, err := cr.Next()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+
+		wantCid := wantBlock.Cid()
+		// Get offset from the index for a CID and assert it exists
+		gotOffset, err := GetFirst(subject, wantCid)
+		require.NoError(t, err)
+		require.NotZero(t, gotOffset)
+
+		// Get offset from the index in alternative format for a CID and assert it exists
+		gotOffset2, err := GetFirst(subjectInAltFormat, wantCid)
+		require.NoError(t, err)
+		require.NotZero(t, gotOffset2)
+
+		// Seek to the offset on CARv1 file
+		_, err = crf.Seek(int64(gotOffset), io.SeekStart)
+		require.NoError(t, err)
+
+		// Read the frame at the offset and assert the frame corresponds to the expected block.
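+		// Editorial note: a CARv1 frame (section) is laid out as
+		// varint(len(CID bytes) + len(data)) || CID bytes || block data, which is what
+		// util.ReadNode decodes starting from the current file offset.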
+ gotCid, gotData, err := util.ReadNode(crf, false, carv1.DefaultMaxAllowedSectionSize) + require.NoError(t, err) + gotBlock, err := blocks.NewBlockWithCid(gotData, gotCid) + require.NoError(t, err) + require.Equal(t, wantBlock, gotBlock) + } +} + +func TestWriteTo(t *testing.T) { + // Read sample index on file + idxf, err := os.Open("../testdata/sample-multihash-index-sorted.carindex") + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, idxf.Close()) }) + + // Unmarshall to get expected index + wantIdx, err := ReadFrom(idxf) + require.NoError(t, err) + + // Write the same index out + dest := filepath.Join(t.TempDir(), "index-write-to-test.carindex") + destF, err := os.Create(dest) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, destF.Close()) }) + _, err = WriteTo(wantIdx, destF) + require.NoError(t, err) + + // Seek to the beginning of the written out file. + _, err = destF.Seek(0, io.SeekStart) + require.NoError(t, err) + + // Read the written index back + gotIdx, err := ReadFrom(destF) + require.NoError(t, err) + + // Assert they are equal + require.Equal(t, wantIdx, gotIdx) +} + +func TestMarshalledIndexStartsWithCodec(t *testing.T) { + + tests := []struct { + path string + codec multicodec.Code + }{ + { + path: "../testdata/sample-multihash-index-sorted.carindex", + codec: multicodec.CarMultihashIndexSorted, + }, + { + path: "../testdata/sample-index.carindex", + codec: multicodec.CarIndexSorted, + }, + } + for _, test := range tests { + test := test + t.Run(test.codec.String(), func(t *testing.T) { + // Read sample index on file + idxf, err := os.Open(test.path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, idxf.Close()) }) + + // Unmarshall to get expected index + wantIdx, err := ReadFrom(idxf) + require.NoError(t, err) + + // Assert the first two bytes are the corresponding multicodec code. + buf := new(bytes.Buffer) + _, err = WriteTo(wantIdx, buf) + require.NoError(t, err) + require.Equal(t, varint.ToUvarint(uint64(test.codec)), buf.Bytes()[:2]) + }) + } +} diff --git a/ipld/car/v2/index/indexsorted.go b/ipld/car/v2/index/indexsorted.go new file mode 100644 index 0000000000..93b31e433c --- /dev/null +++ b/ipld/car/v2/index/indexsorted.go @@ -0,0 +1,319 @@ +package index + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + + "github.com/multiformats/go-multicodec" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +var _ Index = (*multiWidthIndex)(nil) + +type ( + digestRecord struct { + digest []byte + index uint64 + } + recordSet []digestRecord + singleWidthIndex struct { + width uint32 + len uint64 // in struct, len is #items. when marshaled, it's saved as #bytes. 
+ index []byte + } + multiWidthIndex map[uint32]singleWidthIndex +) + +func (d digestRecord) write(buf []byte) { + n := copy(buf[:], d.digest) + binary.LittleEndian.PutUint64(buf[n:], d.index) +} + +func (r recordSet) Len() int { + return len(r) +} + +func (r recordSet) Less(i, j int) bool { + return bytes.Compare(r[i].digest, r[j].digest) < 0 +} + +func (r recordSet) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (s *singleWidthIndex) Marshal(w io.Writer) (uint64, error) { + l := uint64(0) + if err := binary.Write(w, binary.LittleEndian, s.width); err != nil { + return 0, err + } + l += 4 + if err := binary.Write(w, binary.LittleEndian, int64(len(s.index))); err != nil { + return l, err + } + l += 8 + n, err := w.Write(s.index) + return l + uint64(n), err +} + +func (s *singleWidthIndex) Unmarshal(r io.Reader) error { + var width uint32 + if err := binary.Read(r, binary.LittleEndian, &width); err != nil { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err + } + var dataLen uint64 + if err := binary.Read(r, binary.LittleEndian, &dataLen); err != nil { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err + } + + if err := s.checkUnmarshalLengths(width, dataLen, 0); err != nil { + return err + } + + buf := make([]byte, dataLen) + if _, err := io.ReadFull(r, buf); err != nil { + return err + } + s.index = buf + return nil +} + +func (s *singleWidthIndex) checkUnmarshalLengths(width uint32, dataLen, extra uint64) error { + if width < 8 { + return errors.New("malformed index; width must be at least 8") + } + const maxWidth = 32 << 20 // 32MiB, to ~match the go-cid maximum + if width > maxWidth { + return errors.New("index too big; singleWidthIndex width is larger than allowed maximum") + } + oldDataLen, dataLen := dataLen, dataLen+extra + if oldDataLen > dataLen { + return errors.New("index too big; singleWidthIndex len is overflowing") + } + if int64(dataLen) < 0 { + return errors.New("index too big; singleWidthIndex len is overflowing int64") + } + s.width = width + s.len = dataLen / uint64(width) + return nil +} + +func (s *singleWidthIndex) Less(i int, digest []byte) bool { + return bytes.Compare(digest[:], s.index[i*int(s.width):((i+1)*int(s.width)-8)]) <= 0 +} + +func (s *singleWidthIndex) GetAll(c cid.Cid, fn func(uint64) bool) error { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return err + } + return s.getAll(d.Digest, fn) +} + +func (s *singleWidthIndex) getAll(d []byte, fn func(uint64) bool) error { + idx := sort.Search(int(s.len), func(i int) bool { + return s.Less(i, d) + }) + + var any bool + for ; uint64(idx) < s.len; idx++ { + digestStart := idx * int(s.width) + offsetEnd := (idx + 1) * int(s.width) + digestEnd := offsetEnd - 8 + if bytes.Equal(d[:], s.index[digestStart:digestEnd]) { + any = true + offset := binary.LittleEndian.Uint64(s.index[digestEnd:offsetEnd]) + if !fn(offset) { + // User signalled to stop searching; therefore, break. + break + } + } else { + // No more matches; therefore, break. 
+				break
+			}
+		}
+	}
+	if !any {
+		return ErrNotFound
+	}
+	return nil
+}
+
+func (s *singleWidthIndex) Load(items []Record) error {
+	m := make(multiWidthIndex)
+	if err := m.Load(items); err != nil {
+		return err
+	}
+	if len(m) != 1 {
+		return fmt.Errorf("unexpected number of cid widths: %d", len(m))
+	}
+	for _, i := range m {
+		s.index = i.index
+		s.len = i.len
+		s.width = i.width
+		return nil
+	}
+	return nil
+}
+
+func (s *singleWidthIndex) forEachDigest(f func(digest []byte, offset uint64) error) error {
+	segmentCount := len(s.index) / int(s.width)
+	for i := 0; i < segmentCount; i++ {
+		digestStart := i * int(s.width)
+		offsetEnd := (i + 1) * int(s.width)
+		digestEnd := offsetEnd - 8
+		digest := s.index[digestStart:digestEnd]
+		offset := binary.LittleEndian.Uint64(s.index[digestEnd:offsetEnd])
+		if err := f(digest, offset); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *multiWidthIndex) GetAll(c cid.Cid, fn func(uint64) bool) error {
+	d, err := multihash.Decode(c.Hash())
+	if err != nil {
+		return err
+	}
+	if s, ok := (*m)[uint32(len(d.Digest)+8)]; ok {
+		return s.getAll(d.Digest, fn)
+	}
+	return ErrNotFound
+}
+
+func (m *multiWidthIndex) Codec() multicodec.Code {
+	return multicodec.CarIndexSorted
+}
+
+func (m *multiWidthIndex) Marshal(w io.Writer) (uint64, error) {
+	l := uint64(0)
+	if err := binary.Write(w, binary.LittleEndian, int32(len(*m))); err != nil {
+		return l, err
+	}
+	l += 4
+
+	// The widths are unique, but ranging over a map isn't deterministic.
+	// As per the CARv2 spec, we must order buckets by digest length.
+
+	widths := make([]uint32, 0, len(*m))
+	for width := range *m {
+		widths = append(widths, width)
+	}
+	sort.Slice(widths, func(i, j int) bool {
+		return widths[i] < widths[j]
+	})
+
+	for _, width := range widths {
+		bucket := (*m)[width]
+		n, err := bucket.Marshal(w)
+		l += n
+		if err != nil {
+			return l, err
+		}
+	}
+	return l, nil
+}
+
+func (m *multiWidthIndex) Unmarshal(r io.Reader) error {
+	reader := internalio.ToByteReadSeeker(r)
+	var l int32
+	if err := binary.Read(reader, binary.LittleEndian, &l); err != nil {
+		if err == io.EOF {
+			return io.ErrUnexpectedEOF
+		}
+		return err
+	}
+	sum, err := reader.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	if int32(l) < 0 {
+		return errors.New("index too big; multiWidthIndex count is overflowing int32")
+	}
+	for i := 0; i < int(l); i++ {
+		s := singleWidthIndex{}
+		if err := s.Unmarshal(r); err != nil {
+			return err
+		}
+		n, err := reader.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return err
+		}
+		oldSum := sum
+		sum += n
+		if sum < oldSum {
+			return errors.New("index too big; multiWidthIndex len is overflowing int64")
+		}
+		(*m)[s.width] = s
+	}
+	return nil
+}
+
+func (m *multiWidthIndex) Load(items []Record) error {
+	// Split CIDs by digest length.
+	idxs := make(map[int][]digestRecord)
+	for _, item := range items {
+		decHash, err := multihash.Decode(item.Hash())
+		if err != nil {
+			return err
+		}
+
+		digest := decHash.Digest
+		idx, ok := idxs[len(digest)]
+		if !ok {
+			idxs[len(digest)] = make([]digestRecord, 0)
+			idx = idxs[len(digest)]
+		}
+		idxs[len(digest)] = append(idx, digestRecord{digest, item.Offset})
+	}
+
+	// Sort each list, then write it out in compact form.
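+	// Editorial sketch of the compact layout produced below (derived from the code,
+	// not from separate documentation): each bucket holds fixed-width records of
+	// digest||offset, with the offset stored as a little-endian uint64. For SHA2-256
+	// digests the record width is 32+8=40 bytes, so record i occupies bytes
+	// [i*40, (i+1)*40) and can be located by binary search over the digests.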
+ for width, lst := range idxs { + sort.Sort(recordSet(lst)) + rcrdWdth := width + 8 + compact := make([]byte, rcrdWdth*len(lst)) + for off, itm := range lst { + itm.write(compact[off*rcrdWdth : (off+1)*rcrdWdth]) + } + s := singleWidthIndex{ + width: uint32(rcrdWdth), + len: uint64(len(lst)), + index: compact, + } + (*m)[uint32(width)+8] = s + } + return nil +} + +func (m *multiWidthIndex) forEachDigest(f func(digest []byte, offset uint64) error) error { + sizes := make([]uint32, 0, len(*m)) + for k := range *m { + sizes = append(sizes, k) + } + sort.Slice(sizes, func(i, j int) bool { return sizes[i] < sizes[j] }) + for _, s := range sizes { + swi := (*m)[s] + if err := swi.forEachDigest(f); err != nil { + return err + } + } + return nil +} + +func newSorted() Index { + m := make(multiWidthIndex) + return &m +} diff --git a/ipld/car/v2/index/indexsorted_test.go b/ipld/car/v2/index/indexsorted_test.go new file mode 100644 index 0000000000..a0da42354a --- /dev/null +++ b/ipld/car/v2/index/indexsorted_test.go @@ -0,0 +1,64 @@ +package index + +import ( + "encoding/binary" + "testing" + + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" +) + +func TestSortedIndexCodec(t *testing.T) { + require.Equal(t, multicodec.CarIndexSorted, newSorted().Codec()) +} + +func TestIndexSorted_GetReturnsNotFoundWhenCidDoesNotExist(t *testing.T) { + nonExistingKey := merkledag.NewRawNode([]byte("lobstermuncher")).Block.Cid() + tests := []struct { + name string + subject Index + }{ + { + "Sorted", + newSorted(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotOffset, err := GetFirst(tt.subject, nonExistingKey) + require.Equal(t, ErrNotFound, err) + require.Equal(t, uint64(0), gotOffset) + }) + } +} + +func TestSingleWidthIndex_GetAll(t *testing.T) { + l := 4 + width := 9 + buf := make([]byte, width*l) + + // Populate the index bytes as total of four records. + // The last record should not match the getAll. + for i := 0; i < l; i++ { + if i < l-1 { + buf[i*width] = 1 + } else { + buf[i*width] = 2 + } + binary.LittleEndian.PutUint64(buf[(i*width)+1:(i*width)+width], uint64(14)) + } + subject := &singleWidthIndex{ + width: 9, + len: uint64(l), + index: buf, + } + + var foundCount int + err := subject.getAll([]byte{1}, func(u uint64) bool { + foundCount++ + return true + }) + require.NoError(t, err) + require.Equal(t, 3, foundCount) +} diff --git a/ipld/car/v2/index/mhindexsorted.go b/ipld/car/v2/index/mhindexsorted.go new file mode 100644 index 0000000000..75ea21ecd5 --- /dev/null +++ b/ipld/car/v2/index/mhindexsorted.go @@ -0,0 +1,214 @@ +package index + +import ( + "encoding/binary" + "errors" + "io" + "sort" + + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" +) + +var ( + _ Index = (*MultihashIndexSorted)(nil) + _ IterableIndex = (*MultihashIndexSorted)(nil) +) + +type ( + // MultihashIndexSorted maps multihash code (i.e. hashing algorithm) to multiWidthCodedIndex. + MultihashIndexSorted map[uint64]*multiWidthCodedIndex + // multiWidthCodedIndex stores multihash code for each multiWidthIndex. 
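+	// Editorial note: unlike plain multiWidthIndex buckets, which are keyed by digest
+	// width alone, retaining the multihash code here lets forEach re-encode complete
+	// multihashes from the stored digests.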
+ multiWidthCodedIndex struct { + multiWidthIndex + code uint64 + } +) + +func newMultiWidthCodedIndex() *multiWidthCodedIndex { + return &multiWidthCodedIndex{ + multiWidthIndex: make(multiWidthIndex), + } +} + +func (m *multiWidthCodedIndex) Marshal(w io.Writer) (uint64, error) { + if err := binary.Write(w, binary.LittleEndian, m.code); err != nil { + return 8, err + } + n, err := m.multiWidthIndex.Marshal(w) + return 8 + n, err +} + +func (m *multiWidthCodedIndex) Unmarshal(r io.Reader) error { + if err := binary.Read(r, binary.LittleEndian, &m.code); err != nil { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err + } + return m.multiWidthIndex.Unmarshal(r) +} + +func (m *multiWidthCodedIndex) forEach(f func(mh multihash.Multihash, offset uint64) error) error { + return m.multiWidthIndex.forEachDigest(func(digest []byte, offset uint64) error { + mh, err := multihash.Encode(digest, m.code) + if err != nil { + return err + } + return f(mh, offset) + }) +} + +func (m *MultihashIndexSorted) Codec() multicodec.Code { + return multicodec.CarMultihashIndexSorted +} + +func (m *MultihashIndexSorted) Marshal(w io.Writer) (uint64, error) { + if err := binary.Write(w, binary.LittleEndian, int32(len(*m))); err != nil { + return 4, err + } + // The codes are unique, but ranging over a map isn't deterministic. + // As per the CARv2 spec, we must order buckets by digest length. + // TODO update CARv2 spec to reflect this for the new index type. + codes := m.sortedMultihashCodes() + l := uint64(4) + + for _, code := range codes { + mwci := (*m)[code] + n, err := mwci.Marshal(w) + l += n + if err != nil { + return l, err + } + } + return l, nil +} + +func (m *MultihashIndexSorted) sortedMultihashCodes() []uint64 { + codes := make([]uint64, 0, len(*m)) + for code := range *m { + codes = append(codes, code) + } + sort.Slice(codes, func(i, j int) bool { + return codes[i] < codes[j] + }) + return codes +} + +func (m *MultihashIndexSorted) Unmarshal(r io.Reader) error { + reader := internalio.ToByteReadSeeker(r) + var l int32 + if err := binary.Read(reader, binary.LittleEndian, &l); err != nil { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err + } + sum, err := reader.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + if int32(l) < 0 { + return errors.New("index too big; MultihashIndexSorted count is overflowing int32") + } + for i := 0; i < int(l); i++ { + mwci := newMultiWidthCodedIndex() + if err := mwci.Unmarshal(r); err != nil { + return err + } + n, err := reader.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + oldSum := sum + sum += n + if sum < oldSum { + return errors.New("index too big; MultihashIndexSorted len is overflowing int64") + } + m.put(mwci) + } + return nil +} + +func (m *MultihashIndexSorted) put(mwci *multiWidthCodedIndex) { + (*m)[mwci.code] = mwci +} + +func (m *MultihashIndexSorted) Load(records []Record) error { + // TODO optimize load by avoiding multihash decoding twice. + // This implementation decodes multihashes twice: once here to group by code, and once in the + // internals of multiWidthIndex to group by digest length. The code can be optimized by + // combining the grouping logic into one step where the multihash of a CID is only decoded once. + // The optimization would need refactoring of the IndexSorted compaction logic. 
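+	// One possible shape for that optimization (editorial sketch, not implemented
+	// here): decode each multihash once into (code, digest), group records by code and
+	// digest length in a single pass, and hand the pre-decoded digests to the width
+	// buckets so they can skip their own multihash.Decode round.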
+
+	// Group records by multihash code
+	byCode := make(map[uint64][]Record)
+	for _, record := range records {
+		dmh, err := multihash.Decode(record.Hash())
+		if err != nil {
+			return err
+		}
+		code := dmh.Code
+		recsByCode, ok := byCode[code]
+		if !ok {
+			recsByCode = make([]Record, 0)
+			byCode[code] = recsByCode
+		}
+		byCode[code] = append(recsByCode, record)
+	}
+
+	// Load each record group.
+	for code, recsByCode := range byCode {
+		mwci := newMultiWidthCodedIndex()
+		mwci.code = code
+		if err := mwci.Load(recsByCode); err != nil {
+			return err
+		}
+		m.put(mwci)
+	}
+	return nil
+}
+
+func (m *MultihashIndexSorted) GetAll(cid cid.Cid, f func(uint64) bool) error {
+	hash := cid.Hash()
+	dmh, err := multihash.Decode(hash)
+	if err != nil {
+		return err
+	}
+	mwci, err := m.get(dmh)
+	if err != nil {
+		return err
+	}
+	return mwci.GetAll(cid, f)
+}
+
+// ForEach calls f for every multihash and its associated offset stored by this index.
+func (m *MultihashIndexSorted) ForEach(f func(mh multihash.Multihash, offset uint64) error) error {
+	sizes := make([]uint64, 0, len(*m))
+	for k := range *m {
+		sizes = append(sizes, k)
+	}
+	sort.Slice(sizes, func(i, j int) bool { return sizes[i] < sizes[j] })
+	for _, s := range sizes {
+		mwci := (*m)[s]
+		if err := mwci.forEach(f); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *MultihashIndexSorted) get(dmh *multihash.DecodedMultihash) (*multiWidthCodedIndex, error) {
+	if codedIdx, ok := (*m)[dmh.Code]; ok {
+		return codedIdx, nil
+	}
+	return nil, ErrNotFound
+}
+
+func NewMultihashSorted() *MultihashIndexSorted {
+	index := make(MultihashIndexSorted)
+	return &index
+}
diff --git a/ipld/car/v2/index/mhindexsorted_test.go b/ipld/car/v2/index/mhindexsorted_test.go
new file mode 100644
index 0000000000..1b0f2ab863
--- /dev/null
+++ b/ipld/car/v2/index/mhindexsorted_test.go
@@ -0,0 +1,113 @@
+package index_test
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"testing"
+
+	"github.com/multiformats/go-multicodec"
+
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multihash"
+	"github.com/stretchr/testify/require"
+)
+
+func TestMultihashSortedIndex_Codec(t *testing.T) {
+	subject, err := index.New(multicodec.CarMultihashIndexSorted)
+	require.NoError(t, err)
+	require.Equal(t, multicodec.CarMultihashIndexSorted, subject.Codec())
+}
+
+func TestMultiWidthCodedIndex_MarshalUnmarshal(t *testing.T) {
+	rng := rand.New(rand.NewSource(1413))
+	records := generateIndexRecords(t, multihash.SHA2_256, rng)
+
+	// Create a new mh sorted index and load randomly generated records into it.
+	subject, err := index.New(multicodec.CarMultihashIndexSorted)
+	require.NoError(t, err)
+	err = subject.Load(records)
+	require.NoError(t, err)
+
+	// Marshal the index.
+	buf := new(bytes.Buffer)
+	_, err = subject.Marshal(buf)
+	require.NoError(t, err)
+
+	// Unmarshal it back to another instance of mh sorted index.
+	umSubject, err := index.New(multicodec.CarMultihashIndexSorted)
+	require.NoError(t, err)
+	err = umSubject.Unmarshal(buf)
+	require.NoError(t, err)
+
+	// Assert original records are present in both index instances with expected offset.
+	requireContainsAll(t, subject, records)
+	requireContainsAll(t, umSubject, records)
+}
+
+func TestMultiWidthCodedIndex_StableIterate(t *testing.T) {
+	rng := rand.New(rand.NewSource(1414))
+	records := generateIndexRecords(t, multihash.SHA2_256, rng)
+	records = append(records, generateIndexRecords(t, multihash.SHA2_512, rng)...)
+ records = append(records, generateIndexRecords(t, multihash.IDENTITY, rng)...) + + // Create a new mh sorted index and load randomly generated records into it. + idx, err := index.New(multicodec.CarMultihashIndexSorted) + require.NoError(t, err) + err = idx.Load(records) + require.NoError(t, err) + + subject, ok := idx.(index.IterableIndex) + require.True(t, ok) + + mh := make([]multihash.Multihash, 0, len(records)) + require.NoError(t, subject.ForEach(func(m multihash.Multihash, _ uint64) error { + mh = append(mh, m) + return nil + })) + + for i := 0; i < 10; i++ { + candidate := make([]multihash.Multihash, 0, len(records)) + require.NoError(t, subject.ForEach(func(m multihash.Multihash, _ uint64) error { + candidate = append(candidate, m) + return nil + })) + require.Equal(t, mh, candidate) + } +} + +func generateIndexRecords(t *testing.T, hasherCode uint64, rng *rand.Rand) []index.Record { + var records []index.Record + recordCount := rng.Intn(99) + 1 // Up to 100 records + for i := 0; i < recordCount; i++ { + records = append(records, index.Record{ + Cid: generateCidV1(t, hasherCode, rng), + Offset: rng.Uint64(), + }) + } + return records +} + +func generateCidV1(t *testing.T, hasherCode uint64, rng *rand.Rand) cid.Cid { + data := []byte(fmt.Sprintf("🌊d-%d", rng.Uint64())) + mh, err := multihash.Sum(data, hasherCode, -1) + require.NoError(t, err) + return cid.NewCidV1(cid.Raw, mh) +} + +func requireContainsAll(t *testing.T, subject index.Index, nonIdentityRecords []index.Record) { + for _, r := range nonIdentityRecords { + wantCid := r.Cid + wantOffset := r.Offset + + var gotOffsets []uint64 + err := subject.GetAll(wantCid, func(o uint64) bool { + gotOffsets = append(gotOffsets, o) + return false + }) + require.NoError(t, err) + require.Equal(t, 1, len(gotOffsets)) + require.Equal(t, wantOffset, gotOffsets[0]) + } +} diff --git a/ipld/car/v2/index_gen.go b/ipld/car/v2/index_gen.go new file mode 100644 index 0000000000..5f1da03e16 --- /dev/null +++ b/ipld/car/v2/index_gen.go @@ -0,0 +1,231 @@ +package car + +import ( + "fmt" + "io" + "os" + + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" +) + +// GenerateIndex generates index for the given car payload reader. +// The index can be stored in serialized format using index.WriteTo. +// +// Note, the index is re-generated every time even if the payload is in CARv2 format and already has +// an index. To read existing index when available see ReadOrGenerateIndex. +// See: LoadIndex. +func GenerateIndex(v1r io.Reader, opts ...Option) (index.Index, error) { + wopts := ApplyOptions(opts...) + idx, err := index.New(wopts.IndexCodec) + if err != nil { + return nil, err + } + if err := LoadIndex(idx, v1r, opts...); err != nil { + return nil, err + } + return idx, nil +} + +// LoadIndex populates idx with index records generated from r. +// The r may be in CARv1 or CARv2 format. +// +// If the StoreIdentityCIDs option is set when calling LoadIndex, identity +// CIDs will be included in the index. By default this option is off, and +// identity CIDs will not be included in the index. +// +// Note, the index is re-generated every time even if r is in CARv2 format and already has an index. +// To read existing index when available see ReadOrGenerateIndex. 
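+//
+// A minimal usage sketch (assumes f is an open CAR file and that the index and
+// multicodec packages are imported):
+//
+//	idx, err := index.New(multicodec.CarMultihashIndexSorted)
+//	if err != nil {
+//		// handle error
+//	}
+//	err = LoadIndex(idx, f, StoreIdentityCIDs(true))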
+func LoadIndex(idx index.Index, r io.Reader, opts ...Option) error {
+	// Parse options.
+	o := ApplyOptions(opts...)
+
+	reader := internalio.ToByteReadSeeker(r)
+	pragma, err := carv1.ReadHeader(r, o.MaxAllowedHeaderSize)
+	if err != nil {
+		return fmt.Errorf("error reading car header: %w", err)
+	}
+
+	var dataSize, dataOffset int64
+	switch pragma.Version {
+	case 1:
+		break
+	case 2:
+		// Read the V2 header, which should appear immediately after the pragma according to the CARv2 spec.
+		var v2h Header
+		_, err := v2h.ReadFrom(r)
+		if err != nil {
+			return err
+		}
+
+		// Sanity-check the CARv2 header.
+		if v2h.DataOffset < HeaderSize {
+			return fmt.Errorf("malformed CARv2; data offset too small: %d", v2h.DataOffset)
+		}
+		if v2h.DataSize < 1 {
+			return fmt.Errorf("malformed CARv2; data payload size too small: %d", v2h.DataSize)
+		}
+
+		// Seek to the beginning of the inner CARv1 payload.
+		_, err = reader.Seek(int64(v2h.DataOffset), io.SeekStart)
+		if err != nil {
+			return err
+		}
+
+		// Set dataSize and dataOffset, which are then used during index loading logic to decide
+		// where to stop and to adjust the section offset respectively.
+		// Note that we could use a LimitReader here and re-define reader with it. However, that would make
+		// internalio.ToByteReadSeeker less efficient, since LimitReader implements neither
+		// ByteReader nor ReadSeeker.
+		dataSize = int64(v2h.DataSize)
+		dataOffset = int64(v2h.DataOffset)
+
+		// Read the inner CARv1 header to skip it and sanity check it.
+		v1h, err := carv1.ReadHeader(reader, o.MaxAllowedHeaderSize)
+		if err != nil {
+			return err
+		}
+		if v1h.Version != 1 {
+			return fmt.Errorf("expected data payload header version of 1; got %d", v1h.Version)
+		}
+	default:
+		return fmt.Errorf("expected either version 1 or 2; got %d", pragma.Version)
+	}
+
+	// Record the start of each section, with the first section starting from the current position in
+	// the reader, i.e. right after the header, since we have only read the header so far.
+	var sectionOffset int64
+
+	// The Seek call below is equivalent to getting the reader.offset directly.
+	// We get it through Seek to only depend on APIs of a typical io.Seeker.
+	// This would also reduce refactoring in case the utility reader is moved.
+	if sectionOffset, err = reader.Seek(0, io.SeekCurrent); err != nil {
+		return err
+	}
+
+	// Subtract the data offset; for a CARv1 this is zero, otherwise the value comes from the
+	// CARv2 header.
+	sectionOffset -= dataOffset
+
+	records := make([]index.Record, 0)
+	for {
+		// Read the section's length.
+		sectionLen, err := varint.ReadUvarint(reader)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		// Null padding; by default it's an error.
+		if sectionLen == 0 {
+			if o.ZeroLengthSectionAsEOF {
+				break
+			} else {
+				return fmt.Errorf("carv1 null padding not allowed by default; see ZeroLengthSectionAsEOF")
+			}
+		}
+
+		// Read the CID.
+		cidLen, c, err := cid.CidFromReader(reader)
+		if err != nil {
+			return err
+		}
+
+		if o.StoreIdentityCIDs || c.Prefix().MhType != multihash.IDENTITY {
+			if uint64(cidLen) > o.MaxIndexCidSize {
+				return &ErrCidTooLarge{MaxSize: o.MaxIndexCidSize, CurrentSize: uint64(cidLen)}
+			}
+			records = append(records, index.Record{Cid: c, Offset: uint64(sectionOffset)})
+		}
+
+		// Seek to the next section by skipping the block.
+		// The section length includes the CID, so subtract it.
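+		// For example (hypothetical numbers), a section of total length 104 prefixed by a
+		// 36-byte CID leaves 68 bytes of block data to skip over here.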
+		remainingSectionLen := int64(sectionLen) - int64(cidLen)
+		if sectionOffset, err = reader.Seek(remainingSectionLen, io.SeekCurrent); err != nil {
+			return err
+		}
+		// Subtract the data offset, which will be non-zero when reader represents a CARv2.
+		sectionOffset -= dataOffset
+
+		// Check if we have reached the end of the data payload and if so treat it as an EOF.
+		// Note, dataSize will be non-zero only if we are reading from a CARv2.
+		if dataSize != 0 && sectionOffset >= dataSize {
+			break
+		}
+	}
+
+	if err := idx.Load(records); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GenerateIndexFromFile walks a CAR file at the given path and generates an index of cid->byte offset.
+// The index can be stored using index.WriteTo. Both CARv1 and CARv2 formats are accepted.
+//
+// Note, the index is re-generated every time even if the given CAR file is in CARv2 format and
+// already has an index. To read existing index when available see ReadOrGenerateIndex.
+//
+// See: GenerateIndex.
+func GenerateIndexFromFile(path string, opts ...Option) (index.Index, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return GenerateIndex(f, opts...)
+}
+
+// ReadOrGenerateIndex accepts both CARv1 and CARv2 formats, and reads or generates an index for it.
+// When the given reader is in CARv1 format an index is always generated.
+// For a payload in CARv2 format, an index is only generated if Header.HasIndex returns false.
+// An error is returned for all other formats, i.e. pragma with versions other than 1 or 2.
+//
+// Note, the returned index lives entirely in memory and will not depend on the
+// given reader to fulfill index lookup.
+func ReadOrGenerateIndex(rs io.ReadSeeker, opts ...Option) (index.Index, error) {
+	// Read version.
+	version, err := ReadVersion(rs, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// Seek to the beginning, since reading the version changes the reader's offset.
+	if _, err := rs.Seek(0, io.SeekStart); err != nil {
+		return nil, err
+	}
+
+	switch version {
+	case 1:
+		// Simply generate the index, since there can't be a pre-existing one.
+		return GenerateIndex(rs, opts...)
+	case 2:
+		// Read CARv2 format.
+		v2r, err := NewReader(internalio.ToReaderAt(rs), opts...)
+		if err != nil {
+			return nil, err
+		}
+		// If an index is present, then there is no need to generate one; decode and return it.
+		if v2r.Header.HasIndex() {
+			ir, err := v2r.IndexReader()
+			if err != nil {
+				return nil, err
+			}
+			return index.ReadFrom(ir)
+		}
+		// Otherwise, generate the index from the CARv1 payload wrapped within the CARv2 format.
+		dr, err := v2r.DataReader()
+		if err != nil {
+			return nil, err
+		}
+		return GenerateIndex(dr, opts...)
+ default: + return nil, fmt.Errorf("unknown version %v", version) + } +} diff --git a/ipld/car/v2/index_gen_test.go b/ipld/car/v2/index_gen_test.go new file mode 100644 index 0000000000..48d30b426b --- /dev/null +++ b/ipld/car/v2/index_gen_test.go @@ -0,0 +1,274 @@ +package car_test + +import ( + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" + "github.com/stretchr/testify/require" +) + +func TestGenerateIndex(t *testing.T) { + type testCase struct { + name string + carPath string + opts []carv2.Option + wantIndexer func(t *testing.T) index.Index + wantErr bool + } + tests := []testCase{ + { + name: "CarV1IsIndexedAsExpected", + carPath: "testdata/sample-v1.car", + wantIndexer: func(t *testing.T) index.Index { + v1, err := os.Open("testdata/sample-v1.car") + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, v1.Close()) }) + want, err := carv2.GenerateIndex(v1) + require.NoError(t, err) + return want + }, + }, + { + name: "CarV2WithIndexIsReturnedAsExpected", + carPath: "testdata/sample-wrapped-v2.car", + wantIndexer: func(t *testing.T) index.Index { + v2, err := os.Open("testdata/sample-wrapped-v2.car") + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, v2.Close()) }) + reader, err := carv2.NewReader(v2) + require.NoError(t, err) + ir, err := reader.IndexReader() + require.NoError(t, err) + want, err := index.ReadFrom(ir) + require.NoError(t, err) + return want + }, + }, + { + name: "CarV1WithZeroLenSectionIsGeneratedAsExpected", + carPath: "testdata/sample-v1-with-zero-len-section.car", + opts: []carv2.Option{carv2.ZeroLengthSectionAsEOF(true)}, + wantIndexer: func(t *testing.T) index.Index { + v1, err := os.Open("testdata/sample-v1-with-zero-len-section.car") + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, v1.Close()) }) + want, err := carv2.GenerateIndex(v1, carv2.ZeroLengthSectionAsEOF(true)) + require.NoError(t, err) + return want + }, + }, + { + name: "AnotherCarV1WithZeroLenSectionIsGeneratedAsExpected", + carPath: "testdata/sample-v1-with-zero-len-section2.car", + opts: []carv2.Option{carv2.ZeroLengthSectionAsEOF(true)}, + wantIndexer: func(t *testing.T) index.Index { + v1, err := os.Open("testdata/sample-v1-with-zero-len-section2.car") + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, v1.Close()) }) + want, err := carv2.GenerateIndex(v1, carv2.ZeroLengthSectionAsEOF(true)) + require.NoError(t, err) + return want + }, + }, + { + name: "CarV1WithZeroLenSectionWithoutOptionIsError", + carPath: "testdata/sample-v1-with-zero-len-section.car", + wantErr: true, + }, + { + name: "CarOtherThanV1OrV2IsError", + carPath: "testdata/sample-rootless-v42.car", + wantIndexer: func(t *testing.T) index.Index { return nil }, + wantErr: true, + }, + } + + requireWant := func(tt testCase, got index.Index, gotErr error) { + if tt.wantErr { + require.Error(t, gotErr) + } else { + require.NoError(t, gotErr) + var want index.Index + if tt.wantIndexer != nil { + want = tt.wantIndexer(t) + } + if want == nil { + require.Nil(t, got) + } else { + require.Equal(t, want, got) + } + } + } + + for _, tt := range tests { + t.Run("ReadOrGenerateIndex_"+tt.name, func(t *testing.T) { + carFile, err 
:= os.Open(tt.carPath) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, carFile.Close()) }) + got, gotErr := carv2.ReadOrGenerateIndex(carFile, tt.opts...) + requireWant(tt, got, gotErr) + }) + t.Run("GenerateIndexFromFile_"+tt.name, func(t *testing.T) { + got, gotErr := carv2.GenerateIndexFromFile(tt.carPath, tt.opts...) + requireWant(tt, got, gotErr) + }) + t.Run("LoadIndex_"+tt.name, func(t *testing.T) { + carFile, err := os.Open(tt.carPath) + require.NoError(t, err) + got, err := index.New(multicodec.CarMultihashIndexSorted) + require.NoError(t, err) + gotErr := carv2.LoadIndex(got, carFile, tt.opts...) + requireWant(tt, got, gotErr) + }) + t.Run("GenerateIndex_"+tt.name, func(t *testing.T) { + carFile, err := os.Open(tt.carPath) + require.NoError(t, err) + got, gotErr := carv2.GenerateIndex(carFile, tt.opts...) + requireWant(tt, got, gotErr) + }) + } +} + +func TestMultihashIndexSortedConsistencyWithIndexSorted(t *testing.T) { + path := "testdata/sample-v1.car" + + sortedIndex, err := carv2.GenerateIndexFromFile(path) + require.NoError(t, err) + require.Equal(t, multicodec.CarMultihashIndexSorted, sortedIndex.Codec()) + + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + br, err := carv2.NewBlockReader(f) + require.NoError(t, err) + + subject := generateMultihashSortedIndex(t, path) + for { + wantNext, err := br.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + dmh, err := multihash.Decode(wantNext.Cid().Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + continue + } + + wantCid := wantNext.Cid() + var wantOffsets []uint64 + err = sortedIndex.GetAll(wantCid, func(o uint64) bool { + wantOffsets = append(wantOffsets, o) + return false + }) + require.NoError(t, err) + + var gotOffsets []uint64 + err = subject.GetAll(wantCid, func(o uint64) bool { + gotOffsets = append(gotOffsets, o) + return false + }) + + require.NoError(t, err) + require.Equal(t, wantOffsets, gotOffsets) + } +} + +func TestMultihashSorted_ForEachIsConsistentWithGetAll(t *testing.T) { + path := "testdata/sample-v1.car" + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + + br, err := carv2.NewBlockReader(f) + require.NoError(t, err) + subject := generateMultihashSortedIndex(t, path) + + gotForEach := make(map[string]uint64) + err = subject.ForEach(func(mh multihash.Multihash, offset uint64) error { + gotForEach[mh.String()] = offset + return nil + }) + require.NoError(t, err) + + for { + b, err := br.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + c := b.Cid() + dmh, err := multihash.Decode(c.Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + continue + } + + wantMh := c.Hash() + + var wantOffset uint64 + err = subject.GetAll(c, func(u uint64) bool { + wantOffset = u + return false + }) + require.NoError(t, err) + + s := wantMh.String() + gotOffset, ok := gotForEach[s] + require.True(t, ok) + require.Equal(t, wantOffset, gotOffset) + } +} + +func generateMultihashSortedIndex(t *testing.T, path string) *index.MultihashIndexSorted { + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + reader := internalio.ToByteReadSeeker(f) + header, err := carv1.ReadHeader(reader, carv1.DefaultMaxAllowedHeaderSize) + require.NoError(t, err) + require.Equal(t, uint64(1), header.Version) + + idx := index.NewMultihashSorted() + records := make([]index.Record, 0) + + var 
sectionOffset int64 + sectionOffset, err = reader.Seek(0, io.SeekCurrent) + require.NoError(t, err) + + for { + sectionLen, err := varint.ReadUvarint(reader) + if err == io.EOF { + break + } + require.NoError(t, err) + + if sectionLen == 0 { + break + } + + cidLen, c, err := cid.CidFromReader(reader) + require.NoError(t, err) + records = append(records, index.Record{Cid: c, Offset: uint64(sectionOffset)}) + remainingSectionLen := int64(sectionLen) - int64(cidLen) + sectionOffset, err = reader.Seek(remainingSectionLen, io.SeekCurrent) + require.NoError(t, err) + } + + err = idx.Load(records) + require.NoError(t, err) + + return idx +} diff --git a/ipld/car/v2/internal/carv1/car.go b/ipld/car/v2/internal/carv1/car.go new file mode 100644 index 0000000000..469cb12747 --- /dev/null +++ b/ipld/car/v2/internal/carv1/car.go @@ -0,0 +1,284 @@ +package carv1 + +import ( + "context" + "fmt" + "io" + + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util" + + blocks "github.com/ipfs/boxo/blocks" + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + "github.com/ipfs/boxo/ipld/merkledag" + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + format "github.com/ipfs/go-ipld-format" +) + +const DefaultMaxAllowedHeaderSize uint64 = 32 << 20 // 32MiB +const DefaultMaxAllowedSectionSize uint64 = 8 << 20 // 8MiB + +func init() { + cbor.RegisterCborType(CarHeader{}) +} + +type Store interface { + Put(context.Context, blocks.Block) error +} + +type ReadStore interface { + Get(context.Context, cid.Cid) (blocks.Block, error) +} + +type CarHeader struct { + Roots []cid.Cid + Version uint64 +} + +type carWriter struct { + ds format.NodeGetter + w io.Writer +} + +func WriteCar(ctx context.Context, ds format.NodeGetter, roots []cid.Cid, w io.Writer) error { + h := &CarHeader{ + Roots: roots, + Version: 1, + } + + if err := WriteHeader(h, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + cw := &carWriter{ds: ds, w: w} + seen := cid.NewSet() + for _, r := range roots { + if err := merkledag.Walk(ctx, cw.enumGetLinks, r, seen.Visit); err != nil { + return err + } + } + return nil +} + +func ReadHeaderAt(at io.ReaderAt, maxReadBytes uint64) (*CarHeader, error) { + var rr io.Reader + switch r := at.(type) { + case io.Reader: + rr = r + default: + var err error + rr, err = internalio.NewOffsetReadSeeker(r, 0) + if err != nil { + return nil, err + } + } + return ReadHeader(rr, maxReadBytes) +} + +func ReadHeader(r io.Reader, maxReadBytes uint64) (*CarHeader, error) { + hb, err := util.LdRead(r, false, maxReadBytes) + if err != nil { + if err == util.ErrSectionTooLarge { + err = util.ErrHeaderTooLarge + } + return nil, err + } + + var ch CarHeader + if err := cbor.DecodeInto(hb, &ch); err != nil { + return nil, fmt.Errorf("invalid header: %v", err) + } + + return &ch, nil +} + +func WriteHeader(h *CarHeader, w io.Writer) error { + hb, err := cbor.DumpObject(h) + if err != nil { + return err + } + + return util.LdWrite(w, hb) +} + +func HeaderSize(h *CarHeader) (uint64, error) { + hb, err := cbor.DumpObject(h) + if err != nil { + return 0, err + } + + return util.LdSize(hb), nil +} + +func (cw *carWriter) enumGetLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { + nd, err := cw.ds.Get(ctx, c) + if err != nil { + return nil, err + } + + if err := cw.writeNode(ctx, nd); err != nil { + return nil, err + } + + return nd.Links(), nil +} + +func (cw *carWriter) writeNode(ctx context.Context, nd format.Node) error { + return util.LdWrite(cw.w, nd.Cid().Bytes(), 
nd.RawData()) +} + +type CarReader struct { + r io.Reader + Header *CarHeader + zeroLenAsEOF bool + maxAllowedSectionSize uint64 +} + +func NewCarReaderWithZeroLengthSectionAsEOF(r io.Reader) (*CarReader, error) { + return NewCarReaderWithoutDefaults(r, true, DefaultMaxAllowedHeaderSize, DefaultMaxAllowedSectionSize) +} + +func NewCarReader(r io.Reader) (*CarReader, error) { + return NewCarReaderWithoutDefaults(r, false, DefaultMaxAllowedHeaderSize, DefaultMaxAllowedSectionSize) +} + +func NewCarReaderWithoutDefaults(r io.Reader, zeroLenAsEOF bool, maxAllowedHeaderSize uint64, maxAllowedSectionSize uint64) (*CarReader, error) { + ch, err := ReadHeader(r, maxAllowedHeaderSize) + if err != nil { + return nil, err + } + + if ch.Version != 1 { + return nil, fmt.Errorf("invalid car version: %d", ch.Version) + } + + if len(ch.Roots) == 0 { + return nil, fmt.Errorf("empty car, no roots") + } + + return &CarReader{ + r: r, + Header: ch, + zeroLenAsEOF: zeroLenAsEOF, + maxAllowedSectionSize: maxAllowedSectionSize, + }, nil +} + +func (cr *CarReader) Next() (blocks.Block, error) { + c, data, err := util.ReadNode(cr.r, cr.zeroLenAsEOF, cr.maxAllowedSectionSize) + if err != nil { + return nil, err + } + + hashed, err := c.Prefix().Sum(data) + if err != nil { + return nil, err + } + + if !hashed.Equals(c) { + return nil, fmt.Errorf("mismatch in content integrity, name: %s, data: %s", c, hashed) + } + + return blocks.NewBlockWithCid(data, c) +} + +type batchStore interface { + PutMany(context.Context, []blocks.Block) error +} + +func LoadCar(s Store, r io.Reader) (*CarHeader, error) { + ctx := context.TODO() + cr, err := NewCarReader(r) + if err != nil { + return nil, err + } + + if bs, ok := s.(batchStore); ok { + return loadCarFast(ctx, bs, cr) + } + + return loadCarSlow(ctx, s, cr) +} + +func loadCarFast(ctx context.Context, s batchStore, cr *CarReader) (*CarHeader, error) { + var buf []blocks.Block + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + if len(buf) > 0 { + if err := s.PutMany(ctx, buf); err != nil { + return nil, err + } + } + return cr.Header, nil + } + return nil, err + } + + buf = append(buf, blk) + + if len(buf) > 1000 { + if err := s.PutMany(ctx, buf); err != nil { + return nil, err + } + buf = buf[:0] + } + } +} + +func loadCarSlow(ctx context.Context, s Store, cr *CarReader) (*CarHeader, error) { + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + return cr.Header, nil + } + return nil, err + } + + if err := s.Put(ctx, blk); err != nil { + return nil, err + } + } +} + +// Matches checks whether two headers match. +// Two headers are considered matching if: +// 1. They have the same version number, and +// 2. They contain the same root CIDs in any order. +// +// Note, this function explicitly ignores the order of roots. +// If order of roots matter use reflect.DeepEqual instead. +func (h CarHeader) Matches(other CarHeader) bool { + if h.Version != other.Version { + return false + } + thisLen := len(h.Roots) + if thisLen != len(other.Roots) { + return false + } + // Headers with a single root are popular. + // Implement a fast execution path for popular cases. + if thisLen == 1 { + return h.Roots[0].Equals(other.Roots[0]) + } + + // Check other contains all roots. + // TODO: should this be optimised for cases where the number of roots are large since it has O(N^2) complexity? 
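+	// One possible approach (not implemented here): sort copies of both root slices by their
+	// byte representation and compare them pairwise, reducing the check to O(N log N).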
+ for _, r := range h.Roots { + if !other.containsRoot(r) { + return false + } + } + return true +} + +func (h *CarHeader) containsRoot(root cid.Cid) bool { + for _, r := range h.Roots { + if r.Equals(root) { + return true + } + } + return false +} diff --git a/ipld/car/v2/internal/carv1/car_test.go b/ipld/car/v2/internal/carv1/car_test.go new file mode 100644 index 0000000000..21602114e4 --- /dev/null +++ b/ipld/car/v2/internal/carv1/car_test.go @@ -0,0 +1,333 @@ +package carv1 + +import ( + "bytes" + "context" + "encoding/hex" + "io" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ipfs/boxo/ipld/merkledag" + dstest "github.com/ipfs/boxo/ipld/merkledag/test" + cid "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" +) + +func assertAddNodes(t *testing.T, ds format.DAGService, nds ...format.Node) { + for _, nd := range nds { + if err := ds.Add(context.Background(), nd); err != nil { + t.Fatal(err) + } + } +} + +func TestRoundtrip(t *testing.T) { + dserv := dstest.Mock() + a := merkledag.NewRawNode([]byte("aaaa")) + b := merkledag.NewRawNode([]byte("bbbb")) + c := merkledag.NewRawNode([]byte("cccc")) + + nd1 := &merkledag.ProtoNode{} + nd1.AddNodeLink("cat", a) + + nd2 := &merkledag.ProtoNode{} + nd2.AddNodeLink("first", nd1) + nd2.AddNodeLink("dog", b) + + nd3 := &merkledag.ProtoNode{} + nd3.AddNodeLink("second", nd2) + nd3.AddNodeLink("bear", c) + + assertAddNodes(t, dserv, a, b, c, nd1, nd2, nd3) + + buf := new(bytes.Buffer) + if err := WriteCar(context.Background(), dserv, []cid.Cid{nd3.Cid()}, buf); err != nil { + t.Fatal(err) + } + + bserv := dstest.Bserv() + ch, err := LoadCar(bserv.Blockstore(), buf) + if err != nil { + t.Fatal(err) + } + + if len(ch.Roots) != 1 { + t.Fatal("should have one root") + } + + if !ch.Roots[0].Equals(nd3.Cid()) { + t.Fatal("got wrong cid") + } + + bs := bserv.Blockstore() + for _, nd := range []format.Node{a, b, c, nd1, nd2, nd3} { + has, err := bs.Has(context.TODO(), nd.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("should have cid in blockstore") + } + } +} + +func TestEOFHandling(t *testing.T) { + // fixture is a clean single-block, single-root CAR + fixture, err := hex.DecodeString("3aa265726f6f747381d82a58250001711220151fe9e73c6267a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80b6776657273696f6e012c01711220151fe9e73c6267a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80ba165646f646779f5") + if err != nil { + t.Fatal(err) + } + + load := func(t *testing.T, byts []byte) *CarReader { + cr, err := NewCarReader(bytes.NewReader(byts)) + if err != nil { + t.Fatal(err) + } + + blk, err := cr.Next() + if err != nil { + t.Fatal(err) + } + if blk.Cid().String() != "bafyreiavd7u6opdcm6tqmddpnrgmvfb4enxuwglhenejmchnwqvixd5ibm" { + t.Fatal("unexpected CID") + } + + return cr + } + + t.Run("CleanEOF", func(t *testing.T) { + cr := load(t, fixture) + + blk, err := cr.Next() + if err != io.EOF { + t.Fatal("Didn't get expected EOF") + } + if blk != nil { + t.Fatal("EOF returned expected block") + } + }) + + t.Run("BadVarint", func(t *testing.T) { + fixtureBadVarint := append(fixture, 160) + cr := load(t, fixtureBadVarint) + + blk, err := cr.Next() + if err != io.ErrUnexpectedEOF { + t.Fatal("Didn't get unexpected EOF") + } + if blk != nil { + t.Fatal("EOF returned unexpected block") + } + }) + + t.Run("TruncatedBlock", func(t *testing.T) { + fixtureTruncatedBlock := append(fixture, 100, 0, 0) + cr := load(t, fixtureTruncatedBlock) + + blk, err := cr.Next() + if err != io.ErrUnexpectedEOF { + 
t.Fatal("Didn't get unexpected EOF")
+		}
+		if blk != nil {
+			t.Fatal("EOF returned unexpected block")
+		}
+	})
+}
+
+func TestBadHeaders(t *testing.T) {
+	testCases := []struct {
+		name   string
+		hex    string
+		errStr string // either the whole error string
+		errPfx string // or just the prefix
+	}{
+		{
+			"{version:2}",
+			"0aa16776657273696f6e02",
+			"invalid car version: 2",
+			"",
+		},
+		{
+			// an unfortunate error because we don't use a pointer
+			"{roots:[baeaaaa3bmjrq]}",
+			"13a165726f6f747381d82a480001000003616263",
+			"invalid car version: 0",
+			"",
+		},
+		{
+			"{version:\"1\",roots:[baeaaaa3bmjrq]}",
+			"1da265726f6f747381d82a4800010000036162636776657273696f6e6131",
+			"",
+			"invalid header: ",
+		},
+		{
+			"{version:1}",
+			"0aa16776657273696f6e01",
+			"empty car, no roots",
+			"",
+		},
+		{
+			"{version:1,roots:{cid:baeaaaa3bmjrq}}",
+			"20a265726f6f7473a163636964d82a4800010000036162636776657273696f6e01",
+			"",
+			"invalid header: ",
+		},
+		{
+			"{version:1,roots:[baeaaaa3bmjrq],blip:true}",
+			"22a364626c6970f565726f6f747381d82a4800010000036162636776657273696f6e01",
+			"",
+			"invalid header: ",
+		},
+		{
+			"[1,[]]",
+			"03820180",
+			"",
+			"invalid header: ",
+		},
+		{
+			// this is an unfortunate error, it'd be nice to catch it better but it's
+			// very unlikely we'd ever see this in practice
+			"null",
+			"01f6",
+			"",
+			"invalid car version: 0",
+		},
+	}
+
+	makeCar := func(t *testing.T, byts string) error {
+		fixture, err := hex.DecodeString(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		_, err = NewCarReader(bytes.NewReader(fixture))
+		return err
+	}
+
+	t.Run("Sanity check {version:1,roots:[baeaaaa3bmjrq]}", func(t *testing.T) {
+		err := makeCar(t, "1ca265726f6f747381d82a4800010000036162636776657273696f6e01")
+		if err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			err := makeCar(t, tc.hex)
+			if err == nil {
+				t.Fatal("expected error from bad header, didn't get one")
+			}
+			if tc.errStr != "" {
+				if err.Error() != tc.errStr {
+					t.Fatalf("bad error: %v", err)
+				}
+			} else {
+				if !strings.HasPrefix(err.Error(), tc.errPfx) {
+					t.Fatalf("bad error: %v", err)
+				}
+			}
+		})
+	}
+}
+
+func TestCarHeaderMatches(t *testing.T) {
+	oneCid := merkledag.NewRawNode([]byte("fish")).Cid()
+	anotherCid := merkledag.NewRawNode([]byte("lobster")).Cid()
+	tests := []struct {
+		name  string
+		one   CarHeader
+		other CarHeader
+		want  bool
+	}{
+		{
+			"SameVersionNilRootsIsMatching",
+			CarHeader{nil, 1},
+			CarHeader{nil, 1},
+			true,
+		},
+		{
+			"SameVersionEmptyRootsIsMatching",
+			CarHeader{[]cid.Cid{}, 1},
+			CarHeader{[]cid.Cid{}, 1},
+			true,
+		},
+		{
+			"SameVersionNonEmptySameRootsIsMatching",
+			CarHeader{[]cid.Cid{oneCid}, 1},
+			CarHeader{[]cid.Cid{oneCid}, 1},
+			true,
+		},
+		{
+			"SameVersionNonEmptySameRootsInDifferentOrderIsMatching",
+			CarHeader{[]cid.Cid{oneCid, anotherCid}, 1},
+			CarHeader{[]cid.Cid{anotherCid, oneCid}, 1},
+			true,
+		},
+		{
+			"SameVersionDifferentRootsIsNotMatching",
+			CarHeader{[]cid.Cid{oneCid}, 1},
+			CarHeader{[]cid.Cid{anotherCid}, 1},
+			false,
+		},
+		{
+			"DifferentVersionDifferentRootsIsNotMatching",
+			CarHeader{[]cid.Cid{oneCid}, 0},
+			CarHeader{[]cid.Cid{anotherCid}, 1},
+			false,
+		},
+		{
+			"MismatchingVersionIsNotMatching",
+			CarHeader{nil, 0},
+			CarHeader{nil, 1},
+			false,
+		},
+		{
+			"ZeroValueHeadersAreMatching",
+			CarHeader{},
+			CarHeader{},
+			true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.one.Matches(tt.other)
+			require.Equal(t, tt.want, got, "Matches() = %v, want %v", got, tt.want)
+		})
+	}
+} + +func TestReadingZeroLengthSectionWithoutOptionSetIsError(t *testing.T) { + f, err := os.Open("../../testdata/sample-v1-with-zero-len-section.car") + require.NoError(t, err) + subject, err := NewCarReader(f) + require.NoError(t, err) + + for { + _, err := subject.Next() + if err == io.EOF { + break + } else if err != nil { + require.EqualError(t, err, "varints malformed, could not reach the end") + return + } + } + require.Fail(t, "expected error when reading file with zero section without option set") +} + +func TestReadingZeroLengthSectionWithOptionSetIsSuccess(t *testing.T) { + f, err := os.Open("../../testdata/sample-v1-with-zero-len-section.car") + require.NoError(t, err) + subject, err := NewCarReaderWithZeroLengthSectionAsEOF(f) + require.NoError(t, err) + + for { + _, err := subject.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + } +} diff --git a/ipld/car/v2/internal/carv1/doc.go b/ipld/car/v2/internal/carv1/doc.go new file mode 100644 index 0000000000..821ca2f0aa --- /dev/null +++ b/ipld/car/v2/internal/carv1/doc.go @@ -0,0 +1,2 @@ +// Forked from CARv1 to avoid dependency to ipld-prime 0.9.0 due to outstanding upgrades in filecoin. +package carv1 diff --git a/ipld/car/v2/internal/carv1/util/util.go b/ipld/car/v2/internal/carv1/util/util.go new file mode 100644 index 0000000000..eb20da6521 --- /dev/null +++ b/ipld/car/v2/internal/carv1/util/util.go @@ -0,0 +1,98 @@ +package util + +import ( + "errors" + "io" + + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + + "github.com/multiformats/go-varint" + + cid "github.com/ipfs/go-cid" +) + +var ErrSectionTooLarge = errors.New("invalid section data, length of read beyond allowable maximum") +var ErrHeaderTooLarge = errors.New("invalid header data, length of read beyond allowable maximum") + +type BytesReader interface { + io.Reader + io.ByteReader +} + +func ReadNode(r io.Reader, zeroLenAsEOF bool, maxReadBytes uint64) (cid.Cid, []byte, error) { + data, err := LdRead(r, zeroLenAsEOF, maxReadBytes) + if err != nil { + return cid.Cid{}, nil, err + } + + n, c, err := cid.CidFromBytes(data) + if err != nil { + return cid.Cid{}, nil, err + } + + return c, data[n:], nil +} + +func LdWrite(w io.Writer, d ...[]byte) error { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + + buf := make([]byte, 8) + n := varint.PutUvarint(buf, sum) + _, err := w.Write(buf[:n]) + if err != nil { + return err + } + + for _, s := range d { + _, err = w.Write(s) + if err != nil { + return err + } + } + + return nil +} + +func LdSize(d ...[]byte) uint64 { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + s := varint.UvarintSize(sum) + return sum + uint64(s) +} + +func LdReadSize(r io.Reader, zeroLenAsEOF bool, maxReadBytes uint64) (uint64, error) { + l, err := varint.ReadUvarint(internalio.ToByteReader(r)) + if err != nil { + // If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF. 
+ if l > 0 && err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } else if l == 0 && zeroLenAsEOF { + return 0, io.EOF + } + + if l > maxReadBytes { // Don't OOM + return 0, ErrSectionTooLarge + } + return l, nil +} + +func LdRead(r io.Reader, zeroLenAsEOF bool, maxReadBytes uint64) ([]byte, error) { + l, err := LdReadSize(r, zeroLenAsEOF, maxReadBytes) + if err != nil { + return nil, err + } + + buf := make([]byte, l) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + + return buf, nil +} diff --git a/ipld/car/v2/internal/carv1/util/util_test.go b/ipld/car/v2/internal/carv1/util/util_test.go new file mode 100644 index 0000000000..594312f991 --- /dev/null +++ b/ipld/car/v2/internal/carv1/util/util_test.go @@ -0,0 +1,28 @@ +package util_test + +import ( + "bytes" + crand "crypto/rand" + "math/rand" + "testing" + + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util" + + "github.com/stretchr/testify/require" +) + +func TestLdSize(t *testing.T) { + for i := 0; i < 5; i++ { + var buf bytes.Buffer + data := make([][]byte, 5) + for j := 0; j < 5; j++ { + data[j] = make([]byte, rand.Intn(30)) + _, err := crand.Read(data[j]) + require.NoError(t, err) + } + size := util.LdSize(data...) + err := util.LdWrite(&buf, data...) + require.NoError(t, err) + require.Equal(t, uint64(len(buf.Bytes())), size) + } +} diff --git a/ipld/car/v2/internal/errsort/search.go b/ipld/car/v2/internal/errsort/search.go new file mode 100644 index 0000000000..fb88617940 --- /dev/null +++ b/ipld/car/v2/internal/errsort/search.go @@ -0,0 +1,54 @@ +/* +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package errsort + +// Search is like sort.Search but accepts an erroring closure. +// If it errors the search is terminated immediately +func Search(n int, f func(int) (bool, error)) (int, error) { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. 
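+	// Usage sketch (hypothetical): find the first index in a sorted record set whose value is at
+	// least target, where the probe itself may fail with an I/O error:
+	//
+	//	i, err := Search(len(recs), func(h int) (bool, error) {
+	//		v, err := readValue(recs[h]) // readValue is a hypothetical, fallible probe
+	//		if err != nil {
+	//			return false, err
+	//		}
+	//		return v >= target, nil
+	//	})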
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + less, err := f(h) + if err != nil { + return 0, err + } + if !less { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i, nil +} diff --git a/ipld/car/v2/internal/io/converter.go b/ipld/car/v2/internal/io/converter.go new file mode 100644 index 0000000000..2e2844bd0e --- /dev/null +++ b/ipld/car/v2/internal/io/converter.go @@ -0,0 +1,155 @@ +package io + +import ( + "errors" + "io" + "sync" +) + +var ( + _ io.ByteReader = (*readerPlusByte)(nil) + _ io.ByteReader = (*readSeekerPlusByte)(nil) + _ io.ByteReader = (*discardingReadSeekerPlusByte)(nil) + _ io.ReadSeeker = (*discardingReadSeekerPlusByte)(nil) + _ io.ReadSeeker = (*readerAtSeeker)(nil) + _ io.ReaderAt = (*readSeekerAt)(nil) +) + +type ( + readerPlusByte struct { + io.Reader + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + readSeekerPlusByte struct { + io.ReadSeeker + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + discardingReadSeekerPlusByte struct { + io.Reader + offset int64 + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + ByteReadSeeker interface { + io.ReadSeeker + io.ByteReader + } + + readSeekerAt struct { + rs io.ReadSeeker + mu sync.Mutex + } + + readerAtSeeker struct { + ra io.ReaderAt + position int64 + mu sync.Mutex + } +) + +func ToByteReader(r io.Reader) io.ByteReader { + if br, ok := r.(io.ByteReader); ok { + return br + } + return &readerPlusByte{Reader: r} +} + +func ToByteReadSeeker(r io.Reader) ByteReadSeeker { + if brs, ok := r.(ByteReadSeeker); ok { + return brs + } + if rs, ok := r.(io.ReadSeeker); ok { + return &readSeekerPlusByte{ReadSeeker: rs} + } + return &discardingReadSeekerPlusByte{Reader: r} +} + +func ToReadSeeker(ra io.ReaderAt) io.ReadSeeker { + if rs, ok := ra.(io.ReadSeeker); ok { + return rs + } + return &readerAtSeeker{ra: ra} +} + +func ToReaderAt(rs io.ReadSeeker) io.ReaderAt { + if ra, ok := rs.(io.ReaderAt); ok { + return ra + } + return &readSeekerAt{rs: rs} +} + +func (rb *readerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(rb, rb.byteBuf[:]) + return rb.byteBuf[0], err +} + +func (rsb *readSeekerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(rsb, rsb.byteBuf[:]) + return rsb.byteBuf[0], err +} + +func (drsb *discardingReadSeekerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(drsb, drsb.byteBuf[:]) + return drsb.byteBuf[0], err +} + +func (drsb *discardingReadSeekerPlusByte) Read(p []byte) (read int, err error) { + read, err = drsb.Reader.Read(p) + drsb.offset += int64(read) + return +} + +func (drsb *discardingReadSeekerPlusByte) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + n := offset - drsb.offset + if n < 0 { + return 0, errors.New("unsupported rewind via whence: io.SeekStart") + } + _, err := io.CopyN(io.Discard, drsb, n) + return drsb.offset, err + case io.SeekCurrent: + _, err := io.CopyN(io.Discard, drsb, offset) + return drsb.offset, err + default: + return 0, errors.New("unsupported whence: io.SeekEnd") + } +} + +func (ras *readerAtSeeker) Read(p []byte) (n int, err error) { + ras.mu.Lock() + defer ras.mu.Unlock() + n, err = ras.ra.ReadAt(p, ras.position) + ras.position += int64(n) + return n, err +} + +func (ras *readerAtSeeker) Seek(offset int64, whence int) (int64, error) { + ras.mu.Lock() + defer 
ras.mu.Unlock()
+	switch whence {
+	case io.SeekStart:
+		ras.position = offset
+	case io.SeekCurrent:
+		ras.position += offset
+	case io.SeekEnd:
+		return 0, errors.New("unsupported whence: io.SeekEnd")
+	default:
+		return 0, errors.New("unsupported whence")
+	}
+	return ras.position, nil
+}
+
+func (rsa *readSeekerAt) ReadAt(p []byte, off int64) (n int, err error) {
+	rsa.mu.Lock()
+	defer rsa.mu.Unlock()
+	if _, err := rsa.rs.Seek(off, io.SeekStart); err != nil {
+		return 0, err
+	}
+	return rsa.rs.Read(p)
+}
diff --git a/ipld/car/v2/internal/io/offset_read_seeker.go b/ipld/car/v2/internal/io/offset_read_seeker.go
new file mode 100644
index 0000000000..b3899ab784
--- /dev/null
+++ b/ipld/car/v2/internal/io/offset_read_seeker.go
@@ -0,0 +1,122 @@
+package io
+
+import (
+	"errors"
+	"io"
+)
+
+var (
+	_ io.ReaderAt   = (*offsetReadSeeker)(nil)
+	_ io.ReadSeeker = (*offsetReadSeeker)(nil)
+)
+
+// offsetReadSeeker implements Read and ReadAt on a section
+// of an underlying io.ReaderAt.
+// The main difference between io.SectionReader and offsetReadSeeker is that
+// NewOffsetReadSeeker does not require the user to know the number of readable bytes.
+//
+// It also partially implements Seek, where the implementation panics if io.SeekEnd is passed.
+// This is because offsetReadSeeker does not know the end of the file and therefore cannot seek
+// relative to it.
+type offsetReadSeeker struct {
+	r    io.ReaderAt
+	base int64
+	off  int64
+	b    [1]byte // avoid alloc in ReadByte
+}
+
+type ReadSeekerAt interface {
+	io.Reader
+	io.ReaderAt
+	io.Seeker
+	io.ByteReader
+}
+
+// NewOffsetReadSeeker returns a ReadSeekerAt that reads from r
+// starting at offset off and stops with io.EOF when r reaches its end.
+// The Seek function will panic if whence io.SeekEnd is passed.
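+//
+// An illustrative use (f is assumed to be an *os.File):
+//
+//	rs, _ := NewOffsetReadSeeker(f, 51) // position 0 maps to byte 51 of f
+//	b, _ := rs.ReadByte()               // returns the byte at offset 51 of f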
+func NewOffsetReadSeeker(r io.ReaderAt, off int64) (ReadSeekerAt, error) { + if or, ok := r.(*offsetReadSeeker); ok { + oldBase := or.base + newBase := or.base + off + if newBase < oldBase { + return nil, errors.New("NewOffsetReadSeeker overflow int64") + } + return &offsetReadSeeker{ + r: or.r, + base: newBase, + off: newBase, + }, nil + } + return &offsetReadSeeker{ + r: r, + base: off, + off: off, + }, nil +} + +func (o *offsetReadSeeker) Read(p []byte) (n int, err error) { + n, err = o.r.ReadAt(p, o.off) + oldOffset := o.off + off := oldOffset + int64(n) + if off < oldOffset { + return 0, errors.New("ReadAt offset overflow") + } + o.off = off + return +} + +func (o *offsetReadSeeker) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, io.EOF + } + oldOffset := off + off += o.base + if off < oldOffset { + return 0, errors.New("ReadAt offset overflow") + } + return o.r.ReadAt(p, off) +} + +func (o *offsetReadSeeker) ReadByte() (byte, error) { + _, err := o.Read(o.b[:]) + return o.b[0], err +} + +func (o *offsetReadSeeker) Offset() int64 { + return o.off +} + +func (o *offsetReadSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + oldOffset := offset + off := offset + o.base + if off < oldOffset { + return 0, errors.New("Seek offset overflow") + } + o.off = off + case io.SeekCurrent: + oldOffset := o.off + if offset < 0 { + if -offset > oldOffset { + return 0, errors.New("Seek offset underflow") + } + o.off = oldOffset + offset + } else { + off := oldOffset + offset + if off < oldOffset { + return 0, errors.New("Seek offset overflow") + } + o.off = off + } + case io.SeekEnd: + panic("unsupported whence: SeekEnd") + } + return o.Position(), nil +} + +// Position returns the current position of this reader relative to the initial offset. +func (o *offsetReadSeeker) Position() int64 { + return o.off - o.base +} diff --git a/ipld/car/v2/internal/io/offset_write_seeker.go b/ipld/car/v2/internal/io/offset_write_seeker.go new file mode 100644 index 0000000000..7e0f6ba58e --- /dev/null +++ b/ipld/car/v2/internal/io/offset_write_seeker.go @@ -0,0 +1,41 @@ +package io + +import "io" + +var ( + _ io.Writer = (*OffsetWriteSeeker)(nil) + _ io.WriteSeeker = (*OffsetWriteSeeker)(nil) +) + +type OffsetWriteSeeker struct { + w io.WriterAt + base int64 + offset int64 +} + +func NewOffsetWriter(w io.WriterAt, off int64) *OffsetWriteSeeker { + return &OffsetWriteSeeker{w, off, off} +} + +func (ow *OffsetWriteSeeker) Write(b []byte) (n int, err error) { + n, err = ow.w.WriteAt(b, ow.offset) + ow.offset += int64(n) + return +} + +func (ow *OffsetWriteSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + ow.offset = offset + ow.base + case io.SeekCurrent: + ow.offset += offset + case io.SeekEnd: + panic("unsupported whence: SeekEnd") + } + return ow.Position(), nil +} + +// Position returns the current position of this writer relative to the initial offset, i.e. the number of bytes written. 
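+// For example, a writer created with NewOffsetWriter(w, 51) that has written 10 bytes reports
+// Position() == 10, while the next write to the underlying io.WriterAt lands at offset 61.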
+func (ow *OffsetWriteSeeker) Position() int64 {
+	return ow.offset - ow.base
+}
diff --git a/ipld/car/v2/internal/loader/counting_loader.go b/ipld/car/v2/internal/loader/counting_loader.go
new file mode 100644
index 0000000000..e428993ea4
--- /dev/null
+++ b/ipld/car/v2/internal/loader/counting_loader.go
@@ -0,0 +1,61 @@
+package loader
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/ipld/go-ipld-prime"
+	"github.com/ipld/go-ipld-prime/linking"
+	"github.com/multiformats/go-varint"
+)
+
+// counter tracks how much data has been read.
+type counter struct {
+	totalRead uint64
+}
+
+func (c *counter) Size() uint64 {
+	return c.totalRead
+}
+
+// ReadCounter provides an externally consumable interface to the
+// additional data tracked about the linksystem.
+type ReadCounter interface {
+	Size() uint64
+}
+
+type countingReader struct {
+	r io.Reader
+	c *counter
+}
+
+func (c *countingReader) Read(p []byte) (int, error) {
+	n, err := c.r.Read(p)
+	c.c.totalRead += uint64(n)
+	return n, err
+}
+
+// CountingLinkSystem wraps an ipld linksystem to track, in a `counter` object, the size of
+// the data loaded from it. Each time nodes are loaded from the link system which trigger
+// block reads, the size of the block as it would appear in a CAR file is added to the counter
+// (including the size of the CID and the varint length for the block data).
+func CountingLinkSystem(ls ipld.LinkSystem) (ipld.LinkSystem, ReadCounter) {
+	c := counter{}
+	clc := ls
+	clc.StorageReadOpener = func(lc linking.LinkContext, l ipld.Link) (io.Reader, error) {
+		r, err := ls.StorageReadOpener(lc, l)
+		if err != nil {
+			return nil, err
+		}
+		buf := bytes.NewBuffer(nil)
+		n, err := buf.ReadFrom(r)
+		if err != nil {
+			return nil, err
+		}
+		size := varint.ToUvarint(uint64(n) + uint64(len(l.Binary())))
+		c.totalRead += uint64(len(size)) + uint64(len(l.Binary()))
+		return &countingReader{buf, &c}, nil
+	}
+	return clc, &c
+}
diff --git a/ipld/car/v2/internal/loader/writing_loader.go b/ipld/car/v2/internal/loader/writing_loader.go
new file mode 100644
index 0000000000..8be1af3f06
--- /dev/null
+++ b/ipld/car/v2/internal/loader/writing_loader.go
@@ -0,0 +1,126 @@
+package loader
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/ipfs/go-cid"
+	"github.com/ipld/go-ipld-prime"
+	"github.com/ipld/go-ipld-prime/linking"
+	"github.com/multiformats/go-multicodec"
+	"github.com/multiformats/go-varint"
+)
+
+type writerOutput struct {
+	w     io.Writer
+	size  uint64
+	code  multicodec.Code
+	rcrds map[cid.Cid]index.Record
+}
+
+func (w *writerOutput) Size() uint64 {
+	return w.size
+}
+
+func (w *writerOutput) Index() (index.Index, error) {
+	idx, err := index.New(w.code)
+	if err != nil {
+		return nil, err
+	}
+	rcrds := make([]index.Record, 0, len(w.rcrds))
+	for _, r := range w.rcrds {
+		rcrds = append(rcrds, r)
+	}
+	if err := idx.Load(rcrds); err != nil {
+		return nil, err
+	}
+
+	return idx, nil
+}
+
+// An IndexTracker tracks the records loaded/written and calculates an
+// index based on them.
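+//
+// A typical flow (sketch): wrap a LinkSystem with TeeingLinkSystem below, run a traversal that
+// loads each block of interest, then call Index() on the returned IndexTracker to obtain an
+// index covering everything that was written.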
+type IndexTracker interface {
+	ReadCounter
+	Index() (index.Index, error)
+}
+
+type writingReader struct {
+	r   io.Reader
+	len int64
+	cid string
+	wo  *writerOutput
+}
+
+func (w *writingReader) Read(p []byte) (int, error) {
+	if w.wo != nil {
+		// write the cid
+		size := varint.ToUvarint(uint64(w.len) + uint64(len(w.cid)))
+		if _, err := w.wo.w.Write(size); err != nil {
+			return 0, err
+		}
+		if _, err := w.wo.w.Write([]byte(w.cid)); err != nil {
+			return 0, err
+		}
+		cpy := bytes.NewBuffer(w.r.(*bytes.Buffer).Bytes())
+		if _, err := cpy.WriteTo(w.wo.w); err != nil {
+			return 0, err
+		}
+		_, c, err := cid.CidFromBytes([]byte(w.cid))
+		if err != nil {
+			return 0, err
+		}
+		w.wo.rcrds[c] = index.Record{
+			Cid:    c,
+			Offset: w.wo.size,
+		}
+		w.wo.size += uint64(w.len) + uint64(len(size)+len(w.cid))
+
+		w.wo = nil
+	}
+
+	return w.r.Read(p)
+}
+
+// TeeingLinkSystem wraps an ipld.LinkSystem so that each time a block is loaded from it,
+// that block is also written as a CAR block to the provided io.Writer. Metadata
+// (the size of data written) is provided in the second return value.
+// The `initialOffset` is used to calculate the offsets recorded for the index, and will be
+// included in the `.Size()` of the IndexTracker.
+//
+// An indexCodec of `index.CarIndexNoIndex` can be used to not track these offsets.
+func TeeingLinkSystem(ls ipld.LinkSystem, w io.Writer, initialOffset uint64, indexCodec multicodec.Code) (ipld.LinkSystem, IndexTracker) {
+	wo := writerOutput{
+		w:     w,
+		size:  initialOffset,
+		code:  indexCodec,
+		rcrds: make(map[cid.Cid]index.Record),
+	}
+
+	tls := ls
+	tls.StorageReadOpener = func(lc linking.LinkContext, l ipld.Link) (io.Reader, error) {
+		_, c, err := cid.CidFromBytes([]byte(l.Binary()))
+		if err != nil {
+			return nil, err
+		}
+
+		// if we've already read this cid in this session, don't re-write it.
+		if _, ok := wo.rcrds[c]; ok {
+			return ls.StorageReadOpener(lc, l)
+		}
+
+		r, err := ls.StorageReadOpener(lc, l)
+		if err != nil {
+			return nil, err
+		}
+		buf := bytes.NewBuffer(nil)
+		n, err := buf.ReadFrom(r)
+		if err != nil {
+			return nil, err
+		}
+		return &writingReader{buf, n, l.Binary(), &wo}, nil
+	}
+	return tls, &wo
+}
diff --git a/ipld/car/v2/internal/store/identity.go b/ipld/car/v2/internal/store/identity.go
new file mode 100644
index 0000000000..d61a57c48d
--- /dev/null
+++ b/ipld/car/v2/internal/store/identity.go
@@ -0,0 +1,17 @@
+package store
+
+import (
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multihash"
+)
+
+// IsIdentity inspects the CID and determines whether it is an IDENTITY CID.
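+//
+// For example, an identity CID inlines its payload in the multihash digest, so (sketch):
+//
+//	mh, _ := multihash.Sum([]byte("fish"), multihash.IDENTITY, -1)
+//	digest, ok, _ := IsIdentity(cid.NewCidV1(cid.Raw, mh))
+//	// ok == true, digest == []byte("fish")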
+func IsIdentity(key cid.Cid) (digest []byte, ok bool, err error) {
+	dmh, err := multihash.Decode(key.Hash())
+	if err != nil {
+		return nil, false, err
+	}
+	ok = dmh.Code == multihash.IDENTITY
+	digest = dmh.Digest
+	return digest, ok, nil
+}
diff --git a/ipld/car/v2/internal/store/index.go b/ipld/car/v2/internal/store/index.go
new file mode 100644
index 0000000000..b29ec8e86b
--- /dev/null
+++ b/ipld/car/v2/internal/store/index.go
@@ -0,0 +1,108 @@
+package store
+
+import (
+	"bytes"
+	"io"
+
+	carv2 "github.com/ipfs/boxo/ipld/car/v2"
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util"
+	internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io"
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multicodec"
+	"github.com/multiformats/go-varint"
+)
+
+// FindCid can be used to look up the existence, size and offset of a block
+// if it exists in the CAR as specified by the index, and optionally to read
+// the data bytes of the block.
+func FindCid(
+	reader io.ReaderAt,
+	idx index.Index,
+	key cid.Cid,
+	useWholeCids bool,
+	zeroLenAsEOF bool,
+	maxReadBytes uint64,
+	readBytes bool,
+) ([]byte, int64, int, error) {
+
+	var fnData []byte
+	var fnOffset int64
+	var fnLen int = -1
+	var fnErr error
+	err := idx.GetAll(key, func(offset uint64) bool {
+		reader, err := internalio.NewOffsetReadSeeker(reader, int64(offset))
+		if err != nil {
+			fnErr = err
+			return false
+		}
+		var readCid cid.Cid
+		if readBytes {
+			readCid, fnData, err = util.ReadNode(reader, zeroLenAsEOF, maxReadBytes)
+			if err != nil {
+				fnErr = err
+				return false
+			}
+			fnLen = len(fnData)
+		} else {
+			sectionLen, err := varint.ReadUvarint(reader)
+			if err != nil {
+				fnErr = err
+				return false
+			}
+			var cidLen int
+			cidLen, readCid, err = cid.CidFromReader(reader)
+			if err != nil {
+				fnErr = err
+				return false
+			}
+			fnLen = int(sectionLen) - cidLen
+			fnOffset = int64(offset) + reader.(interface{ Position() int64 }).Position()
+		}
+		if useWholeCids {
+			if !readCid.Equals(key) {
+				fnLen = -1
+				return true // continue looking
+			}
+			return false
+		} else {
+			if !bytes.Equal(readCid.Hash(), key.Hash()) {
+				// weird, bad index, continue looking
+				fnLen = -1
+				return true
+			}
+			return false
+		}
+	})
+	if err != nil {
+		return nil, -1, -1, err
+	}
+	if fnErr != nil {
+		return nil, -1, -1, fnErr
+	}
+	if fnLen == -1 {
+		return nil, -1, -1, index.ErrNotFound
+	}
+	return fnData, fnOffset, fnLen, nil
+}
+
+// Finalize will write the index to the writer at the offset specified in the header. It should only
+// be used for a CARv2 and when the CAR interface is being closed.
+func Finalize(writer io.WriterAt, header carv2.Header, idx *InsertionIndex, dataSize uint64, storeIdentityCIDs bool, indexCodec multicodec.Code) error {
+	// TODO check if add index option is set and don't write the index then set index offset to zero.
+	header = header.WithDataSize(dataSize)
+	header.Characteristics.SetFullyIndexed(storeIdentityCIDs)
+
+	// TODO if index not needed don't bother flattening it.
+	fi, err := idx.Flatten(indexCodec)
+	if err != nil {
+		return err
+	}
+	if _, err := index.WriteTo(fi, internalio.NewOffsetWriter(writer, int64(header.IndexOffset))); err != nil {
+		return err
+	}
+	if _, err := header.WriteTo(internalio.NewOffsetWriter(writer, carv2.PragmaSize)); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/ipld/car/v2/internal/store/indexcheck.go b/ipld/car/v2/internal/store/indexcheck.go
new file mode 100644
index 0000000000..89c563bb5b
--- /dev/null
+++ b/ipld/car/v2/internal/store/indexcheck.go
@@ -0,0 +1,84 @@
+package store
+
+import (
+	carv2 "github.com/ipfs/boxo/ipld/car/v2"
+	"github.com/ipfs/go-cid"
+)
+
+// ShouldPut returns true if the block should be put into the CAR according to the options provided
+// and the index. It returns false if the block should not be put into the CAR, either because it
+// is an identity block and StoreIdentityCIDs is false, or because it already exists and
+// BlockstoreAllowDuplicatePuts is false.
+func ShouldPut(
+	idx *InsertionIndex,
+	c cid.Cid,
+	maxIndexCidSize uint64,
+	storeIdentityCIDs bool,
+	blockstoreAllowDuplicatePuts bool,
+	blockstoreUseWholeCIDs bool,
+) (bool, error) {
+
+	// If the StoreIdentityCIDs option is disabled then treat IDENTITY CIDs like IdStore.
+	if !storeIdentityCIDs {
+		// Check for IDENTITY CID. If IDENTITY, ignore and move to the next block.
+		if _, ok, err := IsIdentity(c); err != nil {
+			return false, err
+		} else if ok {
+			return false, nil
+		}
+	}
+
+	// Check if its size is too big.
+	// If larger than the maximum allowed size, return an error.
+	// Note, we need to check this regardless of whether we have an IDENTITY CID or not,
+	// since multihash codes other than IDENTITY can result in large digests.
+	cSize := uint64(len(c.Bytes()))
+	if cSize > maxIndexCidSize {
+		return false, &carv2.ErrCidTooLarge{MaxSize: maxIndexCidSize, CurrentSize: cSize}
+	}
+
+	if !blockstoreAllowDuplicatePuts {
+		if blockstoreUseWholeCIDs {
+			has, err := idx.HasExactCID(c)
+			if err != nil {
+				return false, err
+			}
+			return !has, nil // deduplicated by CID
+		}
+		if !blockstoreUseWholeCIDs {
+			_, err := idx.Get(c)
+			if err == nil {
+				return false, nil // deduplicated by hash
+			}
+		}
+	}
+
+	return true, nil
+}
+
+// Has returns true if the block exists in the store according to the various
+// rules associated with the options. Similar to ShouldPut, but for the simpler
+// Has() case.
+func Has(
+	idx *InsertionIndex,
+	c cid.Cid,
+	maxIndexCidSize uint64,
+	storeIdentityCIDs bool,
+	blockstoreAllowDuplicatePuts bool,
+	blockstoreUseWholeCIDs bool,
+) (bool, error) {
+
+	// If the StoreIdentityCIDs option is disabled then treat IDENTITY CIDs like IdStore.
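+	// That is, report them as always present without consulting the index,
+	// since the block data is embedded in the CID itself.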
+ if !storeIdentityCIDs { + if _, ok, err := IsIdentity(c); err != nil { + return false, err + } else if ok { + return true, nil + } + } + + if blockstoreUseWholeCIDs { + return idx.HasExactCID(c) + } + return idx.HasMultihash(c.Hash()) +} diff --git a/ipld/car/v2/internal/store/insertionindex.go b/ipld/car/v2/internal/store/insertionindex.go new file mode 100644 index 0000000000..a27844552d --- /dev/null +++ b/ipld/car/v2/internal/store/insertionindex.go @@ -0,0 +1,264 @@ +package store + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/petar/GoLLRB/llrb" + cbor "github.com/whyrusleeping/cbor/go" +) + +// This index is intended to be efficient for random-access, in-memory lookups +// and is not intended to be an index type that is attached to a CARv2. +// See flatten() for conversion of this data to a known, existing index type. + +var ( + errUnsupported = errors.New("not supported") + insertionIndexCodec = multicodec.Code(0x300003) +) + +type InsertionIndex struct { + items llrb.LLRB +} + +func NewInsertionIndex() *InsertionIndex { + return &InsertionIndex{} +} + +type recordDigest struct { + digest []byte + index.Record +} + +func (r recordDigest) Less(than llrb.Item) bool { + other, ok := than.(recordDigest) + if !ok { + return false + } + return bytes.Compare(r.digest, other.digest) < 0 +} + +func newRecordDigest(r index.Record) recordDigest { + d, err := multihash.Decode(r.Hash()) + if err != nil { + panic(err) + } + + return recordDigest{d.Digest, r} +} + +func newRecordFromCid(c cid.Cid, at uint64) recordDigest { + d, err := multihash.Decode(c.Hash()) + if err != nil { + panic(err) + } + + return recordDigest{d.Digest, index.Record{Cid: c, Offset: at}} +} + +func (ii *InsertionIndex) InsertNoReplace(key cid.Cid, n uint64) { + ii.items.InsertNoReplace(newRecordFromCid(key, n)) +} + +func (ii *InsertionIndex) Get(c cid.Cid) (uint64, error) { + record, err := ii.getRecord(c) + if err != nil { + return 0, err + } + return record.Offset, nil +} + +func (ii *InsertionIndex) getRecord(c cid.Cid) (index.Record, error) { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return index.Record{}, err + } + entry := recordDigest{digest: d.Digest} + e := ii.items.Get(entry) + if e == nil { + return index.Record{}, index.ErrNotFound + } + r, ok := e.(recordDigest) + if !ok { + return index.Record{}, errUnsupported + } + + return r.Record, nil +} + +func (ii *InsertionIndex) GetAll(c cid.Cid, fn func(uint64) bool) error { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return err + } + entry := recordDigest{digest: d.Digest} + + any := false + iter := func(i llrb.Item) bool { + existing := i.(recordDigest) + if !bytes.Equal(existing.digest, entry.digest) { + // We've already looked at all entries with matching digests. 
+ return false + } + any = true + return fn(existing.Record.Offset) + } + ii.items.AscendGreaterOrEqual(entry, iter) + if !any { + return index.ErrNotFound + } + return nil +} + +func (ii *InsertionIndex) Marshal(w io.Writer) (uint64, error) { + l := uint64(0) + if err := binary.Write(w, binary.LittleEndian, int64(ii.items.Len())); err != nil { + return l, err + } + l += 8 + + var err error + iter := func(i llrb.Item) bool { + if err = cbor.Encode(w, i.(recordDigest).Record); err != nil { + return false + } + return true + } + ii.items.AscendGreaterOrEqual(ii.items.Min(), iter) + return l, err +} + +func (ii *InsertionIndex) Unmarshal(r io.Reader) error { + var length int64 + if err := binary.Read(r, binary.LittleEndian, &length); err != nil { + return err + } + d := cbor.NewDecoder(r) + for i := int64(0); i < length; i++ { + var rec index.Record + if err := d.Decode(&rec); err != nil { + return err + } + ii.items.InsertNoReplace(newRecordDigest(rec)) + } + return nil +} + +func (ii *InsertionIndex) ForEach(f func(multihash.Multihash, uint64) error) error { + var err error + ii.items.AscendGreaterOrEqual(ii.items.Min(), func(i llrb.Item) bool { + r := i.(recordDigest).Record + err = f(r.Cid.Hash(), r.Offset) + return err == nil + }) + return err +} + +func (ii *InsertionIndex) ForEachCid(f func(cid.Cid, uint64) error) error { + var err error + ii.items.AscendGreaterOrEqual(ii.items.Min(), func(i llrb.Item) bool { + r := i.(recordDigest).Record + err = f(r.Cid, r.Offset) + return err == nil + }) + return err +} + +func (ii *InsertionIndex) Codec() multicodec.Code { + return insertionIndexCodec +} + +func (ii *InsertionIndex) Load(rs []index.Record) error { + for _, r := range rs { + rec := newRecordDigest(r) + if rec.digest == nil { + return fmt.Errorf("invalid entry: %v", r) + } + ii.items.InsertNoReplace(rec) + } + return nil +} + +// flatten returns a formatted index in the given codec for more efficient subsequent loading. +func (ii *InsertionIndex) Flatten(codec multicodec.Code) (index.Index, error) { + si, err := index.New(codec) + if err != nil { + return nil, err + } + rcrds := make([]index.Record, ii.items.Len()) + + idx := 0 + iter := func(i llrb.Item) bool { + rcrds[idx] = i.(recordDigest).Record + idx++ + return true + } + ii.items.AscendGreaterOrEqual(ii.items.Min(), iter) + + if err := si.Load(rcrds); err != nil { + return nil, err + } + return si, nil +} + +// note that hasExactCID is very similar to GetAll, +// but it's separate as it allows us to compare Record.Cid directly, +// whereas GetAll just provides Record.Offset. + +func (ii *InsertionIndex) HasExactCID(c cid.Cid) (bool, error) { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return false, err + } + entry := recordDigest{digest: d.Digest} + + found := false + iter := func(i llrb.Item) bool { + existing := i.(recordDigest) + if !bytes.Equal(existing.digest, entry.digest) { + // We've already looked at all entries with matching digests. + return false + } + if existing.Record.Cid == c { + // We found an exact match. + found = true + return false + } + // Continue looking in ascending order. 
+		return true
+	}
+	ii.items.AscendGreaterOrEqual(entry, iter)
+	return found, nil
+}
+
+func (ii *InsertionIndex) HasMultihash(mh multihash.Multihash) (bool, error) {
+	d, err := multihash.Decode(mh)
+	if err != nil {
+		return false, err
+	}
+	entry := recordDigest{digest: d.Digest}
+
+	found := false
+	iter := func(i llrb.Item) bool {
+		existing := i.(recordDigest)
+		if !bytes.Equal(existing.digest, entry.digest) {
+			// We've already looked at all entries with matching digests.
+			return false
+		}
+		if bytes.Equal(existing.Record.Cid.Hash(), mh) {
+			found = true
+			return false
+		}
+		// Continue looking in ascending order.
+		return true
+	}
+	ii.items.AscendGreaterOrEqual(entry, iter)
+	return found, nil
+}
diff --git a/ipld/car/v2/internal/store/resume.go b/ipld/car/v2/internal/store/resume.go
new file mode 100644
index 0000000000..0ba94570a5
--- /dev/null
+++ b/ipld/car/v2/internal/store/resume.go
@@ -0,0 +1,198 @@
+package store
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	carv2 "github.com/ipfs/boxo/ipld/car/v2"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+	internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io"
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-varint"
+)
+
+type ReaderWriterAt interface {
+	io.ReaderAt
+	io.WriterAt
+}
+
+// ResumableVersion performs two tasks: check that there is a valid header at the start of the
+// reader, then check whether the version of that header matches what we expect.
+func ResumableVersion(reader io.Reader, writeAsV1 bool) error {
+	version, err := carv2.ReadVersion(reader)
+	if err != nil {
+		// The file is not a valid CAR file and we cannot resume from it,
+		// or the write must have failed before the pragma was written.
+		return err
+	}
+
+	switch {
+	case version == 1 && writeAsV1:
+	case version == 2 && !writeAsV1:
+	default:
+		// The file is not the expected version and we cannot resume from it.
+		return fmt.Errorf("cannot resume on CAR file with version %v", version)
+	}
+	return nil
+}
+
+// Resume will attempt to resume a CARv2 or CARv1 file by checking that there exists an existing
+// CAR and that the CAR header details match what is being requested for resumption.
+// Resumption of a CARv2 involves "unfinalizing" the header by resetting it back to a bare state
+// and then truncating the file to remove the index. Truncation is important because a
+// non-finalized CARv2 header won't contain the DataSize of the payload body, so if the file
+// still contained an index we could not determine where the payload ends. Without truncation,
+// resuming from an existing, finalized CARv2 and then closing without finalization (e.g. due
+// to a crash) would leave a file that is no longer parseable: we would not have DataSize, and
+// we would not be able to determine it by parsing the payload to EOF.
+func Resume(
+	rw ReaderWriterAt,
+	dataReader io.ReaderAt,
+	dataWriter *internalio.OffsetWriteSeeker,
+	idx *InsertionIndex,
+	roots []cid.Cid,
+	dataOffset uint64,
+	v1 bool,
+	maxAllowedHeaderSize uint64,
+	zeroLengthSectionAsEOF bool,
+) error {
+
+	var headerInFile carv2.Header
+	var v1r internalio.ReadSeekerAt
+
+	if !v1 {
+		if _, ok := rw.(interface{ Truncate(size int64) error }); !ok {
+			return fmt.Errorf("cannot resume a CARv2 without the ability to truncate (e.g. an *os.File)")
+		}
+
+		// Check if the file was finalized by trying to read the CARv2 header.
+		// We check because, if it was finalized, the CARv1 reader behaviour needs to be adjusted:
+		// EOF will not signify the end of the CARv1 payload, i.e. an index is most likely present.
+		r, err := internalio.NewOffsetReadSeeker(rw, carv2.PragmaSize)
+		if err != nil {
+			return err
+		}
+		_, err = headerInFile.ReadFrom(r)
+
+		// If reading the CARv2 header succeeded, and the CARv1 offset in the header is not zero,
+		// then the file is most likely finalized. Check padding and truncate the file to remove
+		// the index. Otherwise, carry on reading the v1 payload at the offset determined from b.header.
+		if err == nil && headerInFile.DataOffset != 0 {
+			if headerInFile.DataOffset != dataOffset {
+				// Assert that the padding on file matches the given WithDataPadding option.
+				wantPadding := headerInFile.DataOffset - carv2.PragmaSize - carv2.HeaderSize
+				gotPadding := dataOffset - carv2.PragmaSize - carv2.HeaderSize
+				return fmt.Errorf(
+					"cannot resume from file with mismatched CARv1 offset; "+
+						"`WithDataPadding` option must match the padding on file. "+
+						"Expected padding value of %v but got %v", wantPadding, gotPadding,
+				)
+			} else if headerInFile.DataSize == 0 {
+				// If the CARv1 size is zero while the CARv1 offset wasn't, then the CARv2 header
+				// was most likely partially written. Since we write the header last in Finalize,
+				// the file most likely contains the index and we cannot know where it starts;
+				// therefore we can't resume.
+				return errors.New("corrupt CARv2 header; cannot resume from file")
+			}
+		}
+
+		v1r, err = internalio.NewOffsetReadSeeker(dataReader, 0)
+		if err != nil {
+			return err
+		}
+	} else {
+		var err error
+		v1r, err = internalio.NewOffsetReadSeeker(rw, 0)
+		if err != nil {
+			return err
+		}
+	}
+
+	header, err := carv1.ReadHeader(v1r, maxAllowedHeaderSize)
+	if err != nil {
+		// Cannot read the CARv1 header; the file is most likely corrupt.
+		return fmt.Errorf("error reading car header: %w", err)
+	}
+	if !header.Matches(carv1.CarHeader{Roots: roots, Version: 1}) {
+		// Cannot resume if the version and roots do not match.
+		return errors.New("cannot resume on file with mismatching data header")
+	}
+
+	if headerInFile.DataOffset != 0 {
+		// If the header in file contains the size of the CARv1, then the index is most likely
+		// present. Since we will need to re-generate the index, as the one in file is flattened,
+		// truncate the file so that the Readonly.backing has the right set of bytes to deal with.
+		// This effectively means that resuming from a finalized file will wipe its index, even if
+		// no blocks are put, unless the user calls finalize.
+		if err := rw.(interface{ Truncate(size int64) error }).Truncate(int64(headerInFile.DataOffset + headerInFile.DataSize)); err != nil {
+			return err
+		}
+	}
+
+	if !v1 {
+		// Now that the CARv2 header is present on file, clear it to avoid an incorrect size and
+		// offset in the header in case the blockstore is closed without finalization and is
+		// resumed from again.
+		wat, ok := rw.(io.WriterAt)
+		if !ok { // how would we get this far??
+			return errors.New("cannot resume from file without io.WriterAt")
+		}
+		if _, err := new(carv2.Header).WriteTo(internalio.NewOffsetWriter(wat, carv2.PragmaSize)); err != nil {
+			return fmt.Errorf("could not un-finalize: %w", err)
+		}
+	}
+
+	// TODO See how we can reduce duplicate code here.
+	// The code here comes from car.GenerateIndex.
+	// Copied because we need to populate an insertindex, not a sorted index.
+	// Producing a sorted index via generate, then converting it to an insertion index, is not possible
+	// because the Index interface does not expose internal records.
+	// This may be done as part of https://github.com/ipld/go-car/issues/95
+
+	offset, err := carv1.HeaderSize(header)
+	if err != nil {
+		return err
+	}
+	sectionOffset := int64(0)
+	if sectionOffset, err = v1r.Seek(int64(offset), io.SeekStart); err != nil {
+		return err
+	}
+
+	for {
+		// Grab the length of the section.
+		// Note that ReadUvarint wants a ByteReader.
+		length, err := varint.ReadUvarint(v1r)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		// Null padding; by default it's an error.
+		if length == 0 {
+			if zeroLengthSectionAsEOF {
+				break
+			} else {
+				return fmt.Errorf("carv1 null padding not allowed by default; see ZeroLengthSectionAsEOF")
+			}
+		}
+
+		// Grab the CID.
+		n, c, err := cid.CidFromReader(v1r)
+		if err != nil {
+			return err
+		}
+		idx.InsertNoReplace(c, uint64(sectionOffset))
+
+		// Seek to the next section by skipping the block.
+		// The section length includes the CID, so subtract it.
+		if sectionOffset, err = v1r.Seek(int64(length)-int64(n), io.SeekCurrent); err != nil {
+			return err
+		}
+	}
+	// Seek to the end of the last skipped block, where the writer should resume writing.
+	_, err = dataWriter.Seek(sectionOffset, io.SeekStart)
+	return err
+}
diff --git a/ipld/car/v2/options.go b/ipld/car/v2/options.go
new file mode 100644
index 0000000000..5a8c5dc867
--- /dev/null
+++ b/ipld/car/v2/options.go
@@ -0,0 +1,240 @@
+package car
+
+import (
+	"math"
+
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/ipld/go-ipld-prime/traversal"
+	"github.com/multiformats/go-multicodec"
+
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+)
+
+// DefaultMaxIndexCidSize specifies the maximum size, in bytes, accepted as a section CID by the CARv2 index.
+const DefaultMaxIndexCidSize = 2 << 10 // 2 KiB
+
+// DefaultMaxAllowedHeaderSize specifies the default maximum size that a CARv1
+// decode (including within a CARv2 container) will allow a header to be without
+// erroring. This is to prevent OOM errors where a header prefix includes a
+// too-large size specifier.
+// Currently set to 32 MiB.
+const DefaultMaxAllowedHeaderSize = carv1.DefaultMaxAllowedHeaderSize
+
+// DefaultMaxAllowedSectionSize specifies the default maximum size that a CARv1
+// decode (including within a CARv2 container) will allow a section to be
+// without erroring. This is to prevent OOM errors where a section prefix
+// includes a too-large size specifier.
+// Typically IPLD blocks should be under 2 MiB (ideally under 1 MiB), so unless
+// atypical data is expected, this should not be a large value.
+// Currently set to 8 MiB.
+const DefaultMaxAllowedSectionSize = carv1.DefaultMaxAllowedSectionSize
+
+// Option describes an option which affects behavior when interacting with CAR files.
+type Option func(*Options)
+
+// ReadOption hints that an API wants options related only to reading CAR files.
+type ReadOption = Option
+
+// WriteOption hints that an API wants options related only to writing CAR files.
+type WriteOption = Option
+
+// ReadWriteOption is either a ReadOption or a WriteOption.
+// Deprecated: use Option instead.
+type ReadWriteOption = Option
+
+// Options holds the configured options after applying a number of
+// Option funcs.
+//
+// This type should not be used directly by end users; it's only exposed as a
+// side effect of Option.
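+//
+// As an illustration only (not a prescribed usage pattern), a configuration
+// is typically assembled via ApplyOptions:
+//
+//	opts := car.ApplyOptions(
+//		car.UseIndexCodec(multicodec.CarMultihashIndexSorted),
+//		car.StoreIdentityCIDs(true),
+//	)
+//	_ = opts.IndexCodec // multicodec.CarMultihashIndexSorted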
+type Options struct { + DataPadding uint64 + IndexPadding uint64 + IndexCodec multicodec.Code + ZeroLengthSectionAsEOF bool + MaxIndexCidSize uint64 + StoreIdentityCIDs bool + + BlockstoreAllowDuplicatePuts bool + BlockstoreUseWholeCIDs bool + MaxTraversalLinks uint64 + WriteAsCarV1 bool + TraversalPrototypeChooser traversal.LinkTargetNodePrototypeChooser + TrustedCAR bool + + MaxAllowedHeaderSize uint64 + MaxAllowedSectionSize uint64 +} + +// ApplyOptions applies given opts and returns the resulting Options. +// This function should not be used directly by end users; it's only exposed as a +// side effect of Option. +func ApplyOptions(opt ...Option) Options { + opts := Options{ + MaxTraversalLinks: math.MaxInt64, //default: traverse all + MaxAllowedHeaderSize: carv1.DefaultMaxAllowedHeaderSize, + MaxAllowedSectionSize: carv1.DefaultMaxAllowedSectionSize, + } + for _, o := range opt { + o(&opts) + } + // Set defaults for zero valued fields. + if opts.IndexCodec == 0 { + opts.IndexCodec = multicodec.CarMultihashIndexSorted + } + if opts.MaxIndexCidSize == 0 { + opts.MaxIndexCidSize = DefaultMaxIndexCidSize + } + return opts +} + +// ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat +// a zero-length section as the end of the input CAR file. For example, this can +// be useful to allow "null padding" after a CARv1 without knowing where the +// padding begins. +func ZeroLengthSectionAsEOF(enable bool) Option { + return func(o *Options) { + o.ZeroLengthSectionAsEOF = enable + } +} + +// UseDataPadding sets the padding to be added between CARv2 header and its data payload on Finalize. +func UseDataPadding(p uint64) Option { + return func(o *Options) { + o.DataPadding = p + } +} + +// UseIndexPadding sets the padding between data payload and its index on Finalize. +func UseIndexPadding(p uint64) Option { + return func(o *Options) { + o.IndexPadding = p + } +} + +// UseIndexCodec sets the codec used for index generation. +func UseIndexCodec(c multicodec.Code) Option { + return func(o *Options) { + o.IndexCodec = c + } +} + +// WithoutIndex flags that no index should be included in generation. +func WithoutIndex() Option { + return func(o *Options) { + o.IndexCodec = index.CarIndexNone + } +} + +// StoreIdentityCIDs sets whether to persist sections that are referenced by +// CIDs with multihash.IDENTITY digest. +// When writing CAR files with this option, Characteristics.IsFullyIndexed will +// be set. +// +// By default, the blockstore interface will always return true for Has() called +// with identity CIDs, but when this option is turned on, it will defer to the +// index. +// +// When creating an index (or loading a CARv1 as a blockstore), when this option +// is on, identity CIDs will be included in the index. +// +// This option is disabled by default. +func StoreIdentityCIDs(b bool) Option { + return func(o *Options) { + o.StoreIdentityCIDs = b + } +} + +// MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in bytes. +// Indexing a CID with larger than the allowed size results in ErrCidTooLarge error. +func MaxIndexCidSize(s uint64) Option { + return func(o *Options) { + o.MaxIndexCidSize = s + } +} + +// WithTraversalPrototypeChooser specifies the prototype chooser that should be used +// when performing traversals in writes from a linksystem. 
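+// A LinkTargetNodePrototypeChooser maps a (link, link context) pair to the
+// node prototype used to assemble the loaded node, allowing traversals over
+// schema-typed nodes rather than the basicnode defaults.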
+func WithTraversalPrototypeChooser(t traversal.LinkTargetNodePrototypeChooser) Option {
+	return func(o *Options) {
+		o.TraversalPrototypeChooser = t
+	}
+}
+
+// WithTrustedCAR specifies whether the CAR file is trusted; when enabled,
+// verification that CIDs match the block data as it is read from the CAR
+// file is skipped.
+func WithTrustedCAR(t bool) Option {
+	return func(o *Options) {
+		o.TrustedCAR = t
+	}
+}
+
+// MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a
+// CARv1 decode (including within a CARv2 container) will allow a header to be
+// without erroring.
+func MaxAllowedHeaderSize(max uint64) Option {
+	return func(o *Options) {
+		o.MaxAllowedHeaderSize = max
+	}
+}
+
+// MaxAllowedSectionSize overrides the default maximum size (of 8 MiB) that a
+// CARv1 decode (including within a CARv2 container) will allow a section to be
+// without erroring.
+// Typically IPLD blocks should be under 2 MiB (ideally under 1 MiB), so unless
+// atypical data is expected, this should not be a large value.
+func MaxAllowedSectionSize(max uint64) Option {
+	return func(o *Options) {
+		o.MaxAllowedSectionSize = max
+	}
+}
+
+// --------------------------------------------------- storage interface options
+
+// UseWholeCIDs is a read option which makes a CAR storage interface (blockstore
+// or storage) identify blocks by whole CIDs, and not just their multihashes.
+// The default is to use multihashes, which matches the current semantics of
+// go-ipfs-blockstore v1.
+//
+// Enabling this option affects a number of methods, including read-only ones:
+//
+// • Get, Has, and HasSize will only return a block if the entire CID is
+// present in the CAR file.
+//
+// • AllKeysChan will return the original whole CIDs, instead of with their
+// multicodec set to "raw" to just provide multihashes.
+//
+// • If AllowDuplicatePuts isn't set, Put and PutMany will deduplicate by the
+// whole CID, allowing different CIDs with equal multihashes.
+//
+// Note that this option only affects the storage interfaces (blockstore
+// or storage), and is ignored by the root go-car/v2 package.
+func UseWholeCIDs(enable bool) Option {
+	return func(o *Options) {
+		o.BlockstoreUseWholeCIDs = enable
+	}
+}
+
+// WriteAsCarV1 is a write option which makes a CAR interface (blockstore or
+// storage) write the output as a CARv1 only, with no CARv2 header or index.
+// Indexing is used internally during write but is discarded upon finalization.
+//
+// Note that this option only affects the storage interfaces (blockstore
+// or storage), and is ignored by the root go-car/v2 package.
+func WriteAsCarV1(asCarV1 bool) Option {
+	return func(o *Options) {
+		o.WriteAsCarV1 = asCarV1
+	}
+}
+
+// AllowDuplicatePuts is a write option which makes a CAR interface (blockstore
+// or storage) not deduplicate blocks in Put and PutMany. The default is to
+// deduplicate, which matches the current semantics of go-ipfs-blockstore v1.
+//
+// Note that this option only affects the storage interfaces (blockstore
+// or storage), and is ignored by the root go-car/v2 package.
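+//
+// A minimal sketch (assuming the sibling blockstore package, which re-exports
+// this option, and its OpenReadWrite constructor):
+//
+//	bs, err := blockstore.OpenReadWrite(path, roots,
+//		blockstore.AllowDuplicatePuts(true))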
+func AllowDuplicatePuts(allow bool) Option { + return func(o *Options) { + o.BlockstoreAllowDuplicatePuts = allow + } +} diff --git a/ipld/car/v2/options_test.go b/ipld/car/v2/options_test.go new file mode 100644 index 0000000000..8964b981f5 --- /dev/null +++ b/ipld/car/v2/options_test.go @@ -0,0 +1,50 @@ +package car_test + +import ( + "math" + "testing" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" +) + +func TestApplyOptions_SetsExpectedDefaults(t *testing.T) { + require.Equal(t, carv2.Options{ + IndexCodec: multicodec.CarMultihashIndexSorted, + MaxIndexCidSize: carv2.DefaultMaxIndexCidSize, + MaxTraversalLinks: math.MaxInt64, + MaxAllowedHeaderSize: 32 << 20, + MaxAllowedSectionSize: 8 << 20, + }, carv2.ApplyOptions()) +} + +func TestApplyOptions_AppliesOptions(t *testing.T) { + require.Equal(t, + carv2.Options{ + DataPadding: 123, + IndexPadding: 456, + IndexCodec: multicodec.CarIndexSorted, + ZeroLengthSectionAsEOF: true, + MaxIndexCidSize: 789, + StoreIdentityCIDs: true, + BlockstoreAllowDuplicatePuts: true, + BlockstoreUseWholeCIDs: true, + MaxTraversalLinks: math.MaxInt64, + MaxAllowedHeaderSize: 101, + MaxAllowedSectionSize: 202, + }, + carv2.ApplyOptions( + carv2.UseDataPadding(123), + carv2.UseIndexPadding(456), + carv2.UseIndexCodec(multicodec.CarIndexSorted), + carv2.ZeroLengthSectionAsEOF(true), + carv2.MaxIndexCidSize(789), + carv2.StoreIdentityCIDs(true), + carv2.MaxAllowedHeaderSize(101), + carv2.MaxAllowedSectionSize(202), + blockstore.AllowDuplicatePuts(true), + blockstore.UseWholeCIDs(true), + )) +} diff --git a/ipld/car/v2/reader.go b/ipld/car/v2/reader.go new file mode 100644 index 0000000000..30092bc439 --- /dev/null +++ b/ipld/car/v2/reader.go @@ -0,0 +1,368 @@ +package car + +import ( + "errors" + "fmt" + "io" + "math" + + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util" + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" + "golang.org/x/exp/mmap" +) + +// Reader represents a reader of CARv2. +type Reader struct { + Header Header + Version uint64 + r io.ReaderAt + roots []cid.Cid + opts Options + closer io.Closer +} + +// OpenReader is a wrapper for NewReader which opens the file at path. +func OpenReader(path string, opts ...Option) (*Reader, error) { + f, err := mmap.Open(path) + if err != nil { + return nil, err + } + + r, err := NewReader(f, opts...) + if err != nil { + return nil, err + } + + r.closer = f + return r, nil +} + +// NewReader constructs a new reader that reads either CARv1 or CARv2 from the given r. +// Upon instantiation, the reader inspects the payload and provides appropriate read operations +// for both CARv1 and CARv2. +// +// Note that any other version other than 1 or 2 will result in an error. The caller may use +// Reader.Version to get the actual version r represents. In the case where r represents a CARv1 +// Reader.Header will not be populated and is left as zero-valued. +func NewReader(r io.ReaderAt, opts ...Option) (*Reader, error) { + cr := &Reader{ + r: r, + } + cr.opts = ApplyOptions(opts...) + + or, err := internalio.NewOffsetReadSeeker(r, 0) + if err != nil { + return nil, err + } + cr.Version, err = ReadVersion(or, opts...) 
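+	// Note that the CARv2 pragma is itself shaped like a minimal CARv1 header
+	// carrying only a version field, so reading the version this way works
+	// uniformly for CARv1 and CARv2 payloads.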
+ if err != nil { + return nil, err + } + + if cr.Version != 1 && cr.Version != 2 { + return nil, fmt.Errorf("invalid car version: %d", cr.Version) + } + + if cr.Version == 2 { + if err := cr.readV2Header(); err != nil { + return nil, err + } + } + + return cr, nil +} + +// Roots returns the root CIDs. +// The root CIDs are extracted lazily from the data payload header. +func (r *Reader) Roots() ([]cid.Cid, error) { + if r.roots != nil { + return r.roots, nil + } + dr, err := r.DataReader() + if err != nil { + return nil, err + } + header, err := carv1.ReadHeader(dr, r.opts.MaxAllowedHeaderSize) + if err != nil { + return nil, err + } + r.roots = header.Roots + return r.roots, nil +} + +func (r *Reader) readV2Header() (err error) { + headerSection := io.NewSectionReader(r.r, PragmaSize, HeaderSize) + _, err = r.Header.ReadFrom(headerSection) + return +} + +// SectionReader implements both io.ReadSeeker and io.ReaderAt. +// It is the interface version of io.SectionReader, but note that the +// implementation is not guaranteed to be an io.SectionReader. +type SectionReader interface { + io.Reader + io.Seeker + io.ReaderAt +} + +// DataReader provides a reader containing the data payload in CARv1 format. +func (r *Reader) DataReader() (SectionReader, error) { + if r.Version == 2 { + return io.NewSectionReader(r.r, int64(r.Header.DataOffset), int64(r.Header.DataSize)), nil + } + return internalio.NewOffsetReadSeeker(r.r, 0) +} + +// IndexReader provides an io.Reader containing the index for the data payload if the index is +// present. Otherwise, returns nil. +// Note, this function will always return nil if the backing payload represents a CARv1. +func (r *Reader) IndexReader() (io.Reader, error) { + if r.Version == 1 || !r.Header.HasIndex() { + return nil, nil + } + return internalio.NewOffsetReadSeeker(r.r, int64(r.Header.IndexOffset)) +} + +// Stats is returned by an Inspect() call +type Stats struct { + Version uint64 + Header Header + Roots []cid.Cid + RootsPresent bool + BlockCount uint64 + CodecCounts map[multicodec.Code]uint64 + MhTypeCounts map[multicodec.Code]uint64 + AvgCidLength uint64 + MaxCidLength uint64 + MinCidLength uint64 + AvgBlockLength uint64 + MaxBlockLength uint64 + MinBlockLength uint64 + IndexCodec multicodec.Code +} + +// Inspect does a quick scan of a CAR, performing basic validation of the format +// and returning a Stats object that provides a high-level description of the +// contents of the CAR. +// Inspect works for CARv1 and CARv2 contents. A CARv1 will return an +// uninitialized Header value. +// +// If validateBlockHash is true, all block data in the payload will be hashed +// and compared to the CID for that block and an error will return if there +// is a mismatch. If false, block data will be skipped over and not checked. +// Performing a full block hash validation is similar to using a BlockReader and +// calling Next over all blocks. +// +// Inspect will perform a basic check of a CARv2 index, where present, but this +// does not guarantee that the index is correct. Attempting to read index data +// from untrusted sources is not recommended. If required, further validation of +// an index can be performed by loading the index and performing a ForEach() and +// sanity checking that the offsets are within the data payload section of the +// CAR. However, re-generation of index data in this case is the recommended +// course of action. +// +// Beyond the checks performed by Inspect, a valid / good CAR is somewhat +// use-case dependent. 
Factors to consider include:
+//
+//   - Bad indexes, including incorrect offsets, duplicate entries, or other
+//     faulty data. Indexes should be regenerated, regardless, if you need to
+//     use them and have any reason not to trust the source.
+//
+//   - Blocks use codecs that your system doesn't have access to, which may
+//     mean you can't traverse a DAG or use the contained data. Stats.CodecCounts
+//     contains a list of codecs found in the CAR so this can be checked.
+//
+//   - CIDs use multihashes that your system doesn't have access to, which will
+//     mean you can't validate that block hashes are correct (using
+//     validateBlockHash in this case will result in a failure). Stats.MhTypeCounts
+//     contains a list of multihashes found in the CAR so this can be checked.
+//
+//   - The presence of IDENTITY CIDs, which may not be supported (or desired) by
+//     the consumer of the CAR. Stats.CodecCounts can determine the presence
+//     of IDENTITY CIDs.
+//
+//   - Roots: the number of roots, duplicates, and whether they are related to the
+//     blocks contained within the CAR. Stats contains a list of Roots and a
+//     RootsPresent bool so further checks can be performed.
+//
+//   - DAG completeness is not checked. Any properties relating to the DAG, or
+//     DAGs contained within a CAR, are the responsibility of the user to check.
+func (r *Reader) Inspect(validateBlockHash bool) (Stats, error) {
+	stats := Stats{
+		Version:      r.Version,
+		Header:       r.Header,
+		CodecCounts:  make(map[multicodec.Code]uint64),
+		MhTypeCounts: make(map[multicodec.Code]uint64),
+	}
+
+	var totalCidLength uint64
+	var totalBlockLength uint64
+	var minCidLength uint64 = math.MaxUint64
+	var minBlockLength uint64 = math.MaxUint64
+
+	dr, err := r.DataReader()
+	if err != nil {
+		return Stats{}, err
+	}
+	bdr := internalio.ToByteReader(dr)
+
+	// Read the roots without using Roots(), because we need the offset set up in the data reader.
+	header, err := carv1.ReadHeader(dr, r.opts.MaxAllowedHeaderSize)
+	if err != nil {
+		return Stats{}, err
+	}
+	stats.Roots = header.Roots
+	var rootsPresentCount int
+	rootsPresent := make([]bool, len(stats.Roots))
+
+	// read block sections
+	for {
+		sectionLength, err := varint.ReadUvarint(bdr)
+		if err != nil {
+			if err == io.EOF {
+				// If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF.
+				if sectionLength > 0 {
+					return Stats{}, io.ErrUnexpectedEOF
+				}
+				// otherwise, this is a normal ending
+				break
+			}
+			return Stats{}, err
+		}
+		if sectionLength == 0 && r.opts.ZeroLengthSectionAsEOF {
+			// normal ending for this read mode
+			break
+		}
+		if sectionLength > r.opts.MaxAllowedSectionSize {
+			return Stats{}, util.ErrSectionTooLarge
+		}
+
+		// decode just the CID bytes
+		cidLen, c, err := cid.CidFromReader(dr)
+		if err != nil {
+			return Stats{}, err
+		}
+
+		if sectionLength < uint64(cidLen) {
+			// This case is handled differently in the normal ReadNode() path, since that
+			// slurps in the whole section bytes and decodes the CID from there, so an
+			// error should come from a failing io.ReadFull.
+			return Stats{}, errors.New("section length shorter than CID length")
+		}
+
+		// is this a root block?
(also account for duplicate root CIDs) + if rootsPresentCount < len(stats.Roots) { + for i, r := range stats.Roots { + if !rootsPresent[i] && c == r { + rootsPresent[i] = true + rootsPresentCount++ + } + } + } + + cp := c.Prefix() + codec := multicodec.Code(cp.Codec) + count := stats.CodecCounts[codec] + stats.CodecCounts[codec] = count + 1 + mhtype := multicodec.Code(cp.MhType) + count = stats.MhTypeCounts[mhtype] + stats.MhTypeCounts[mhtype] = count + 1 + + blockLength := sectionLength - uint64(cidLen) + + if validateBlockHash { + // Use multihash.SumStream to avoid having to copy the entire block content into memory. + // The SumStream uses a buffered copy to write bytes into the hasher which will take + // advantage of streaming hash calculation depending on the hash function. + // TODO: introduce SumStream in go-cid to simplify the code here. + blockReader := io.LimitReader(dr, int64(blockLength)) + mhl := cp.MhLength + if mhtype == multicodec.Identity { + mhl = -1 + } + mh, err := multihash.SumStream(blockReader, cp.MhType, mhl) + if err != nil { + return Stats{}, err + } + var gotCid cid.Cid + switch cp.Version { + case 0: + gotCid = cid.NewCidV0(mh) + case 1: + gotCid = cid.NewCidV1(cp.Codec, mh) + default: + return Stats{}, fmt.Errorf("invalid cid version: %d", cp.Version) + } + if !gotCid.Equals(c) { + return Stats{}, fmt.Errorf("mismatch in content integrity, expected: %s, got: %s", c, gotCid) + } + } else { + // otherwise, skip over it + if _, err := dr.Seek(int64(blockLength), io.SeekCurrent); err != nil { + return Stats{}, err + } + } + + stats.BlockCount++ + totalCidLength += uint64(cidLen) + totalBlockLength += blockLength + if uint64(cidLen) < minCidLength { + minCidLength = uint64(cidLen) + } + if uint64(cidLen) > stats.MaxCidLength { + stats.MaxCidLength = uint64(cidLen) + } + if blockLength < minBlockLength { + minBlockLength = blockLength + } + if blockLength > stats.MaxBlockLength { + stats.MaxBlockLength = blockLength + } + } + + stats.RootsPresent = len(stats.Roots) == rootsPresentCount + if stats.BlockCount > 0 { + stats.MinCidLength = minCidLength + stats.MinBlockLength = minBlockLength + stats.AvgCidLength = totalCidLength / stats.BlockCount + stats.AvgBlockLength = totalBlockLength / stats.BlockCount + } + + if stats.Version != 1 && stats.Header.HasIndex() { + idxr, err := r.IndexReader() + if err != nil { + return Stats{}, err + } + stats.IndexCodec, err = index.ReadCodec(idxr) + if err != nil { + return Stats{}, err + } + } + + return stats, nil +} + +// Close closes the underlying reader if it was opened by OpenReader. +func (r *Reader) Close() error { + if r.closer != nil { + return r.closer.Close() + } + return nil +} + +// ReadVersion reads the version from the pragma. +// This function accepts both CARv1 and CARv2 payloads. +func ReadVersion(r io.Reader, opts ...Option) (uint64, error) { + o := ApplyOptions(opts...) 
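+	// Only MaxAllowedHeaderSize is relevant here; it bounds how many bytes
+	// the CARv1 header decoder will read before giving up with an error.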
+ header, err := carv1.ReadHeader(r, o.MaxAllowedHeaderSize) + if err != nil { + return 0, err + } + return header.Version, nil +} diff --git a/ipld/car/v2/reader_test.go b/ipld/car/v2/reader_test.go new file mode 100644 index 0000000000..75a4c274f3 --- /dev/null +++ b/ipld/car/v2/reader_test.go @@ -0,0 +1,592 @@ +package car_test + +import ( + "bytes" + "encoding/hex" + "io" + "os" + "strings" + "testing" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" +) + +func TestReadVersion(t *testing.T) { + tests := []struct { + name string + path string + want uint64 + wantErr bool + }{ + { + name: "CarV1VersionIsOne", + path: "testdata/sample-v1.car", + want: 1, + }, + { + name: "CarV2VersionIsTwo", + path: "testdata/sample-rw-bs-v2.car", + want: 2, + }, + { + name: "CarV1VersionWithZeroLenSectionIsOne", + path: "testdata/sample-v1-with-zero-len-section.car", + want: 1, + }, + { + name: "AnotherCarV1VersionWithZeroLenSectionIsOne", + path: "testdata/sample-v1-with-zero-len-section2.car", + want: 1, + }, + { + name: "WrappedCarV1InCarV2VersionIsTwo", + path: "testdata/sample-wrapped-v2.car", + want: 2, + }, + { + name: "FutureVersionWithCorrectPragmaIsAsExpected", + path: "testdata/sample-rootless-v42.car", + want: 42, + }, + { + name: "CarV1WithValidHeaderButCorruptSectionIsOne", + path: "testdata/sample-v1-tailing-corrupt-section.car", + want: 1, + }, + { + name: "CarV2WithValidHeaderButCorruptSectionAndIndexIsTwo", + path: "testdata/sample-v2-corrupt-data-and-index.car", + want: 2, + }, + { + name: "CarFileWithCorruptPragmaIsError", + path: "testdata/sample-corrupt-pragma.car", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + f, err := os.Open(tt.path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + + got, err := carv2.ReadVersion(f) + if tt.wantErr { + require.Error(t, err, "ReadVersion() error = %v, wantErr %v", err, tt.wantErr) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, got, "ReadVersion() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestReaderFailsOnUnknownVersion(t *testing.T) { + _, err := carv2.OpenReader("testdata/sample-rootless-v42.car") + require.EqualError(t, err, "invalid car version: 42") +} + +func TestReaderFailsOnCorruptPragma(t *testing.T) { + _, err := carv2.OpenReader("testdata/sample-corrupt-pragma.car") + require.EqualError(t, err, "unexpected EOF") +} + +func TestReader_WithCarV1Consistency(t *testing.T) { + tests := []struct { + name string + path string + zerLenAsEOF bool + }{ + { + name: "CarV1WithoutZeroLengthSection", + path: "testdata/sample-v1.car", + }, + { + name: "CarV1WithZeroLenSection", + path: "testdata/sample-v1-with-zero-len-section.car", + zerLenAsEOF: true, + }, + { + name: "AnotherCarV1WithZeroLenSection", + path: "testdata/sample-v1-with-zero-len-section2.car", + zerLenAsEOF: true, + }, + { + name: "CarV1WithZeroLenSectionWithoutOption", + path: "testdata/sample-v1-with-zero-len-section.car", + }, + { + name: "AnotherCarV1WithZeroLenSectionWithoutOption", + path: "testdata/sample-v1-with-zero-len-section2.car", + }, + { + name: "CorruptCarV1", + path: "testdata/sample-v1-tailing-corrupt-section.car", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + subject, err := carv2.OpenReader(tt.path, 
carv2.ZeroLengthSectionAsEOF(tt.zerLenAsEOF)) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, subject.Close()) }) + wantReader := requireNewCarV1ReaderFromV1File(t, tt.path, tt.zerLenAsEOF) + + require.Equal(t, uint64(1), subject.Version) + gotRoots, err := subject.Roots() + require.NoError(t, err) + require.Equal(t, wantReader.Header.Roots, gotRoots) + ir, err := subject.IndexReader() + require.Nil(t, ir) + require.NoError(t, err) + }) + } +} + +func TestReader_WithCarV2Consistency(t *testing.T) { + tests := []struct { + name string + path string + zerLenAsEOF bool + }{ + { + name: "CarV2WrappingV1", + path: "testdata/sample-wrapped-v2.car", + }, + { + name: "CarV2ProducedByBlockstore", + path: "testdata/sample-rw-bs-v2.car", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + subject, err := carv2.OpenReader(tt.path, carv2.ZeroLengthSectionAsEOF(tt.zerLenAsEOF)) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, subject.Close()) }) + wantReader := requireNewCarV1ReaderFromV2File(t, tt.path, tt.zerLenAsEOF) + + require.Equal(t, uint64(2), subject.Version) + gotRoots, err := subject.Roots() + require.NoError(t, err) + require.Equal(t, wantReader.Header.Roots, gotRoots) + + gotIndexReader, err := subject.IndexReader() + require.NoError(t, err) + require.NotNil(t, gotIndexReader) + gotIndex, err := index.ReadFrom(gotIndexReader) + require.NoError(t, err) + dr, err := subject.DataReader() + require.NoError(t, err) + wantIndex, err := carv2.GenerateIndex(dr) + require.NoError(t, err) + require.Equal(t, wantIndex, gotIndex) + }) + } +} + +func TestOpenReader_DoesNotPanicForReadersCreatedBeforeClosure(t *testing.T) { + subject, err := carv2.OpenReader("testdata/sample-wrapped-v2.car") + require.NoError(t, err) + dReaderBeforeClosure, err := subject.DataReader() + require.NoError(t, err) + iReaderBeforeClosure, err := subject.IndexReader() + require.NoError(t, err) + require.NoError(t, subject.Close()) + + buf := make([]byte, 1) + panicTest := func(r io.Reader) { + _, err := r.Read(buf) + require.EqualError(t, err, "mmap: closed") + } + + require.NotPanics(t, func() { panicTest(dReaderBeforeClosure) }) + require.NotPanics(t, func() { panicTest(iReaderBeforeClosure) }) +} + +func TestOpenReader_DoesNotPanicForReadersCreatedAfterClosure(t *testing.T) { + subject, err := carv2.OpenReader("testdata/sample-wrapped-v2.car") + require.NoError(t, err) + require.NoError(t, subject.Close()) + dReaderAfterClosure, err := subject.DataReader() + require.NoError(t, err) + iReaderAfterClosure, err := subject.IndexReader() + require.NoError(t, err) + + buf := make([]byte, 1) + panicTest := func(r io.Reader) { + _, err := r.Read(buf) + require.EqualError(t, err, "mmap: closed") + } + + require.NotPanics(t, func() { panicTest(dReaderAfterClosure) }) + require.NotPanics(t, func() { panicTest(iReaderAfterClosure) }) +} + +func TestReader_ReturnsNilWhenThereIsNoIndex(t *testing.T) { + tests := []struct { + name string + path string + }{ + { + name: "IndexlessCarV2", + path: "testdata/sample-v2-indexless.car", + }, + { + name: "CarV1", + path: "testdata/sample-v1.car", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + subject, err := carv2.OpenReader(tt.path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, subject.Close()) }) + ir, err := subject.IndexReader() + require.NoError(t, err) + require.Nil(t, ir) + }) + } +} + +func requireNewCarV1ReaderFromV2File(t *testing.T, carV12Path string, zerLenAsEOF bool) 
*carv1.CarReader { + f, err := os.Open(carV12Path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + + _, err = f.Seek(carv2.PragmaSize, io.SeekStart) + require.NoError(t, err) + header := carv2.Header{} + _, err = header.ReadFrom(f) + require.NoError(t, err) + return requireNewCarV1Reader(t, io.NewSectionReader(f, int64(header.DataOffset), int64(header.DataSize)), zerLenAsEOF) +} + +func requireNewCarV1ReaderFromV1File(t *testing.T, carV1Path string, zerLenAsEOF bool) *carv1.CarReader { + f, err := os.Open(carV1Path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + return requireNewCarV1Reader(t, f, zerLenAsEOF) +} + +func requireNewCarV1Reader(t *testing.T, r io.Reader, zerLenAsEOF bool) *carv1.CarReader { + var cr *carv1.CarReader + var err error + if zerLenAsEOF { + cr, err = carv1.NewCarReaderWithZeroLengthSectionAsEOF(r) + } else { + cr, err = carv1.NewCarReader(r) + } + require.NoError(t, err) + return cr +} + +func TestInspect(t *testing.T) { + tests := []struct { + name string + path string + carHex string + zerLenAsEOF bool + expectedStats carv2.Stats + }{ + { + name: "IndexlessCarV2", + path: "testdata/sample-v2-indexless.car", + expectedStats: carv2.Stats{ + Version: 2, + Header: carv2.Header{ + Characteristics: carv2.Characteristics{0, 0}, + DataOffset: 51, + DataSize: 479907, + IndexOffset: 0, + }, + Roots: []cid.Cid{mustCidDecode("bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy")}, + RootsPresent: true, + AvgBlockLength: 417, // 417.6644423260248 + MinBlockLength: 1, + MaxBlockLength: 1342, + AvgCidLength: 37, // 37.86939942802669 + MinCidLength: 14, + MaxCidLength: 38, + BlockCount: 1049, + CodecCounts: map[multicodec.Code]uint64{ + multicodec.Raw: 6, + multicodec.DagCbor: 1043, + }, + MhTypeCounts: map[multicodec.Code]uint64{ + multicodec.Identity: 6, + multicodec.Blake2b256: 1043, + }, + }, + }, + { + // same payload as IndexlessCarV2, so only difference is the Version & Header + name: "CarV1", + path: "testdata/sample-v1.car", + expectedStats: carv2.Stats{ + Version: 1, + Header: carv2.Header{}, + Roots: []cid.Cid{mustCidDecode("bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy")}, + RootsPresent: true, + AvgBlockLength: 417, // 417.6644423260248 + MinBlockLength: 1, + MaxBlockLength: 1342, + AvgCidLength: 37, // 37.86939942802669 + MinCidLength: 14, + MaxCidLength: 38, + BlockCount: 1049, + CodecCounts: map[multicodec.Code]uint64{ + multicodec.Raw: 6, + multicodec.DagCbor: 1043, + }, + MhTypeCounts: map[multicodec.Code]uint64{ + multicodec.Identity: 6, + multicodec.Blake2b256: 1043, + }, + }, + }, + { + // same payload as IndexlessCarV2, so only difference is the Header + name: "CarV2ProducedByBlockstore", + path: "testdata/sample-rw-bs-v2.car", + expectedStats: carv2.Stats{ + Version: 2, + Header: carv2.Header{ + DataOffset: 1464, + DataSize: 273, + IndexOffset: 1737, + }, + Roots: []cid.Cid{ + mustCidDecode("bafkreifuosuzujyf4i6psbneqtwg2fhplc2wxptc5euspa2gn3bwhnihfu"), + mustCidDecode("bafkreifc4hca3inognou377hfhvu2xfchn2ltzi7yu27jkaeujqqqdbjju"), + mustCidDecode("bafkreig5lvr4l6b4fr3un4xvzeyt3scevgsqjgrhlnwxw2unwbn5ro276u"), + }, + RootsPresent: true, + BlockCount: 3, + CodecCounts: map[multicodec.Code]uint64{multicodec.Raw: 3}, + MhTypeCounts: map[multicodec.Code]uint64{multicodec.Sha2_256: 3}, + AvgCidLength: 36, + MaxCidLength: 36, + MinCidLength: 36, + AvgBlockLength: 6, + MaxBlockLength: 9, + MinBlockLength: 4, + IndexCodec: multicodec.CarMultihashIndexSorted, + }, + }, 
+ // same as CarV1 but with a zero-byte EOF to test options + { + name: "CarV1VersionWithZeroLenSectionIsOne", + path: "testdata/sample-v1-with-zero-len-section.car", + zerLenAsEOF: true, + expectedStats: carv2.Stats{ + Version: 1, + Header: carv2.Header{}, + Roots: []cid.Cid{mustCidDecode("bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oy")}, + RootsPresent: true, + AvgBlockLength: 417, // 417.6644423260248 + MinBlockLength: 1, + MaxBlockLength: 1342, + AvgCidLength: 37, // 37.86939942802669 + MinCidLength: 14, + MaxCidLength: 38, + BlockCount: 1049, + CodecCounts: map[multicodec.Code]uint64{ + multicodec.Raw: 6, + multicodec.DagCbor: 1043, + }, + MhTypeCounts: map[multicodec.Code]uint64{ + multicodec.Identity: 6, + multicodec.Blake2b256: 1043, + }, + }, + }, + { + // A case where this _could_ be a valid CAR if we allowed identity CIDs + // and not matching block contents to exist, there's no block bytes in + // this. It will only fail if you don't validate the CID matches the, + // bytes (see TestInspectError for that case). + name: "IdentityCID", + // 47 {version:1,roots:[identity cid]} 25 identity cid (dag-json {"identity":"block"}) + carHex: "2f a265726f6f747381d82a581a0001a90200147b226964656e74697479223a22626c6f636b227d6776657273696f6e01 19 01a90200147b226964656e74697479223a22626c6f636b227d", + expectedStats: carv2.Stats{ + Version: 1, + Roots: []cid.Cid{mustCidDecode("baguqeaaupmrgszdfnz2gs5dzei5ceytmn5rwwit5")}, + RootsPresent: true, + BlockCount: 1, + CodecCounts: map[multicodec.Code]uint64{multicodec.DagJson: 1}, + MhTypeCounts: map[multicodec.Code]uint64{multicodec.Identity: 1}, + AvgCidLength: 25, + MaxCidLength: 25, + MinCidLength: 25, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var reader *carv2.Reader + var err error + if tt.path != "" { + reader, err = carv2.OpenReader(tt.path, carv2.ZeroLengthSectionAsEOF(tt.zerLenAsEOF)) + require.NoError(t, err) + } else { + byts, err := hex.DecodeString(strings.ReplaceAll(tt.carHex, " ", "")) + require.NoError(t, err) + reader, err = carv2.NewReader(bytes.NewReader(byts), carv2.ZeroLengthSectionAsEOF(tt.zerLenAsEOF)) + require.NoError(t, err) + } + t.Cleanup(func() { require.NoError(t, reader.Close()) }) + stats, err := reader.Inspect(false) + require.NoError(t, err) + require.Equal(t, tt.expectedStats, stats) + }) + } +} + +func TestInspectError(t *testing.T) { + tests := []struct { + name string + carHex string + expectedOpenError string + expectedInspectError string + validateBlockHash bool + }{ + { + name: "BadCidV0", + carHex: "3aa265726f6f747381d8305825000130302030303030303030303030303030303030303030303030303030303030303030306776657273696f6e010130", + expectedInspectError: "expected 1 as the cid version number, got: 48", + }, + { + name: "BadHeaderLength", + carHex: "e0e0e0e0a7060c6f6c4cca943c236f4b196723489608edb42a8b8fa80b6776657273696f6e19", + expectedOpenError: "invalid header data, length of read beyond allowable maximum", + }, + { + name: "BadSectionLength", + carHex: "11a265726f6f7473806776657273696f6e01e0e0e0e0a7060155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca00000000000000000000", + expectedInspectError: "invalid section data, length of read beyond allowable maximum", + }, + { + name: "BadSectionLength2", + carHex: "3aa265726f6f747381d8305825000130302030303030303030303030303030303030303030303030303030303030303030306776657273696f6e01200130302030303030303030303030303030303030303030303030303030303030303030303030303030303030", + 
expectedInspectError: "section length shorter than CID length", + validateBlockHash: true, + }, + { + name: "BadSectionLength3", + carHex: "11a265726f6f7473f66776657273696f6e0180", + expectedInspectError: "unexpected EOF", + }, + { + name: "BadBlockHash(SanityCheck)", // this should pass because we don't ask the CID be validated even though it doesn't match + // header cid data + carHex: "11a265726f6f7473806776657273696f6e 012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca ffffffffffffffffffff", + }, + { + name: "BadBlockHash", // same as above, but we ask for CID validation + // header cid data + carHex: "11a265726f6f7473806776657273696f6e 012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca ffffffffffffffffffff", + validateBlockHash: true, + expectedInspectError: "mismatch in content integrity, expected: bafkreiab2rek7wjiazkfrt3hbnqpljmu24226alszdlh6ivic2abgjubzi, got: bafkreiaaqoxrddiyuy6gxnks6ioqytxhq5a7tchm2mm5htigznwiljukmm", + }, + { + name: "IdentityCID", // a case where this _could_ be a valid CAR if we allowed identity CIDs and not matching block contents to exist, there's no block bytes in this + // 47 {version:1,roots:[identity cid]} 25 identity cid (dag-json {"identity":"block"}) + carHex: "2f a265726f6f747381d82a581a0001a90200147b226964656e74697479223a22626c6f636b227d6776657273696f6e01 19 01a90200147b226964656e74697479223a22626c6f636b227d", + validateBlockHash: true, + expectedInspectError: "mismatch in content integrity, expected: baguqeaaupmrgszdfnz2gs5dzei5ceytmn5rwwit5, got: baguqeaaa", + }, + // the bad index tests are manually constructed from this single-block CARv2 by adjusting the Uint32 and Uint64 values in the index: + // pragma carv2 header carv1 icodec count codec count (swi) width dataLen mh offset + // 0aa16776657273696f6e02 00000000000000000000000000000000330000000000000041000000000000007400000000000000 11a265726f6f7473806776657273696f6e012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca00000000000000000000 8108 01000000 1200000000000000 01000000 28000000 2800000000000000 01d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca 1200000000000000 + // we don't test any further into the index, to do that, a user should do a ForEach across the loaded index (and sanity check the offsets) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + car, _ := hex.DecodeString(strings.ReplaceAll(tt.carHex, " ", "")) + reader, err := carv2.NewReader(bytes.NewReader(car)) + if tt.expectedOpenError != "" { + require.Error(t, err) + require.Equal(t, tt.expectedOpenError, err.Error()) + return + } else { + require.NoError(t, err) + } + t.Cleanup(func() { require.NoError(t, reader.Close()) }) + _, err = reader.Inspect(tt.validateBlockHash) + if tt.expectedInspectError != "" { + require.Error(t, err) + require.Equal(t, tt.expectedInspectError, err.Error()) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestIndex_ReadFromCorruptIndex(t *testing.T) { + tests := []struct { + name string + givenCarHex string + wantErr string + }{ + { + name: "BadIndexCountOverflow", + // pragma carv2 header carv1 icodec count codec count (swi) width dataLen mh offset + givenCarHex: "0aa16776657273696f6e02 00000000000000000000000000000000330000000000000041000000000000007400000000000000 11a265726f6f7473806776657273696f6e012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca00000000000000000000 8108 ffffffff 1200000000000000 01000000 28000000 2800000000000000 
01d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca 1200000000000000", + wantErr: "index too big; MultihashIndexSorted count is overflowing int32", + }, + { + name: "BadIndexCountTooMany", + // pragma carv2 header carv1 icodec count codec count (swi) width dataLen mh offset + givenCarHex: "0aa16776657273696f6e02 00000000000000000000000000000000330000000000000041000000000000007400000000000000 11a265726f6f7473806776657273696f6e012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca00000000000000000000 8108 ffffff7f 1200000000000000 01000000 28000000 2800000000000000 01d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca 1200000000000000", + wantErr: "unexpected EOF", + }, + { + name: "BadIndexMultiWidthOverflow", + // pragma carv2 header carv1 icodec count codec count (swi) width dataLen mh offset + givenCarHex: "0aa16776657273696f6e02 00000000000000000000000000000000330000000000000041000000000000007400000000000000 11a265726f6f7473806776657273696f6e012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca00000000000000000000 8108 01000000 1200000000000000 ffffffff 28000000 2800000000000000 01d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca 1200000000000000", + wantErr: "index too big; multiWidthIndex count is overflowing int32", + }, + { + name: "BadIndexMultiWidthTooMany", + // pragma carv2 header carv1 icodec count codec count (swi) width dataLen mh offset + givenCarHex: "0aa16776657273696f6e02 00000000000000000000000000000000330000000000000041000000000000007400000000000000 11a265726f6f7473806776657273696f6e012e0155122001d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca00000000000000000000 8108 01000000 1200000000000000 ffffff7f 28000000 2800000000000000 01d448afd928065458cf670b60f5a594d735af0172c8d67f22a81680132681ca 1200000000000000", + wantErr: "unexpected EOF", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + car, _ := hex.DecodeString(strings.ReplaceAll(test.givenCarHex, " ", "")) + reader, err := carv2.NewReader(bytes.NewReader(car)) + require.NoError(t, err) + + ir, err := reader.IndexReader() + require.NoError(t, err) + require.NotNil(t, ir) + + gotIdx, err := index.ReadFrom(ir) + if test.wantErr == "" { + require.NoError(t, err) + require.NotNil(t, gotIdx) + } else { + require.Error(t, err) + require.Equal(t, test.wantErr, err.Error()) + require.Nil(t, gotIdx) + } + }) + } +} + +func mustCidDecode(s string) cid.Cid { + c, err := cid.Decode(s) + if err != nil { + panic(err) + } + return c +} diff --git a/ipld/car/v2/selective.go b/ipld/car/v2/selective.go new file mode 100644 index 0000000000..33ac016bcd --- /dev/null +++ b/ipld/car/v2/selective.go @@ -0,0 +1,289 @@ +package car + +import ( + "context" + "fmt" + "io" + "math" + "os" + + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/boxo/ipld/car/v2/internal/loader" + "github.com/ipfs/go-cid" + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" +) + +// ErrSizeMismatch is returned when a written traversal realizes the written header size does not +// match the actual number of car bytes written. 
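+// This would typically indicate that the blocks reachable from the root
+// changed between the sizing traversal and the writing traversal.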
+var ErrSizeMismatch = fmt.Errorf("car-error-sizemismatch") + +// ErrOffsetImpossible is returned when specified paddings or offsets of either a wrapped carv1 +// or index cannot be satisfied based on the data being written. +var ErrOffsetImpossible = fmt.Errorf("car-error-offsetimpossible") + +// MaxTraversalLinks changes the allowed number of links a selector traversal +// can execute before failing. +// +// Note that setting this option may cause an error to be returned from selector +// execution when building a SelectiveCar. +func MaxTraversalLinks(MaxTraversalLinks uint64) Option { + return func(sco *Options) { + sco.MaxTraversalLinks = MaxTraversalLinks + } +} + +// NewSelectiveWriter walks through the proposed dag traversal to learn its total size in order to be able to +// stream out a car to a writer in the expected traversal order in one go. +func NewSelectiveWriter(ctx context.Context, ls *ipld.LinkSystem, root cid.Cid, selector ipld.Node, opts ...Option) (Writer, error) { + cls, cntr := loader.CountingLinkSystem(*ls) + + c1h := carv1.CarHeader{Roots: []cid.Cid{root}, Version: 1} + headSize, err := carv1.HeaderSize(&c1h) + if err != nil { + return nil, err + } + if err := traverse(ctx, &cls, root, selector, ApplyOptions(opts...)); err != nil { + return nil, err + } + tc := traversalCar{ + size: headSize + cntr.Size(), + ctx: ctx, + root: root, + selector: selector, + ls: ls, + opts: ApplyOptions(opts...), + } + return &tc, nil +} + +// TraverseToFile writes a car file matching a given root and selector to the +// path at `destination` using one read of each block. +func TraverseToFile(ctx context.Context, ls *ipld.LinkSystem, root cid.Cid, selector ipld.Node, destination string, opts ...Option) error { + tc := traversalCar{ + size: 0, + ctx: ctx, + root: root, + selector: selector, + ls: ls, + opts: ApplyOptions(opts...), + } + + fp, err := os.Create(destination) + if err != nil { + return err + } + defer fp.Close() + + _, err = tc.WriteTo(fp) + if err != nil { + return err + } + + // fix header size. 
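+	// WriteTo above wrote the CARv2 header with a placeholder (zero) payload
+	// size, and has since recorded the real CARv1 payload size in tc.size, so
+	// seek back to the start and rewrite the header in place with the correct value.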
+ if _, err = fp.Seek(0, 0); err != nil { + return err + } + + tc.size = uint64(tc.size) + if _, err = tc.WriteV2Header(fp); err != nil { + return err + } + + return nil +} + +// TraverseV1 walks through the proposed dag traversal and writes a carv1 to the provided io.Writer +func TraverseV1(ctx context.Context, ls *ipld.LinkSystem, root cid.Cid, selector ipld.Node, writer io.Writer, opts ...Option) (uint64, error) { + opts = append(opts, WithoutIndex()) + tc := traversalCar{ + size: 0, + ctx: ctx, + root: root, + selector: selector, + ls: ls, + opts: ApplyOptions(opts...), + } + + len, _, err := tc.WriteV1(writer) + return len, err +} + +// Writer is an interface allowing writing a car prepared by PrepareTraversal +type Writer interface { + io.WriterTo +} + +var _ Writer = (*traversalCar)(nil) + +type traversalCar struct { + size uint64 + ctx context.Context + root cid.Cid + selector ipld.Node + ls *ipld.LinkSystem + opts Options +} + +func (tc *traversalCar) WriteTo(w io.Writer) (int64, error) { + n, err := tc.WriteV2Header(w) + if err != nil { + return n, err + } + v1s, idx, err := tc.WriteV1(w) + n += int64(v1s) + + if err != nil { + return n, err + } + + // index padding, then index + if tc.opts.IndexCodec != index.CarIndexNone { + if tc.opts.IndexPadding > 0 { + buf := make([]byte, tc.opts.IndexPadding) + pn, err := w.Write(buf) + n += int64(pn) + if err != nil { + return n, err + } + } + in, err := index.WriteTo(idx, w) + n += int64(in) + if err != nil { + return n, err + } + } + + return n, err +} + +func (tc *traversalCar) WriteV2Header(w io.Writer) (int64, error) { + n, err := w.Write(Pragma) + if err != nil { + return int64(n), err + } + + h := NewHeader(tc.size) + if p := tc.opts.DataPadding; p > 0 { + h = h.WithDataPadding(p) + } + if p := tc.opts.IndexPadding; p > 0 { + h = h.WithIndexPadding(p) + } + if tc.opts.IndexCodec == index.CarIndexNone { + h.IndexOffset = 0 + } + hn, err := h.WriteTo(w) + if err != nil { + return int64(n) + hn, err + } + hn += int64(n) + + // We include the initial data padding after the carv2 header + if h.DataOffset > uint64(hn) { + // TODO: buffer writes if this needs to be big. + buf := make([]byte, h.DataOffset-uint64(hn)) + n, err = w.Write(buf) + hn += int64(n) + if err != nil { + return hn, err + } + } else if h.DataOffset < uint64(hn) { + return hn, ErrOffsetImpossible + } + + return hn, nil +} + +func (tc *traversalCar) WriteV1(w io.Writer) (uint64, index.Index, error) { + // write the v1 header + c1h := carv1.CarHeader{Roots: []cid.Cid{tc.root}, Version: 1} + if err := carv1.WriteHeader(&c1h, w); err != nil { + return 0, nil, err + } + v1Size, err := carv1.HeaderSize(&c1h) + if err != nil { + return v1Size, nil, err + } + + // write the block. 
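+	// The teeing link system wraps the LinkSystem so that each block loaded by
+	// the traversal below is also written to w as a CARv1 section, while
+	// tracking offsets so an index can be produced when an index codec is configured.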
+ wls, writer := loader.TeeingLinkSystem(*tc.ls, w, v1Size, tc.opts.IndexCodec) + err = traverse(tc.ctx, &wls, tc.root, tc.selector, tc.opts) + v1Size = writer.Size() + if err != nil { + return v1Size, nil, err + } + if tc.size != 0 && tc.size != v1Size { + return v1Size, nil, ErrSizeMismatch + } + tc.size = v1Size + + if tc.opts.IndexCodec == index.CarIndexNone { + return v1Size, nil, nil + } + idx, err := writer.Index() + return v1Size, idx, err +} + +func traverse(ctx context.Context, ls *ipld.LinkSystem, root cid.Cid, s ipld.Node, opts Options) error { + sel, err := selector.CompileSelector(s) + if err != nil { + return err + } + + chooser := func(_ ipld.Link, _ linking.LinkContext) (ipld.NodePrototype, error) { + return basicnode.Prototype.Any, nil + } + if opts.TraversalPrototypeChooser != nil { + chooser = opts.TraversalPrototypeChooser + } + + progress := traversal.Progress{ + Cfg: &traversal.Config{ + Ctx: ctx, + LinkSystem: *ls, + LinkTargetNodePrototypeChooser: chooser, + LinkVisitOnlyOnce: !opts.BlockstoreAllowDuplicatePuts, + }, + } + if opts.MaxTraversalLinks < math.MaxInt64 { + progress.Budget = &traversal.Budget{ + NodeBudget: math.MaxInt64, + LinkBudget: int64(opts.MaxTraversalLinks), + } + } + + lnk := cidlink.Link{Cid: root} + ls.TrustedStorage = true + rp, err := chooser(lnk, ipld.LinkContext{}) + if err != nil { + return err + } + rootNode, err := ls.Load(ipld.LinkContext{}, lnk, rp) + if err != nil { + return fmt.Errorf("root blk load failed: %s", err) + } + err = progress.WalkMatching(rootNode, sel, func(_ traversal.Progress, node ipld.Node) error { + if lbn, ok := node.(datamodel.LargeBytesNode); ok { + s, err := lbn.AsLargeBytes() + if err != nil { + return err + } + _, err = io.Copy(io.Discard, s) + if err != nil { + return err + } + } + return nil + }) + if err != nil { + return fmt.Errorf("walk failed: %s", err) + } + return nil +} diff --git a/ipld/car/v2/selective_test.go b/ipld/car/v2/selective_test.go new file mode 100644 index 0000000000..02afde0eb0 --- /dev/null +++ b/ipld/car/v2/selective_test.go @@ -0,0 +1,137 @@ +package car_test + +import ( + "bytes" + "context" + "io" + "os" + "path" + "testing" + + blocks "github.com/ipfs/boxo/blocks" + "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/blockstore" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-unixfsnode" + "github.com/ipfs/go-unixfsnode/data/builder" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/storage/bsadapter" + sb "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/stretchr/testify/require" + + _ "github.com/ipld/go-ipld-prime/codec/dagcbor" + _ "github.com/ipld/go-ipld-prime/codec/raw" +) + +func TestPrepareTraversal(t *testing.T) { + from, err := blockstore.OpenReadOnly("testdata/sample-unixfs-v2.car") + require.NoError(t, err) + ls := cidlink.DefaultLinkSystem() + bsa := bsadapter.Adapter{Wrapped: from} + ls.SetReadStorage(&bsa) + + rts, _ := from.Roots() + writer, err := car.NewSelectiveWriter(context.Background(), &ls, rts[0], selectorparse.CommonSelector_ExploreAllRecursively) + require.NoError(t, err) + + buf := bytes.Buffer{} + n, err := writer.WriteTo(&buf) + require.NoError(t, err) + require.Equal(t, int64(len(buf.Bytes())), n) + + fi, _ := 
os.Stat("testdata/sample-unixfs-v2.car") + require.Equal(t, fi.Size(), n) + + // Headers should be equal + h1, _ := car.OpenReader("testdata/sample-unixfs-v2.car") + h1h := bytes.Buffer{} + h1h.Write(car.Pragma) + h1.Header.WriteTo(&h1h) + require.Equal(t, buf.Bytes()[:h1h.Len()], h1h.Bytes()) +} + +func TestFileTraversal(t *testing.T) { + from, err := blockstore.OpenReadOnly("testdata/sample-unixfs-v2.car") + require.NoError(t, err) + ls := cidlink.DefaultLinkSystem() + bsa := bsadapter.Adapter{Wrapped: from} + ls.SetReadStorage(&bsa) + + rts, _ := from.Roots() + outDir := t.TempDir() + err = car.TraverseToFile(context.Background(), &ls, rts[0], selectorparse.CommonSelector_ExploreAllRecursively, path.Join(outDir, "out.car")) + require.NoError(t, err) + + require.FileExists(t, path.Join(outDir, "out.car")) + + fa, _ := os.Stat("testdata/sample-unixfs-v2.car") + fb, _ := os.Stat(path.Join(outDir, "out.car")) + require.Equal(t, fa.Size(), fb.Size()) +} + +func TestV1Traversal(t *testing.T) { + from, err := blockstore.OpenReadOnly("testdata/sample-v1.car") + require.NoError(t, err) + ls := cidlink.DefaultLinkSystem() + bsa := bsadapter.Adapter{Wrapped: from} + ls.SetReadStorage(&bsa) + + rts, _ := from.Roots() + w := bytes.NewBuffer(nil) + n, err := car.TraverseV1(context.Background(), &ls, rts[0], selectorparse.CommonSelector_ExploreAllRecursively, w) + require.NoError(t, err) + require.Equal(t, int64(len(w.Bytes())), int64(n)) + + fa, _ := os.Stat("testdata/sample-v1.car") + require.Equal(t, fa.Size(), int64(n)) +} + +func TestPartialTraversal(t *testing.T) { + store := cidlink.Memory{Bag: make(map[string][]byte)} + ls := cidlink.DefaultLinkSystem() + ls.StorageReadOpener = store.OpenRead + ls.StorageWriteOpener = store.OpenWrite + unixfsnode.AddUnixFSReificationToLinkSystem(&ls) + + // write a unixfs file. + initBuf := bytes.Buffer{} + _, _ = initBuf.Write(make([]byte, 1000000)) + rt, _, err := builder.BuildUnixFSFile(&initBuf, "", &ls) + require.NoError(t, err) + + // read a subset of the file. + _, rts, err := cid.CidFromBytes([]byte(rt.Binary())) + require.NoError(t, err) + ssb := sb.NewSelectorSpecBuilder(basicnode.Prototype.Any) + sel := ssb.ExploreInterpretAs("unixfs", ssb.MatcherSubset(0, 256*1000)) + buf := bytes.Buffer{} + chooser := dagpb.AddSupportToChooser(func(l datamodel.Link, lc linking.LinkContext) (datamodel.NodePrototype, error) { + return basicnode.Prototype.Any, nil + }) + _, err = car.TraverseV1(context.Background(), &ls, rts, sel.Node(), &buf, car.WithTraversalPrototypeChooser(chooser)) + require.NoError(t, err) + + fb := len(buf.Bytes()) + require.Less(t, fb, 1000000) + + loaded, err := car.NewBlockReader(&buf) + require.NoError(t, err) + fnd := make(map[cid.Cid]struct{}) + var b blocks.Block + for err == nil { + b, err = loaded.Next() + if err == io.EOF { + break + } + if _, ok := fnd[b.Cid()]; ok { + require.Fail(t, "duplicate block present", b.Cid()) + } + fnd[b.Cid()] = struct{}{} + } + require.Equal(t, 2, len(fnd)) +} diff --git a/ipld/car/v2/storage/doc.go b/ipld/car/v2/storage/doc.go new file mode 100644 index 0000000000..21e049b21b --- /dev/null +++ b/ipld/car/v2/storage/doc.go @@ -0,0 +1,75 @@ +// Package storage provides a CAR abstraction for the +// github.com/ipld/go-ipld-prime/storage interfaces in the form of a StorageCar. +// +// THIS PACKAGE IS EXPERIMENTAL. Breaking changes may be introduced in +// semver-minor releases before this package stabilizes. Use with caution and +// prefer the blockstore API if stability is required. 
+// +// StorageCar as ReadableStorage provides basic Get and Has operations. It also +// implements StreamingReadableStorage for the more efficient GetStream +// operation, which the CAR format easily supports. +// +// StorageCar as WritableStorage provides the Put operation. It does not +// implement StreamingWritableStorage because the CAR format requires CIDs to +// be written before the blocks themselves, which is not possible with +// StreamingWritableStorage without buffering. Instead, the PutStream function +// in github.com/ipld/go-ipld-prime/storage provides equivalent functionality +// by buffering the stream and committing it with a regular Put. +// +// StorageCar can be used with an IPLD LinkSystem, defined by +// github.com/ipld/go-ipld-prime/linking, with the +// linking.SetReadStorage and linking.SetWriteStorage functions, to provide +// read and/or write access to a CAR as required. +// +// The focus of the StorageCar interfaces is to use the minimal possible IO +// interface for the operation(s) being performed. +// +// • OpenReadable requires an io.ReaderAt, as seeking is required for +// random-access reads as a ReadableStore. +// +// • NewWritable requires an io.Writer when used to write a CARv1, as this format +// can be written in a continuous stream as blocks are written through a +// WritableStore (i.e. when the WriteAsCarV1 option is turned on). When used to +// write a CARv2, the default mode, a random-access io.WriterAt is required, as +// the CARv2 header must be written after the payload is finalized and the index +// written, in order to indicate the payload location in the output. The plain +// Writable store may be used to stream CARv1 contents without buffering, +// storing only CIDs in memory for de-duplication (where required) and to still +// allow Has operations. +// +// • NewReadableWritable requires an io.ReaderAt and an io.Writer, as it combines +// the functionality of NewWritable with OpenReadable, being able to +// random-access read any written blocks. +// +// • OpenReadableWritable requires an io.ReaderAt, an io.Writer and an +// io.WriterAt, as it extends the NewReadableWritable functionality with the +// ability to resume an existing CAR. In addition, if the CAR being resumed is +// a CARv2, the IO object being provided must have a Truncate() method (e.g. +// an os.File) in order to properly manage the CAR lifecycle and avoid writing a +// corrupt CAR.
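+//
+// As a rough sketch of a write/read round trip (error handling elided; ctx,
+// root, blkCid and blkData are assumed to be a context, a valid root CID, and
+// a block that hashes to blkCid):
+//
+//	f, _ := os.Create("example.car")
+//	w, _ := storage.NewWritable(f, []cid.Cid{root})
+//	_ = w.Put(ctx, blkCid.KeyString(), blkData)
+//	_ = w.Finalize() // for a CARv2, writes the final header and index
+//	f.Close()
+//
+//	f, _ = os.Open("example.car")
+//	r, _ := storage.OpenReadable(f)
+//	blkData, _ = r.Get(ctx, blkCid.KeyString())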
+// +// The following options are available to customize the behavior of the +// StorageCar: +// +// • WriteAsCarV1 +// +// • StoreIdentityCIDs +// +// • AllowDuplicatePuts +// +// • UseWholeCIDs +// +// • ZeroLengthSectionAsEOF +// +// • UseIndexCodec +// +// • UseDataPadding +// +// • UseIndexPadding +// +// • MaxIndexCidSize +// +// • MaxAllowedHeaderSize +// +// • MaxAllowedSectionSize +package storage diff --git a/ipld/car/v2/storage/notfound.go b/ipld/car/v2/storage/notfound.go new file mode 100644 index 0000000000..82153e2f2e --- /dev/null +++ b/ipld/car/v2/storage/notfound.go @@ -0,0 +1,39 @@ +package storage + +import "github.com/ipfs/go-cid" + +// compatible with the go-ipld-format ErrNotFound, match against +// interface{NotFound() bool} +// this could go into go-ipld-prime, but for now we'll just exercise the +// feature-test pattern + +type ErrNotFound struct { + Cid cid.Cid +} + +func (e ErrNotFound) Error() string { + if e.Cid == cid.Undef { + return "ipld: could not find node" + } + return "ipld: could not find " + e.Cid.String() +} + +func (e ErrNotFound) NotFound() bool { + return true +} + +func (e ErrNotFound) Is(err error) bool { + switch err.(type) { + case ErrNotFound: + return true + default: + return false + } +} + +func IsNotFound(err error) bool { + if nf, ok := err.(interface{ NotFound() bool }); ok { + return nf.NotFound() + } + return false +} diff --git a/ipld/car/v2/storage/storage.go b/ipld/car/v2/storage/storage.go new file mode 100644 index 0000000000..46156e8dfe --- /dev/null +++ b/ipld/car/v2/storage/storage.go @@ -0,0 +1,505 @@ +package storage + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "sync" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1/util" + internalio "github.com/ipfs/boxo/ipld/car/v2/internal/io" + "github.com/ipfs/boxo/ipld/car/v2/internal/store" + "github.com/ipfs/go-cid" + ipldstorage "github.com/ipld/go-ipld-prime/storage" +) + +var errClosed = errors.New("cannot use a CARv2 storage after closing") + +type ReaderAtWriterAt interface { + io.ReaderAt + io.Writer + io.WriterAt +} + +type ReadableCar interface { + ipldstorage.ReadableStorage + ipldstorage.StreamingReadableStorage + Roots() []cid.Cid +} + +// WritableCar is compatible with storage.WritableStorage but also returns +// the roots of the CAR. It does not implement ipld.StreamingWritableStorage +// as the CAR format does not support streaming data followed by its CID, so +// any streaming implementation would perform buffering and copy the +// existing storage.PutStream() implementation. +type WritableCar interface { + ipldstorage.WritableStorage + Roots() []cid.Cid + Finalize() error +} + +var _ ReadableCar = (*StorageCar)(nil) +var _ WritableCar = (*StorageCar)(nil) + +type StorageCar struct { + idx index.Index + reader io.ReaderAt + writer positionedWriter + dataWriter *internalio.OffsetWriteSeeker + header carv2.Header + roots []cid.Cid + opts carv2.Options + + closed bool + mu sync.RWMutex +} + +type positionedWriter interface { + io.Writer + Position() int64 +} + +// OpenReadable opens a CARv1 or CARv2 file for reading as a ReadableStorage +// and StreamingReadableStorage as defined by +// github.com/ipld/go-ipld-prime/storage. +// +// The returned ReadableStorage is compatible with a linksystem SetReadStorage +// method as defined by github.com/ipld/go-ipld-prime/linking +// to provide a block source backed by a CAR. 
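+//
+// For example, a sketch of wiring the returned store into an ipld-prime
+// LinkSystem (assuming a CAR file on disk, a known root CID, and the relevant
+// IPLD codecs registered):
+//
+//	f, _ := os.Open("example.car")
+//	readable, _ := storage.OpenReadable(f)
+//	ls := cidlink.DefaultLinkSystem()
+//	ls.SetReadStorage(readable)
+//	n, _ := ls.Load(linking.LinkContext{}, cidlink.Link{Cid: root}, basicnode.Prototype.Any)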
+// +// When opening a CAR, an initial scan is performed to generate an index, or +// load an index from a CARv2 index where available. This index data is kept in +// memory while the CAR is being used in order to provide efficient random +// Get access to blocks and Has operations. +// +// The Readable supports StreamingReadableStorage, which allows for efficient +// GetStreaming operations straight out of the underlying CAR where the +// linksystem can make use of it. +func OpenReadable(reader io.ReaderAt, opts ...carv2.Option) (ReadableCar, error) { + sc := &StorageCar{opts: carv2.ApplyOptions(opts...)} + + rr := internalio.ToReadSeeker(reader) + header, err := carv1.ReadHeader(rr, sc.opts.MaxAllowedHeaderSize) + if err != nil { + return nil, err + } + switch header.Version { + case 1: + sc.roots = header.Roots + sc.reader = reader + rr.Seek(0, io.SeekStart) + sc.idx = store.NewInsertionIndex() + if err := carv2.LoadIndex(sc.idx, rr, opts...); err != nil { + return nil, err + } + case 2: + v2r, err := carv2.NewReader(reader, opts...) + if err != nil { + return nil, err + } + sc.roots, err = v2r.Roots() + if err != nil { + return nil, err + } + if v2r.Header.HasIndex() { + ir, err := v2r.IndexReader() + if err != nil { + return nil, err + } + sc.idx, err = index.ReadFrom(ir) + if err != nil { + return nil, err + } + } else { + dr, err := v2r.DataReader() + if err != nil { + return nil, err + } + sc.idx = store.NewInsertionIndex() + if err := carv2.LoadIndex(sc.idx, dr, opts...); err != nil { + return nil, err + } + } + if sc.reader, err = v2r.DataReader(); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported CAR version: %v", header.Version) + } + + return sc, nil +} + +// NewWritable creates a new WritableStorage as defined by +// github.com/ipld/go-ipld-prime/storage that writes a CARv1 or CARv2 format to +// the given io.Writer. +// +// The returned WritableStorage is compatible with a linksystem SetWriteStorage +// method as defined by github.com/ipld/go-ipld-prime/linking +// to provide a block sink backed by a CAR. +// +// The WritableStorage supports Put operations, which will write +// blocks to the CAR in the order they are received. +// +// When writing a CARv2 format (the default), the provided writer must be +// compatible with io.WriterAt in order to provide random access as the CARv2 +// header must be written after the blocks in order to indicate the size of the +// CARv2 data payload. +// +// A CARv1 (generated using the WriteAsCarV1 option) only requires an io.Writer +// and can therefore stream CAR contents as blocks are written while still +// providing Has operations and the ability to avoid writing duplicate blocks +// as required. +// +// When writing a CARv2 format, it is important to call the Finalize method on +// the returned WritableStorage in order to write the CARv2 header and index. +func NewWritable(writer io.Writer, roots []cid.Cid, opts ...carv2.Option) (WritableCar, error) { + sc, err := newWritable(writer, roots, opts...) 
+ if err != nil { + return nil, err + } + return sc.init() +} + +func newWritable(writer io.Writer, roots []cid.Cid, opts ...carv2.Option) (*StorageCar, error) { + sc := &StorageCar{ + writer: &positionTrackingWriter{w: writer}, + idx: store.NewInsertionIndex(), + header: carv2.NewHeader(0), + opts: carv2.ApplyOptions(opts...), + roots: roots, + } + + if p := sc.opts.DataPadding; p > 0 { + sc.header = sc.header.WithDataPadding(p) + } + if p := sc.opts.IndexPadding; p > 0 { + sc.header = sc.header.WithIndexPadding(p) + } + + offset := int64(sc.header.DataOffset) + if sc.opts.WriteAsCarV1 { + offset = 0 + } + + if writerAt, ok := writer.(io.WriterAt); ok { + sc.dataWriter = internalio.NewOffsetWriter(writerAt, offset) + } else { + if !sc.opts.WriteAsCarV1 { + return nil, fmt.Errorf("cannot write as CARv2 to a non-seekable writer") + } + } + + return sc, nil +} + +func newReadableWritable(rw ReaderAtWriterAt, roots []cid.Cid, opts ...carv2.Option) (*StorageCar, error) { + sc, err := newWritable(rw, roots, opts...) + if err != nil { + return nil, err + } + + sc.reader = rw + if !sc.opts.WriteAsCarV1 { + sc.reader, err = internalio.NewOffsetReadSeeker(rw, int64(sc.header.DataOffset)) + if err != nil { + return nil, err + } + } + + return sc, nil +} + +// NewReadableWritable creates a new StorageCar that is able to provide both +// StorageReader and StorageWriter functionality. +// +// The returned StorageCar is compatible with a linksystem SetReadStorage and +// SetWriteStorage methods as defined by github.com/ipld/go-ipld-prime/linking. +// +// When writing a CARv2 format, it is important to call the Finalize method on +// the returned WritableStorage in order to write the CARv2 header and index. +func NewReadableWritable(rw ReaderAtWriterAt, roots []cid.Cid, opts ...carv2.Option) (*StorageCar, error) { + sc, err := newReadableWritable(rw, roots, opts...) + if err != nil { + return nil, err + } + if _, err := sc.init(); err != nil { + return nil, err + } + return sc, nil +} + +// OpenReadableWritable creates a new StorageCar that is able to provide both +// StorageReader and StorageWriter functionality. +// +// The returned StorageCar is compatible with a linksystem SetReadStorage and +// SetWriteStorage methods as defined by github.com/ipld/go-ipld-prime/linking. +// +// It attempts to resume a CARv2 file that was previously written to by +// NewWritable, or NewReadableWritable. +func OpenReadableWritable(rw ReaderAtWriterAt, roots []cid.Cid, opts ...carv2.Option) (*StorageCar, error) { + sc, err := newReadableWritable(rw, roots, opts...) + if err != nil { + return nil, err + } + + // attempt to resume + rs, err := internalio.NewOffsetReadSeeker(rw, 0) + if err != nil { + return nil, err + } + if err := store.ResumableVersion(rs, sc.opts.WriteAsCarV1); err != nil { + return nil, err + } + if err := store.Resume( + rw, + sc.reader, + sc.dataWriter, + sc.idx.(*store.InsertionIndex), + roots, + sc.header.DataOffset, + sc.opts.WriteAsCarV1, + sc.opts.MaxAllowedHeaderSize, + sc.opts.ZeroLengthSectionAsEOF, + ); err != nil { + return nil, err + } + return sc, nil +} + +func (sc *StorageCar) init() (WritableCar, error) { + if !sc.opts.WriteAsCarV1 { + if _, err := sc.writer.Write(carv2.Pragma); err != nil { + return nil, err + } + } + var w io.Writer = sc.dataWriter + if sc.dataWriter == nil { + w = sc.writer + } + if err := carv1.WriteHeader(&carv1.CarHeader{Roots: sc.roots, Version: 1}, w); err != nil { + return nil, err + } + return sc, nil +} + +// Roots returns the roots of the CAR. 
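+// For a writable CAR these are the roots supplied at construction time; for a
+// CAR opened with OpenReadable they are the roots read from the CARv1 header.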
+func (sc *StorageCar) Roots() []cid.Cid { + return sc.roots +} + +// Put adds a block to the CAR, where the block is identified by the given CID +// provided in string form. The keyStr value must be a valid CID binary string +// (not a multibase string representation), i.e. generated with CID#KeyString(). +func (sc *StorageCar) Put(ctx context.Context, keyStr string, data []byte) error { + keyCid, err := cid.Cast([]byte(keyStr)) + if err != nil { + return fmt.Errorf("bad CID key: %w", err) + } + + sc.mu.Lock() + defer sc.mu.Unlock() + + if sc.closed { + return errClosed + } + + idx, ok := sc.idx.(*store.InsertionIndex) + if !ok || sc.writer == nil { + return fmt.Errorf("cannot put into a read-only CAR") + } + + if should, err := store.ShouldPut( + idx, + keyCid, + sc.opts.MaxIndexCidSize, + sc.opts.StoreIdentityCIDs, + sc.opts.BlockstoreAllowDuplicatePuts, + sc.opts.BlockstoreUseWholeCIDs, + ); err != nil { + return err + } else if !should { + return nil + } + + w := sc.writer + if sc.dataWriter != nil { + w = sc.dataWriter + } + n := uint64(w.Position()) + if err := util.LdWrite(w, keyCid.Bytes(), data); err != nil { + return err + } + idx.InsertNoReplace(keyCid, n) + + return nil +} + +// Has returns true if the CAR contains a block identified by the given CID +// provided in string form. The keyStr value must be a valid CID binary string +// (not a multibase string representation), i.e. generated with CID#KeyString(). +func (sc *StorageCar) Has(ctx context.Context, keyStr string) (bool, error) { + keyCid, err := cid.Cast([]byte(keyStr)) + if err != nil { + return false, fmt.Errorf("bad CID key: %w", err) + } + + sc.mu.RLock() + defer sc.mu.RUnlock() + + if sc.closed { + return false, errClosed + } + + if idx, ok := sc.idx.(*store.InsertionIndex); ok && sc.writer != nil { + // writable CAR, fast path using InsertionIndex + return store.Has( + idx, + keyCid, + sc.opts.MaxIndexCidSize, + sc.opts.StoreIdentityCIDs, + sc.opts.BlockstoreAllowDuplicatePuts, + sc.opts.BlockstoreUseWholeCIDs, + ) + } + + if !sc.opts.StoreIdentityCIDs { + // If we don't store identity CIDs then we can return them straight away as if they are here, + // otherwise we need to check for their existence. + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. + if _, ok, err := store.IsIdentity(keyCid); err != nil { + return false, err + } else if ok { + return true, nil + } + } + + _, _, size, err := store.FindCid( + sc.reader, + sc.idx, + keyCid, + sc.opts.BlockstoreUseWholeCIDs, + sc.opts.ZeroLengthSectionAsEOF, + sc.opts.MaxAllowedSectionSize, + false, + ) + if errors.Is(err, index.ErrNotFound) { + return false, nil + } else if err != nil { + return false, err + } + return size > -1, nil +} + +// Get returns the block bytes identified by the given CID provided in string +// form. The keyStr value must be a valid CID binary string (not a multibase +// string representation), i.e. generated with CID#KeyString(). +func (sc *StorageCar) Get(ctx context.Context, keyStr string) ([]byte, error) { + rdr, err := sc.GetStream(ctx, keyStr) + if err != nil { + return nil, err + } + return io.ReadAll(rdr) +} + +// GetStream returns a stream of the block bytes identified by the given CID +// provided in string form. The keyStr value must be a valid CID binary string +// (not a multibase string representation), i.e. generated with CID#KeyString(). 
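+// The returned reader streams directly from the underlying CAR section; when
+// StoreIdentityCIDs is not set, identity CIDs are served from their digest.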
+func (sc *StorageCar) GetStream(ctx context.Context, keyStr string) (io.ReadCloser, error) { + if sc.reader == nil { + return nil, fmt.Errorf("cannot read from a write-only CAR") + } + + keyCid, err := cid.Cast([]byte(keyStr)) + if err != nil { + return nil, fmt.Errorf("bad CID key: %w", err) + } + + if !sc.opts.StoreIdentityCIDs { + // If we don't store identity CIDs then we can serve them straight from the digest, + // as if they were present; otherwise we need to check the index for their existence. + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. + if digest, ok, err := store.IsIdentity(keyCid); err != nil { + return nil, err + } else if ok { + return io.NopCloser(bytes.NewReader(digest)), nil + } + } + + sc.mu.RLock() + defer sc.mu.RUnlock() + + if sc.closed { + return nil, errClosed + } + + _, offset, size, err := store.FindCid( + sc.reader, + sc.idx, + keyCid, + sc.opts.BlockstoreUseWholeCIDs, + sc.opts.ZeroLengthSectionAsEOF, + sc.opts.MaxAllowedSectionSize, + false, + ) + if errors.Is(err, index.ErrNotFound) { + return nil, ErrNotFound{Cid: keyCid} + } else if err != nil { + return nil, err + } + return io.NopCloser(io.NewSectionReader(sc.reader, offset, int64(size))), nil +} + +// Finalize writes the CAR index to the underlying writer if the CAR being +// written is a CARv2. It also writes a finalized CARv2 header which details +// the payload location. This should be called on a writable StorageCar in order to +// avoid data loss. +func (sc *StorageCar) Finalize() error { + idx, ok := sc.idx.(*store.InsertionIndex) + if !ok || sc.writer == nil { + // ignore this, it's not writable + return nil + } + + if sc.opts.WriteAsCarV1 { + return nil + } + + wat, ok := sc.writer.(*positionTrackingWriter).w.(io.WriterAt) + if !ok { // this should already have been checked at construction if this is a writable + return fmt.Errorf("cannot finalize a CARv2 without an io.WriterAt") + } + + sc.mu.Lock() + defer sc.mu.Unlock() + + if sc.closed { + // Allow duplicate Finalize calls, just like Close. + // Still return an error, just like ReadOnly.Close; the error can be discarded.
+ return fmt.Errorf("called Finalize on a closed storage CAR") + } + + sc.closed = true + + return store.Finalize(wat, sc.header, idx, uint64(sc.dataWriter.Position()), sc.opts.StoreIdentityCIDs, sc.opts.IndexCodec) +} + +type positionTrackingWriter struct { + w io.Writer + offset int64 +} + +func (ptw *positionTrackingWriter) Write(p []byte) (int, error) { + written, err := ptw.w.Write(p) + ptw.offset += int64(written) + return written, err +} + +func (ptw *positionTrackingWriter) Position() int64 { + return ptw.offset +} diff --git a/ipld/car/v2/storage/storage_test.go b/ipld/car/v2/storage/storage_test.go new file mode 100644 index 0000000000..8278462623 --- /dev/null +++ b/ipld/car/v2/storage/storage_test.go @@ -0,0 +1,1276 @@ +package storage_test + +// TODO: test readable can't write and writable can't read + +import ( + "bytes" + "context" + "crypto/sha512" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "sync" + "testing" + "time" + + carv2 "github.com/ipfs/boxo/ipld/car/v2" + "github.com/ipfs/boxo/ipld/car/v2/index" + "github.com/ipfs/boxo/ipld/car/v2/internal/carv1" + "github.com/ipfs/boxo/ipld/car/v2/storage" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" +) + +var rng = rand.New(rand.NewSource(1413)) +var rngLk sync.Mutex + +func TestReadable(t *testing.T) { + tests := []struct { + name string + inputPath string + opts []carv2.Option + noIdCids bool + }{ + { + "OpenedWithCarV1", + "../testdata/sample-v1.car", + []carv2.Option{carv2.UseWholeCIDs(true), carv2.StoreIdentityCIDs(true)}, + false, + }, + { + "OpenedWithCarV1_NoIdentityCID", + "../testdata/sample-v1.car", + []carv2.Option{carv2.UseWholeCIDs(true)}, + false, + }, + { + "OpenedWithCarV2", + "../testdata/sample-wrapped-v2.car", + []carv2.Option{carv2.UseWholeCIDs(true), carv2.StoreIdentityCIDs(true)}, + // index already exists, but was made without identity CIDs, but opening with StoreIdentityCIDs(true) means we check the index + true, + }, + { + "OpenedWithCarV2_NoIdentityCID", + "../testdata/sample-wrapped-v2.car", + []carv2.Option{carv2.UseWholeCIDs(true)}, + false, + }, + { + "OpenedWithCarV1ZeroLenSection", + "../testdata/sample-v1-with-zero-len-section.car", + []carv2.Option{carv2.UseWholeCIDs(true), carv2.ZeroLengthSectionAsEOF(true)}, + false, + }, + { + "OpenedWithAnotherCarV1ZeroLenSection", + "../testdata/sample-v1-with-zero-len-section2.car", + []carv2.Option{carv2.UseWholeCIDs(true), carv2.ZeroLengthSectionAsEOF(true)}, + false, + }, + { + "IndexlessV2", + "../testdata/sample-v2-indexless.car", + []carv2.Option{carv2.UseWholeCIDs(true)}, + false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // Setup new StorageCar + inputReader, err := os.Open(tt.inputPath) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, inputReader.Close()) }) + readable, err := storage.OpenReadable(inputReader, tt.opts...) + require.NoError(t, err) + + // Setup BlockReader to compare against + actualReader, err := os.Open(tt.inputPath) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, actualReader.Close()) }) + actual, err := carv2.NewBlockReader(actualReader, tt.opts...) + require.NoError(t, err) + + // Assert roots match v1 payload. 
+ require.Equal(t, actual.Roots, readable.Roots()) + + for { + wantBlock, err := actual.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + key := wantBlock.Cid() + + // Assert StorageCar contains key. + has, err := readable.Has(ctx, key.KeyString()) + require.NoError(t, err) + if key.Prefix().MhType == uint64(multicodec.Identity) && tt.noIdCids { + // fixture wasn't made with StoreIdentityCIDs, but we opened it with StoreIdentityCIDs, + // so they aren't there to find + require.False(t, has) + } else { + require.True(t, has) + } + + // Assert block itself matches v1 payload block. + if has { + gotBlock, err := readable.Get(ctx, key.KeyString()) + require.NoError(t, err) + require.Equal(t, wantBlock.RawData(), gotBlock) + + reader, err := readable.GetStream(ctx, key.KeyString()) + require.NoError(t, err) + data, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, wantBlock.RawData(), data) + } + } + + // test not exists + c := randCid() + has, err := readable.Has(ctx, c.KeyString()) + require.NoError(t, err) + require.False(t, has) + + _, err = readable.Get(ctx, c.KeyString()) + require.True(t, errors.Is(err, storage.ErrNotFound{})) + require.True(t, storage.IsNotFound(err)) + require.Contains(t, err.Error(), c.String()) + + // random identity, should only find this if we _don't_ store identity CIDs + storeIdentity := carv2.ApplyOptions(tt.opts...).StoreIdentityCIDs + c = randIdentityCid() + + has, err = readable.Has(ctx, c.KeyString()) + require.NoError(t, err) + require.Equal(t, !storeIdentity, has) + + got, err := readable.Get(ctx, c.KeyString()) + if !storeIdentity { + require.NoError(t, err) + mh, err := multihash.Decode(c.Hash()) + require.NoError(t, err) + require.Equal(t, mh.Digest, got) + } else { + require.True(t, errors.Is(err, storage.ErrNotFound{})) + require.True(t, storage.IsNotFound(err)) + require.Contains(t, err.Error(), c.String()) + } + }) + } +} + +func TestReadableBadVersion(t *testing.T) { + f, err := os.Open("../testdata/sample-rootless-v42.car") + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + subject, err := storage.OpenReadable(f) + require.Errorf(t, err, "unsupported car version: 42") + require.Nil(t, subject) +} + +func TestWritable(t *testing.T) { + originalCarV1Path := "../testdata/sample-v1.car" + + variants := []struct { + name string + compareCarV1 string + options []carv2.Option + expectedV1StartOffset int64 + }{ + {"carv2_noopt", "sample-v1-noidentity.car", []carv2.Option{}, int64(carv2.PragmaSize + carv2.HeaderSize)}, + {"carv2_identity", "sample-v1.car", []carv2.Option{carv2.StoreIdentityCIDs(true)}, int64(carv2.PragmaSize + carv2.HeaderSize)}, + {"carv1", "sample-v1-noidentity.car", []carv2.Option{carv2.WriteAsCarV1(true)}, int64(0)}, + {"carv1_identity", "sample-v1.car", []carv2.Option{carv2.WriteAsCarV1(true), carv2.StoreIdentityCIDs(true)}, int64(0)}, + } + + for _, mode := range []string{"WithRead", "WithoutRead"} { + t.Run(mode, func(t *testing.T) { + for _, variant := range variants { + t.Run(variant.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + opts := carv2.ApplyOptions(variant.options...) 
+ + // Setup input file using standard CarV1 reader + srcFile, err := os.Open(originalCarV1Path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, srcFile.Close()) }) + r, err := carv1.NewCarReader(srcFile) + require.NoError(t, err) + + path := filepath.Join(t.TempDir(), fmt.Sprintf("writable_%s_%s.car", mode, variant.name)) + var dstFile *os.File + + var writable *storage.StorageCar + if mode == "WithoutRead" { + dstFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + require.NoError(t, err) + t.Cleanup(func() { dstFile.Close() }) + var writer io.Writer = &writerOnly{dstFile} + if !opts.WriteAsCarV1 { + writer = &writerAtOnly{dstFile} + } + w, err := storage.NewWritable(writer, r.Header.Roots, variant.options...) + require.NoError(t, err) + writable = w.(*storage.StorageCar) + } else { + dstFile, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) + require.NoError(t, err) + t.Cleanup(func() { dstFile.Close() }) + writable, err = storage.NewReadableWritable(dstFile, r.Header.Roots, variant.options...) + require.NoError(t, err) + } + + require.Equal(t, r.Header.Roots, writable.Roots()) + + cids := make([]cid.Cid, 0) + var idCidCount int + for { + // read from source + b, err := r.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + // write to dest + err = writable.Put(ctx, b.Cid().KeyString(), b.RawData()) + require.NoError(t, err) + cids = append(cids, b.Cid()) + + dmh, err := multihash.Decode(b.Cid().Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + idCidCount++ + } + + if mode == "WithRead" { + // writable is a ReadableWritable / StorageCar + + // read back out the one we just wrote + gotBlock, err := writable.Get(ctx, b.Cid().KeyString()) + require.NoError(t, err) + require.Equal(t, b.RawData(), gotBlock) + + reader, err := writable.GetStream(ctx, b.Cid().KeyString()) + require.NoError(t, err) + data, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, b.RawData(), data) + + // try reading a random one: + candIndex := rng.Intn(len(cids)) + var candidate cid.Cid + for _, c := range cids { + if candIndex == 0 { + candidate = c + break + } + candIndex-- + } + has, err := writable.Has(ctx, candidate.KeyString()) + require.NoError(t, err) + require.True(t, has) + + // not exists + c := randCid() + has, err = writable.Has(ctx, c.KeyString()) + require.NoError(t, err) + require.False(t, has) + _, err = writable.Get(ctx, c.KeyString()) + require.True(t, errors.Is(err, storage.ErrNotFound{})) + require.True(t, storage.IsNotFound(err)) + require.Contains(t, err.Error(), c.String()) + + // random identity, should only find this if we _don't_ store identity CIDs + c = randIdentityCid() + has, err = writable.Has(ctx, c.KeyString()) + require.NoError(t, err) + require.Equal(t, !opts.StoreIdentityCIDs, has) + + got, err := writable.Get(ctx, c.KeyString()) + if !opts.StoreIdentityCIDs { + require.NoError(t, err) + mh, err := multihash.Decode(c.Hash()) + require.NoError(t, err) + require.Equal(t, mh.Digest, got) + } else { + require.True(t, errors.Is(err, storage.ErrNotFound{})) + require.True(t, storage.IsNotFound(err)) + require.Contains(t, err.Error(), c.String()) + } + } + } + + err = writable.Finalize() + require.NoError(t, err) + + err = dstFile.Close() + require.NoError(t, err) + + // test header version using carv2 reader + reopen, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, reopen.Close()) }) + rd, err := carv2.NewReader(reopen) + 
require.NoError(t, err) + require.Equal(t, opts.WriteAsCarV1, rd.Version == 1) + + // now compare the binary contents of the written file to the expected file + comparePath := filepath.Join("../testdata/", variant.compareCarV1) + compareStat, err := os.Stat(comparePath) + require.NoError(t, err) + + wrote, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, wrote.Close()) }) + _, err = wrote.Seek(variant.expectedV1StartOffset, io.SeekStart) + require.NoError(t, err) + hasher := sha512.New() + gotWritten, err := io.Copy(hasher, io.LimitReader(wrote, compareStat.Size())) + require.NoError(t, err) + gotSum := hasher.Sum(nil) + + hasher.Reset() + compareV1, err := os.Open(comparePath) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, compareV1.Close()) }) + wantWritten, err := io.Copy(hasher, compareV1) + require.NoError(t, err) + wantSum := hasher.Sum(nil) + + require.Equal(t, wantWritten, gotWritten) + require.Equal(t, wantSum, gotSum) + }) + } + }) + } +} + +func TestCannotWriteableV2WithoutWriterAt(t *testing.T) { + w, err := storage.NewWritable(&writerOnly{os.Stdout}, []cid.Cid{}) + require.Error(t, err) + require.Nil(t, w) +} + +func TestErrorsWhenWritingCidTooLarge(t *testing.T) { + maxAllowedCidSize := uint64(20) + + path := filepath.Join(t.TempDir(), "writable-with-id-enabled-too-large.car") + out, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, out.Close()) }) + subject, err := storage.NewWritable(out, []cid.Cid{}, carv2.MaxIndexCidSize(maxAllowedCidSize)) + require.NoError(t, err) + + // normal block but shorten the CID to make it acceptable + testCid, testData := randBlock() + mh, err := multihash.Decode(testCid.Hash()) + require.NoError(t, err) + dig := mh.Digest[:10] + shortMh, err := multihash.Encode(dig, mh.Code) + require.NoError(t, err) + testCid = cid.NewCidV1(mh.Code, shortMh) + + err = subject.Put(context.TODO(), testCid.KeyString(), testData) + require.NoError(t, err) + + // standard CID but too long for options + testCid, testData = randBlock() + err = subject.Put(context.TODO(), testCid.KeyString(), testData) + require.Equal(t, &carv2.ErrCidTooLarge{MaxSize: maxAllowedCidSize, CurrentSize: uint64(testCid.ByteLen())}, err) +} + +func TestConcurrentUse(t *testing.T) { + dst, err := os.OpenFile(filepath.Join(t.TempDir(), "readwrite.car"), os.O_CREATE|os.O_RDWR, 0644) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, dst.Close()) }) + wbs, err := storage.NewReadableWritable(dst, nil) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + require.NoError(t, err) + t.Cleanup(func() { wbs.Finalize() }) + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + testCid, testData := randBlock() + + has, err := wbs.Has(ctx, testCid.KeyString()) + require.NoError(t, err) + require.False(t, has) + + err = wbs.Put(ctx, testCid.KeyString(), testData) + require.NoError(t, err) + + got, err := wbs.Get(ctx, testCid.KeyString()) + require.NoError(t, err) + require.Equal(t, testData, got) + }() + } + wg.Wait() +} + +func TestNullPadding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + paddedV1, err := os.ReadFile("../testdata/sample-v1-with-zero-len-section.car") + require.NoError(t, err) + + readable, err := storage.OpenReadable(bufferReaderAt(paddedV1), carv2.ZeroLengthSectionAsEOF(true)) + require.NoError(t, 
err) + + roots := readable.Roots() + require.Len(t, roots, 1) + has, err := readable.Has(ctx, roots[0].KeyString()) + require.NoError(t, err) + require.True(t, has) + + actual, err := carv2.NewBlockReader(bytes.NewReader(paddedV1), carv2.ZeroLengthSectionAsEOF(true)) + require.NoError(t, err) + + for { + wantBlock, err := actual.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + b, err := readable.Get(ctx, wantBlock.Cid().KeyString()) + require.NoError(t, err) + require.Equal(t, wantBlock.RawData(), b) + } +} + +func TestPutSameHashes(t *testing.T) { + tdir := t.TempDir() + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // This writable allows duplicate puts, and identifies by multihash as per the default. + pathAllowDups := filepath.Join(tdir, "writable-allowdup.car") + dstAllowDups, err := os.OpenFile(pathAllowDups, os.O_CREATE|os.O_RDWR, 0644) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, dstAllowDups.Close()) }) + wbsAllowDups, err := storage.NewReadableWritable(dstAllowDups, nil, carv2.AllowDuplicatePuts(true)) + require.NoError(t, err) + + // This writable deduplicates puts by CID. + pathByCID := filepath.Join(tdir, "writable-dedup-wholecid.car") + dstByCID, err := os.OpenFile(pathByCID, os.O_CREATE|os.O_RDWR, 0644) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, dstByCID.Close()) }) + wbsByCID, err := storage.NewReadableWritable(dstByCID, nil, carv2.UseWholeCIDs(true)) + require.NoError(t, err) + + // This writable deduplicates puts by multihash + pathByHash := filepath.Join(tdir, "writable-dedup-byhash.car") + dstByHash, err := os.OpenFile(pathByHash, os.O_CREATE|os.O_RDWR, 0644) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, dstByHash.Close()) }) + wbsByHash, err := storage.NewReadableWritable(dstByHash, nil) + require.NoError(t, err) + + var blockList []struct { + cid cid.Cid + data []byte + } + + appendBlock := func(data []byte, version, codec uint64) { + c, err := cid.Prefix{ + Version: version, + Codec: codec, + MhType: multihash.SHA2_256, + MhLength: -1, + }.Sum(data) + require.NoError(t, err) + blockList = append(blockList, struct { + cid cid.Cid + data []byte + }{c, data}) + } + + // Two raw blocks, meaning we have two unique multihashes. + // However, we have multiple CIDs for each multihash. + // We also have two duplicate CIDs. + data1 := []byte("foo bar") + appendBlock(data1, 0, cid.DagProtobuf) + appendBlock(data1, 1, cid.DagProtobuf) + appendBlock(data1, 1, cid.DagCBOR) + appendBlock(data1, 1, cid.DagCBOR) // duplicate CID + + data2 := []byte("foo bar baz") + appendBlock(data2, 0, cid.DagProtobuf) + appendBlock(data2, 1, cid.DagProtobuf) + appendBlock(data2, 1, cid.DagProtobuf) // duplicate CID + appendBlock(data2, 1, cid.DagCBOR) + + countBlocks := func(path string) int { + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + rdr, err := carv2.NewBlockReader(f) + require.NoError(t, err) + + n := 0 + for { + _, err := rdr.Next() + if err == io.EOF { + break + } + n++ + } + return n + } + + putBlockList := func(writable *storage.StorageCar) { + for i, block := range blockList { + // Has should never error here. + // The first block should be missing. + // Others might not, given the duplicate hashes. 
+ has, err := writable.Has(ctx, block.cid.KeyString()) + require.NoError(t, err) + if i == 0 { + require.False(t, has) + } + + err = writable.Put(ctx, block.cid.KeyString(), block.data) + require.NoError(t, err) + + // Has and Get need to work right after a Put + has, err = writable.Has(ctx, block.cid.KeyString()) + require.NoError(t, err) + require.True(t, has) + + got, err := writable.Get(ctx, block.cid.KeyString()) + require.NoError(t, err) + require.Equal(t, block.data, got) + } + } + + putBlockList(wbsAllowDups) + err = wbsAllowDups.Finalize() + require.NoError(t, err) + require.Equal(t, len(blockList), countBlocks(pathAllowDups)) + + // Put the same list of blocks to the CAR that deduplicates by CID. + // We should end up with two fewer blocks, as two are entire CID duplicates. + putBlockList(wbsByCID) + err = wbsByCID.Finalize() + require.NoError(t, err) + require.Equal(t, len(blockList)-2, countBlocks(pathByCID)) + + // Put the same list of blocks to the CAR that deduplicates by multihash. + // We should end up with just two blocks, as the original set of blocks only + // has two distinct multihashes. + putBlockList(wbsByHash) + err = wbsByHash.Finalize() + require.NoError(t, err) + require.Equal(t, 2, countBlocks(pathByHash)) +} + +func TestReadableCantWrite(t *testing.T) { + inp, err := os.Open("../testdata/sample-v1.car") + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, inp.Close()) }) + readable, err := storage.OpenReadable(inp) + require.NoError(t, err) + require.ErrorContains(t, readable.(*storage.StorageCar).Put(context.Background(), randCid().KeyString(), []byte("bar")), "read-only") + // Finalize() is nonsense for a readable, but it should be safe + require.NoError(t, readable.(*storage.StorageCar).Finalize()) +} + +func TestWritableCantRead(t *testing.T) { + // an io.Writer with no io.WriterAt capabilities + path := filepath.Join(t.TempDir(), "writable.car") + out, err := os.Create(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, out.Close()) }) + + // This should fail because the writer is not an io.WriterAt + _, err = storage.NewWritable(&writerOnly{out}, nil) + require.ErrorContains(t, err, "CARv2") + require.ErrorContains(t, err, "non-seekable") + + writable, err := storage.NewWritable(&writerOnly{out}, nil, carv2.WriteAsCarV1(true)) + require.NoError(t, err) + + _, err = writable.(*storage.StorageCar).Get(context.Background(), randCid().KeyString()) + require.ErrorContains(t, err, "write-only") + + _, err = writable.(*storage.StorageCar).GetStream(context.Background(), randCid().KeyString()) + require.ErrorContains(t, err, "write-only") + + require.NoError(t, writable.Finalize()) +} + +func TestReadWriteWithPaddingWorksAsExpected(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + testCid1, testData1 := randBlock() + testCid2, testData2 := randBlock() + + wantRoots := []cid.Cid{testCid1, testCid2} + path := filepath.Join(t.TempDir(), "readwrite-with-padding.car") + writer, err := os.Create(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, writer.Close()) }) + + wantCarV1Padding := uint64(1413) + wantIndexPadding := uint64(1314) + subject, err := storage.NewReadableWritable( + writer, + wantRoots, + carv2.UseDataPadding(wantCarV1Padding), + carv2.UseIndexPadding(wantIndexPadding)) + require.NoError(t, err) + require.NoError(t, subject.Put(ctx, testCid1.KeyString(), testData1)) + require.NoError(t, subject.Put(ctx, testCid2.KeyString(), testData2)) +
require.NoError(t, subject.Finalize()) + + // Assert CARv2 header contains right offsets. + gotCarV2, err := carv2.OpenReader(path) + t.Cleanup(func() { gotCarV2.Close() }) + require.NoError(t, err) + wantCarV1Offset := carv2.PragmaSize + carv2.HeaderSize + wantCarV1Padding + wantIndexOffset := wantCarV1Offset + gotCarV2.Header.DataSize + wantIndexPadding + require.Equal(t, wantCarV1Offset, gotCarV2.Header.DataOffset) + require.Equal(t, wantIndexOffset, gotCarV2.Header.IndexOffset) + require.NoError(t, gotCarV2.Close()) + + f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + + // Assert reading CARv1 directly at offset and size is as expected. + gotCarV1, err := carv1.NewCarReader(io.NewSectionReader(f, int64(wantCarV1Offset), int64(gotCarV2.Header.DataSize))) + require.NoError(t, err) + require.Equal(t, wantRoots, gotCarV1.Header.Roots) + gotBlock, err := gotCarV1.Next() + require.NoError(t, err) + require.Equal(t, testCid1, gotBlock.Cid()) + require.Equal(t, testData1, gotBlock.RawData()) + gotBlock, err = gotCarV1.Next() + require.NoError(t, err) + require.Equal(t, testCid2, gotBlock.Cid()) + require.Equal(t, testData2, gotBlock.RawData()) + + _, err = gotCarV1.Next() + require.Equal(t, io.EOF, err) + + // Assert reading index directly from file is parsable and has expected CIDs. + stat, err := f.Stat() + require.NoError(t, err) + indexSize := stat.Size() - int64(wantIndexOffset) + gotIdx, err := index.ReadFrom(io.NewSectionReader(f, int64(wantIndexOffset), indexSize)) + require.NoError(t, err) + _, err = index.GetFirst(gotIdx, testCid1) + require.NoError(t, err) + _, err = index.GetFirst(gotIdx, testCid2) + require.NoError(t, err) +} + +func TestResumption(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + srcPath := "../testdata/sample-v1.car" + + v1f, err := os.Open(srcPath) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, v1f.Close()) }) + rd, err := carv2.NewReader(v1f) + require.NoError(t, err) + roots, err := rd.Roots() + require.NoError(t, err) + + blockSource := func() <-chan simpleBlock { + v1f, err := os.Open(srcPath) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, v1f.Close()) }) + r, err := carv1.NewCarReader(v1f) + require.NoError(t, err) + ret := make(chan simpleBlock) + + go func() { + for { + b, err := r.Next() + if err == io.EOF { + close(ret) + break + } + require.NoError(t, err) + ret <- simpleBlock{cid: b.Cid(), data: b.RawData()} + } + }() + + return ret + } + + path := filepath.Join(t.TempDir(), "readwrite-resume.car") + writer, err := os.Create(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, writer.Close()) }) + // Create an incomplete CARv2 file with no blocks put. + subject, err := storage.NewReadableWritable(writer, roots, carv2.UseWholeCIDs(true)) + require.NoError(t, err) + + // For each block resume on the same file, putting blocks one at a time. + var wantBlockCountSoFar, idCidCount int + wantBlocks := make(map[cid.Cid]simpleBlock) + for b := range blockSource() { + wantBlockCountSoFar++ + wantBlocks[b.cid] = b + + dmh, err := multihash.Decode(b.cid.Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + idCidCount++ + } + + // 30% chance of subject failing; more concretely: re-instantiating the StorageCar with the same + // file without calling Finalize. The higher this percentage the slower the test runs + // considering the number of blocks in the original CARv1 test payload. 
+ resume := rng.Float32() <= 0.3 + // If testing resume case, then flip a coin to decide whether to finalize before the StorageCar + // re-instantiation or not. Note, both cases should work for resumption since we do not + // limit resumption to unfinalized files. + finalizeBeforeResumption := rng.Float32() <= 0.5 + if resume { + if finalizeBeforeResumption { + require.NoError(t, subject.Finalize()) + } + + _, err := writer.Seek(0, io.SeekStart) + require.NoError(t, err) + subject, err = storage.OpenReadableWritable(writer, roots, carv2.UseWholeCIDs(true)) + require.NoError(t, err) + } + require.NoError(t, subject.Put(ctx, b.cid.KeyString(), b.data)) + + // With 10% chance test read operations on an resumed read-write StorageCar. + // We don't test on every put to reduce test runtime. + testRead := rng.Float32() <= 0.1 + if testRead { + // Assert read operations on the read-write StorageCar are as expected when resumed from an + // existing file + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + t.Cleanup(cancel) + for k, wantBlock := range wantBlocks { + has, err := subject.Has(ctx, k.KeyString()) + require.NoError(t, err) + require.True(t, has) + gotBlock, err := subject.Get(ctx, k.KeyString()) + require.NoError(t, err) + require.Equal(t, wantBlock.data, gotBlock) + } + // Assert the number of blocks in file are as expected calculated via AllKeysChan + require.Equal(t, wantBlockCountSoFar, len(wantBlocks)) + } + } + + // Finalize the StorageCar to complete partially written CARv2 file. + subject, err = storage.OpenReadableWritable(writer, roots, carv2.UseWholeCIDs(true)) + require.NoError(t, err) + require.NoError(t, subject.Finalize()) + + // Assert resumed from file is a valid CARv2 with index. + v2f, err := os.Open(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, v2f.Close()) }) + v2r, err := carv2.NewReader(v2f) + require.NoError(t, err) + require.True(t, v2r.Header.HasIndex()) + + // Assert CARv1 payload in file matches the original CARv1 payload. + _, err = v1f.Seek(0, io.SeekStart) + require.NoError(t, err) + wantPayloadReader, err := carv1.NewCarReader(v1f) + require.NoError(t, err) + + dr, err := v2r.DataReader() + require.NoError(t, err) + gotPayloadReader, err := carv1.NewCarReader(dr) + require.NoError(t, err) + + require.Equal(t, wantPayloadReader.Header, gotPayloadReader.Header) + for { + wantNextBlock, wantErr := wantPayloadReader.Next() + if wantErr == io.EOF { + gotNextBlock, gotErr := gotPayloadReader.Next() + require.Equal(t, wantErr, gotErr) + require.Nil(t, gotNextBlock) + break + } + require.NoError(t, wantErr) + + dmh, err := multihash.Decode(wantNextBlock.Cid().Hash()) + require.NoError(t, err) + if dmh.Code == multihash.IDENTITY { + continue + } + + gotNextBlock, gotErr := gotPayloadReader.Next() + require.NoError(t, gotErr) + require.Equal(t, wantNextBlock, gotNextBlock) + } + + // Assert index in resumed from file is identical to index generated from the data payload portion of the generated CARv2 file. 
+	_, err = v1f.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+	ir, err := v2r.IndexReader()
+	require.NoError(t, err)
+	gotIdx, err := index.ReadFrom(ir)
+	require.NoError(t, err)
+	dr, err = v2r.DataReader()
+	require.NoError(t, err)
+	wantIdx, err := carv2.GenerateIndex(dr)
+	require.NoError(t, err)
+	require.Equal(t, wantIdx, gotIdx)
+}
+
+func TestResumptionV1(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	srcPath := "../testdata/sample-v1.car"
+
+	v1f, err := os.Open(srcPath)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, v1f.Close()) })
+	rd, err := carv2.NewReader(v1f)
+	require.NoError(t, err)
+	roots, err := rd.Roots()
+	require.NoError(t, err)
+
+	blockSource := func() <-chan simpleBlock {
+		v1f, err := os.Open(srcPath)
+		require.NoError(t, err)
+		t.Cleanup(func() { require.NoError(t, v1f.Close()) })
+		r, err := carv1.NewCarReader(v1f)
+		require.NoError(t, err)
+		ret := make(chan simpleBlock)
+
+		go func() {
+			for {
+				b, err := r.Next()
+				if err == io.EOF {
+					close(ret)
+					break
+				}
+				require.NoError(t, err)
+				ret <- simpleBlock{cid: b.Cid(), data: b.RawData()}
+			}
+		}()
+
+		return ret
+	}
+
+	path := filepath.Join(t.TempDir(), "readwrite-resume-v1.car")
+	writer, err := os.Create(path)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, writer.Close()) })
+	// Create an incomplete CARv1 file with no blocks put.
+	subject, err := storage.NewReadableWritable(writer, roots, carv2.UseWholeCIDs(true), carv2.WriteAsCarV1(true))
+	require.NoError(t, err)
+
+	// For each block, resume on the same file, putting blocks one at a time.
+	var wantBlockCountSoFar, idCidCount int
+	wantBlocks := make(map[cid.Cid]simpleBlock)
+	for b := range blockSource() {
+		wantBlockCountSoFar++
+		wantBlocks[b.cid] = b
+
+		dmh, err := multihash.Decode(b.cid.Hash())
+		require.NoError(t, err)
+		if dmh.Code == multihash.IDENTITY {
+			idCidCount++
+		}
+
+		// 30% chance of subject failing; more concretely: re-instantiating the StorageCar with the same
+		// file without calling Finalize. The higher this percentage the slower the test runs,
+		// considering the number of blocks in the original CARv1 test payload.
+		resume := rng.Float32() <= 0.3
+		// If testing the resume case, then flip a coin to decide whether to finalize before the
+		// StorageCar re-instantiation or not. Note, both cases should work for resumption since we
+		// do not limit resumption to unfinalized files.
+		finalizeBeforeResumption := rng.Float32() <= 0.5
+		if resume {
+			if finalizeBeforeResumption {
+				require.NoError(t, subject.Finalize())
+			}
+
+			_, err := writer.Seek(0, io.SeekStart)
+			require.NoError(t, err)
+			subject, err = storage.OpenReadableWritable(writer, roots, carv2.UseWholeCIDs(true), carv2.WriteAsCarV1(true))
+			require.NoError(t, err)
+		}
+		require.NoError(t, subject.Put(ctx, b.cid.KeyString(), b.data))
+
+		// With 10% chance, test read operations on a resumed read-write StorageCar.
+		// We don't test on every put to reduce test runtime.
+		testRead := rng.Float32() <= 0.1
+		if testRead {
+			// Assert read operations on the read-write StorageCar are as expected when resumed from an
+			// existing file.
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			t.Cleanup(cancel)
+			for k, wantBlock := range wantBlocks {
+				has, err := subject.Has(ctx, k.KeyString())
+				require.NoError(t, err)
+				require.True(t, has)
+				gotBlock, err := subject.Get(ctx, k.KeyString())
+				require.NoError(t, err)
+				require.Equal(t, wantBlock.data, gotBlock)
+			}
+			// Assert the number of blocks put so far matches the number tracked.
+			require.Equal(t, wantBlockCountSoFar, len(wantBlocks))
+		}
+	}
+
+	// Finalize the StorageCar to complete the partially written CARv1 file.
+	subject, err = storage.OpenReadableWritable(writer, roots, carv2.UseWholeCIDs(true), carv2.WriteAsCarV1(true))
+	require.NoError(t, err)
+	require.NoError(t, subject.Finalize())
+
+	// Assert the resumed file is a plain CARv1 without an index.
+	v2f, err := os.Open(path)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, v2f.Close()) })
+	v2r, err := carv2.NewReader(v2f)
+	require.NoError(t, err)
+	require.False(t, v2r.Header.HasIndex())
+	require.Equal(t, uint64(1), v2r.Version)
+
+	_, err = v1f.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+	wantPayloadReader, err := carv1.NewCarReader(v1f)
+	require.NoError(t, err)
+
+	dr, err := v2r.DataReader() // since this is a v1, we're just reading from the top of the file
+	require.NoError(t, err)
+	gotPayloadReader, err := carv1.NewCarReader(dr)
+	require.NoError(t, err)
+
+	require.Equal(t, wantPayloadReader.Header, gotPayloadReader.Header)
+	for {
+		wantNextBlock, wantErr := wantPayloadReader.Next()
+		if wantErr == io.EOF {
+			gotNextBlock, gotErr := gotPayloadReader.Next()
+			require.Equal(t, wantErr, gotErr)
+			require.Nil(t, gotNextBlock)
+			break
+		}
+		require.NoError(t, wantErr)
+
+		dmh, err := multihash.Decode(wantNextBlock.Cid().Hash())
+		require.NoError(t, err)
+		if dmh.Code == multihash.IDENTITY {
+			continue
+		}
+
+		gotNextBlock, gotErr := gotPayloadReader.Next()
+		require.NoError(t, gotErr)
+		require.Equal(t, wantNextBlock, gotNextBlock)
+	}
+}
+
+func TestResumptionIsSupportedOnFinalizedFile(t *testing.T) {
+	path := filepath.Join(t.TempDir(), "readwrite-resume-finalized.car")
+	v2f, err := os.Create(path)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, v2f.Close()) })
+	// Create an incomplete CARv2 file with no blocks put.
+	subject, err := storage.NewReadableWritable(v2f, []cid.Cid{})
+	require.NoError(t, err)
+	require.NoError(t, subject.Finalize())
+
+	reopen, err := os.OpenFile(path, os.O_RDWR, 0o666)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, reopen.Close()) })
+	subject, err = storage.NewReadableWritable(reopen, []cid.Cid{})
+	require.NoError(t, err)
+	t.Cleanup(func() { subject.Finalize() })
+}
+
+func TestReadWriteErrorsOnlyWhenFinalized(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	testCid1, testData1 := randBlock()
+	testCid2, testData2 := randBlock()
+
+	wantRoots := []cid.Cid{testCid1, testCid2}
+	path := filepath.Join(t.TempDir(), "readwrite-finalized-panic.car")
+	writer, err := os.Create(path)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, writer.Close()) })
+
+	subject, err := storage.NewReadableWritable(writer, wantRoots)
+	require.NoError(t, err)
+
+	require.NoError(t, subject.Put(ctx, testCid1.KeyString(), testData1))
+	require.NoError(t, subject.Put(ctx, testCid2.KeyString(), testData2))
+
+	gotBlock, err := subject.Get(ctx, testCid1.KeyString())
+	require.NoError(t, err)
+	require.Equal(t, testData1, gotBlock)
+
+	gotRoots := subject.Roots()
+	require.Equal(t, wantRoots, gotRoots)
+
+	has, err := subject.Has(ctx, testCid1.KeyString())
+	require.NoError(t, err)
+	require.True(t, has)
+
+	require.NoError(t, subject.Finalize())
+	require.Error(t, subject.Finalize())
+
+	_, ok := (interface{})(subject).(io.Closer)
+	require.False(t, ok)
+
+	_, err = subject.Get(ctx, testCid1.KeyString())
+	require.Error(t, err)
+	_, err = subject.Has(ctx, testCid2.KeyString())
+	require.Error(t, err)
+
+	require.Error(t, subject.Put(ctx, testCid1.KeyString(), testData1))
+}
+
+func TestReadWriteResumptionMismatchingRootsIsError(t *testing.T) {
+	tmpPath := requireTmpCopy(t, "../testdata/sample-wrapped-v2.car")
+
+	origContent, err := os.ReadFile(tmpPath)
+	require.NoError(t, err)
+
+	badRoot := randCid()
+	writer, err := os.OpenFile(tmpPath, os.O_RDWR, 0o666)
+	require.NoError(t, err)
+	t.Cleanup(func() { writer.Close() })
+	subject, err := storage.OpenReadableWritable(writer, []cid.Cid{badRoot})
+	require.EqualError(t, err, "cannot resume on file with mismatching data header")
+	require.Nil(t, subject)
+	require.NoError(t, writer.Close())
+
+	newContent, err := os.ReadFile(tmpPath)
+	require.NoError(t, err)
+
+	// Expect the bad file to be left untouched; check the size first.
+	// If the sizes mismatch, printing a huge diff would not help us.
+ require.Equal(t, len(origContent), len(newContent)) + require.Equal(t, origContent, newContent) +} + +func requireTmpCopy(t *testing.T, src string) string { + srcF, err := os.Open(src) + require.NoError(t, err) + defer func() { require.NoError(t, srcF.Close()) }() + stats, err := srcF.Stat() + require.NoError(t, err) + + dst := filepath.Join(t.TempDir(), stats.Name()) + dstF, err := os.Create(dst) + require.NoError(t, err) + defer func() { require.NoError(t, dstF.Close()) }() + + _, err = io.Copy(dstF, srcF) + require.NoError(t, err) + return dst +} + +func TestReadWriteResumptionFromFileWithDifferentCarV1PaddingIsError(t *testing.T) { + testCid1, testData1 := randBlock() + wantRoots := []cid.Cid{testCid1} + path := filepath.Join(t.TempDir(), "readwrite-resume-with-padding.car") + writer, err := os.Create(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, writer.Close()) }) + subject, err := storage.NewReadableWritable( + writer, + wantRoots, + carv2.UseDataPadding(1413)) + require.NoError(t, err) + require.NoError(t, subject.Put(context.TODO(), testCid1.KeyString(), testData1)) + require.NoError(t, subject.Finalize()) + + subject, err = storage.OpenReadableWritable( + writer, + wantRoots, + carv2.UseDataPadding(1314)) + require.EqualError(t, err, "cannot resume from file with mismatched CARv1 offset; "+ + "`WithDataPadding` option must match the padding on file. "+ + "Expected padding value of 1413 but got 1314") + require.Nil(t, subject) +} + +func TestOperationsErrorWithBadCidStrings(t *testing.T) { + testCid, testData := randBlock() + path := filepath.Join(t.TempDir(), "badkeys.car") + writer, err := os.Create(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, writer.Close()) }) + subject, err := storage.NewReadableWritable(writer, []cid.Cid{}) + require.NoError(t, err) + + require.NoError(t, subject.Put(context.TODO(), testCid.KeyString(), testData)) + require.ErrorContains(t, subject.Put(context.TODO(), fmt.Sprintf("%s/nope", testCid.KeyString()), testData), "bad CID key") + require.ErrorContains(t, subject.Put(context.TODO(), "nope", testData), "bad CID key") + + has, err := subject.Has(context.TODO(), testCid.KeyString()) + require.NoError(t, err) + require.True(t, has) + has, err = subject.Has(context.TODO(), fmt.Sprintf("%s/nope", testCid.KeyString())) + require.ErrorContains(t, err, "bad CID key") + require.False(t, has) + has, err = subject.Has(context.TODO(), "nope") + require.ErrorContains(t, err, "bad CID key") + require.False(t, has) + + got, err := subject.Get(context.TODO(), testCid.KeyString()) + require.NoError(t, err) + require.NotNil(t, got) + got, err = subject.Get(context.TODO(), fmt.Sprintf("%s/nope", testCid.KeyString())) + require.ErrorContains(t, err, "bad CID key") + require.Nil(t, got) + got, err = subject.Get(context.TODO(), "nope") + require.ErrorContains(t, err, "bad CID key") + require.Nil(t, got) +} + +func TestWholeCID(t *testing.T) { + for _, whole := range []bool{true, false} { + whole := whole + t.Run(fmt.Sprintf("whole=%t", whole), func(t *testing.T) { + t.Parallel() + ctx := context.Background() + path := filepath.Join(t.TempDir(), fmt.Sprintf("writable_%t.car", whole)) + out, err := os.Create(path) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, out.Close()) }) + store, err := storage.NewReadableWritable(out, []cid.Cid{}, carv2.UseWholeCIDs(whole)) + require.NoError(t, err) + + c1, b1 := randBlock() + c2, b2 := randBlock() + + require.NoError(t, store.Put(ctx, c1.KeyString(), b1)) + has, 
err := store.Has(ctx, c1.KeyString()) + require.NoError(t, err) + require.True(t, has) + + pref := c1.Prefix() + pref.Codec = cid.DagProtobuf + pref.Version = 1 + cpb1, err := pref.Sum(b1) + require.NoError(t, err) + + has, err = store.Has(ctx, cpb1.KeyString()) + require.NoError(t, err) + require.Equal(t, has, !whole) + + require.NoError(t, store.Put(ctx, c2.KeyString(), b1)) + has, err = store.Has(ctx, c2.KeyString()) + require.NoError(t, err) + require.True(t, has) + has, err = store.Has(ctx, cpb1.KeyString()) + require.NoError(t, err) + require.Equal(t, has, !whole) + + pref = c2.Prefix() + pref.Codec = cid.DagProtobuf + pref.Version = 1 + cpb2, err := pref.Sum(b2) + require.NoError(t, err) + + has, err = store.Has(ctx, cpb2.KeyString()) + require.NoError(t, err) + require.Equal(t, has, !whole) + has, err = store.Has(ctx, cpb1.KeyString()) + require.NoError(t, err) + require.Equal(t, has, !whole) + }) + } +} + +type writerOnly struct { + io.Writer +} + +func (w *writerOnly) Write(p []byte) (n int, err error) { + return w.Writer.Write(p) +} + +type writerAtOnly struct { + *os.File +} + +func (w *writerAtOnly) WriteAt(p []byte, off int64) (n int, err error) { + return w.File.WriteAt(p, off) +} + +func (w *writerAtOnly) Write(p []byte) (n int, err error) { + return w.File.Write(p) +} + +func randBlock() (cid.Cid, []byte) { + data := make([]byte, 1024) + rngLk.Lock() + rng.Read(data) + rngLk.Unlock() + h, err := multihash.Sum(data, multihash.SHA2_512, -1) + if err != nil { + panic(err) + } + return cid.NewCidV1(cid.Raw, h), data +} + +func randCid() cid.Cid { + b := make([]byte, 32) + rngLk.Lock() + rng.Read(b) + rngLk.Unlock() + mh, _ := multihash.Encode(b, multihash.SHA2_256) + return cid.NewCidV1(cid.DagProtobuf, mh) +} + +func randIdentityCid() cid.Cid { + b := make([]byte, 32) + rngLk.Lock() + rng.Read(b) + rngLk.Unlock() + mh, _ := multihash.Encode(b, multihash.IDENTITY) + return cid.NewCidV1(cid.Raw, mh) +} + +type bufferReaderAt []byte + +func (b bufferReaderAt) ReadAt(p []byte, off int64) (int, error) { + if off >= int64(len(b)) { + return 0, io.EOF + } + return copy(p, b[off:]), nil +} + +type simpleBlock struct { + cid cid.Cid + data []byte +} diff --git a/ipld/car/v2/testdata/fuzz/FuzzBlockReader/c3c7eedeb4968a5b3131371ba9f5138a38c882a7fa06456595998e740a9f5a14 b/ipld/car/v2/testdata/fuzz/FuzzBlockReader/c3c7eedeb4968a5b3131371ba9f5138a38c882a7fa06456595998e740a9f5a14 new file mode 100644 index 0000000000..ae9262d491 --- /dev/null +++ b/ipld/car/v2/testdata/fuzz/FuzzBlockReader/c3c7eedeb4968a5b3131371ba9f5138a38c882a7fa06456595998e740a9f5a14 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xff\x80\xaa\x95\xa6sion\x01,\x01q\x12 \x15\x1f\xe9\xe7 dataSize { + // Truncate to the expected size to assure the resulting file is a correctly sized CARv1. + err = dst.Truncate(written) + } + + return err +} + +// AttachIndex attaches a given index to an existing CARv2 file at given path and offset. +func AttachIndex(path string, idx index.Index, offset uint64) error { + // TODO: instead of offset, maybe take padding? + // TODO: check that the given path is indeed a CARv2. + // TODO: update CARv2 header according to the offset at which index is written out. 
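+	// For illustration, a hedged usage sketch (not exercised by the tests in this
+	// change): for a CARv2 whose header is already accurate, the index offset can
+	// be derived from the header itself before attaching:
+	//
+	//	r, _ := OpenReader(path)
+	//	offset := r.Header.DataOffset + r.Header.DataSize
+	//	_ = r.Close()
+	//	_ = AttachIndex(path, idx, offset)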
+	out, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o640)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+	indexWriter := internalio.NewOffsetWriter(out, int64(offset))
+	_, err = index.WriteTo(idx, indexWriter)
+	return err
+}
+
+// ReplaceRootsInFile replaces the root CIDs in the CAR file at the given path with the given roots.
+// This function accepts both CARv1 and CARv2 files.
+//
+// Note that the roots are only replaced if their total serialized size exactly matches the total
+// serialized size of the existing roots in the CAR file.
+func ReplaceRootsInFile(path string, roots []cid.Cid, opts ...Option) (err error) {
+	f, err := os.OpenFile(path, os.O_RDWR, 0o666)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		// Close the file, and override the return error with the close error if the
+		// return error is nil.
+		if cerr := f.Close(); err == nil {
+			err = cerr
+		}
+	}()
+
+	options := ApplyOptions(opts...)
+
+	// Read the header or pragma; note that both are valid CARv1 headers.
+	header, err := carv1.ReadHeader(f, options.MaxAllowedHeaderSize)
+	if err != nil {
+		return err
+	}
+
+	var currentSize int64
+	var newHeaderOffset int64
+	switch header.Version {
+	case 1:
+		// When the given file is a CARv1:
+		//   1. The offset at which the new header should be written is zero (newHeaderOffset = 0).
+		//   2. The current header size is equal to the number of bytes read.
+		//
+		// Note that we explicitly avoid using carv1.HeaderSize to determine the current header size.
+		// This is based on the fact that carv1.ReadHeader does not read any extra bytes.
+		// Therefore, we can avoid the extra allocations of carv1.HeaderSize by simply
+		// counting the bytes read so far.
+		currentSize, err = f.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return err
+		}
+	case 2:
+		// When the given file is a CARv2:
+		//   1. The offset at which the new header should be written is carv2.Header.DataOffset.
+		//   2. The inner CARv1 header size is equal to the number of bytes read minus carv2.Header.DataOffset.
+		var v2h Header
+		if _, err = v2h.ReadFrom(f); err != nil {
+			return err
+		}
+		newHeaderOffset = int64(v2h.DataOffset)
+		if _, err = f.Seek(newHeaderOffset, io.SeekStart); err != nil {
+			return err
+		}
+		var innerV1Header *carv1.CarHeader
+		innerV1Header, err = carv1.ReadHeader(f, options.MaxAllowedHeaderSize)
+		if err != nil {
+			return err
+		}
+		if innerV1Header.Version != 1 {
+			// Return immediately; otherwise this error would be silently overwritten
+			// by the Seek below.
+			return fmt.Errorf("invalid data payload header: expected version 1, got %d", innerV1Header.Version)
+		}
+		var readSoFar int64
+		readSoFar, err = f.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return err
+		}
+		currentSize = readSoFar - newHeaderOffset
+	default:
+		err = fmt.Errorf("invalid car version: %d", header.Version)
+		return err
+	}
+
+	newHeader := &carv1.CarHeader{
+		Roots:   roots,
+		Version: 1,
+	}
+	// Serialize the new header directly instead of using carv1.HeaderSize,
+	// because carv1.HeaderSize serializes the header to calculate its size anyway.
+	// By serializing directly we get the replacement bytes and the size in one pass.
+	// Otherwise, we would end up serializing the new header twice:
+	// once through carv1.HeaderSize, and
+	// once to write it out.
+	var buf bytes.Buffer
+	if err = carv1.WriteHeader(newHeader, &buf); err != nil {
+		return err
+	}
+	// Assert the header sizes are consistent.
+	newSize := int64(buf.Len())
+	if currentSize != newSize {
+		return fmt.Errorf("current header size (%d) must match replacement header size (%d)", currentSize, newSize)
+	}
+	// Seek to the offset at which the new header should be written.
+	if _, err = f.Seek(newHeaderOffset, io.SeekStart); err != nil {
+		return err
+	}
+	_, err = f.Write(buf.Bytes())
+	return err
+}
diff --git a/ipld/car/v2/writer_test.go b/ipld/car/v2/writer_test.go
new file mode 100644
index 0000000000..517a820dbb
--- /dev/null
+++ b/ipld/car/v2/writer_test.go
@@ -0,0 +1,282 @@
+package car
+
+import (
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/ipfs/boxo/ipld/car/v2/index"
+	"github.com/ipfs/boxo/ipld/car/v2/internal/carv1"
+	"github.com/stretchr/testify/require"
+
+	"github.com/ipfs/boxo/ipld/merkledag"
+	dstest "github.com/ipfs/boxo/ipld/merkledag/test"
+	"github.com/ipfs/go-cid"
+	format "github.com/ipfs/go-ipld-format"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWrapV1(t *testing.T) {
+	// Produce a CARv1 file to test wrapping with.
+	dagSvc := dstest.Mock()
+	src := filepath.Join(t.TempDir(), "unwrapped-test-v1.car")
+	sf, err := os.Create(src)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, sf.Close()) })
+	require.NoError(t, carv1.WriteCar(context.Background(), dagSvc, generateRootCid(t, dagSvc), sf))
+
+	// Wrap the test CARv1 file.
+	dest := filepath.Join(t.TempDir(), "wrapped-test-v1.car")
+	df, err := os.Create(dest)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, df.Close()) })
+	_, err = sf.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+	require.NoError(t, WrapV1(sf, df))
+
+	// Assert the wrapped file is a valid CARv2 with a CARv1 data payload matching the original CARv1 file.
+	subject, err := OpenReader(dest)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, subject.Close()) })
+
+	// Assert CARv1 data payloads are identical.
+	_, err = sf.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+	wantPayload, err := io.ReadAll(sf)
+	require.NoError(t, err)
+	dr, err := subject.DataReader()
+	require.NoError(t, err)
+	gotPayload, err := io.ReadAll(dr)
+	require.NoError(t, err)
+	require.Equal(t, wantPayload, gotPayload)
+
+	// Assert the embedded index in the CARv2 is the same as an index generated from the original CARv1.
+	wantIdx, err := GenerateIndexFromFile(src)
+	require.NoError(t, err)
+	ir, err := subject.IndexReader()
+	require.NoError(t, err)
+	gotIdx, err := index.ReadFrom(ir)
+	require.NoError(t, err)
+	require.Equal(t, wantIdx, gotIdx)
+}
+
+func TestExtractV1(t *testing.T) {
+	// Produce a CARv1 file to test.
+	dagSvc := dstest.Mock()
+	v1Src := filepath.Join(t.TempDir(), "original-test-v1.car")
+	v1f, err := os.Create(v1Src)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, v1f.Close()) })
+	require.NoError(t, carv1.WriteCar(context.Background(), dagSvc, generateRootCid(t, dagSvc), v1f))
+	_, err = v1f.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+	wantV1, err := io.ReadAll(v1f)
+	require.NoError(t, err)
+
+	// Wrap the produced CARv1 into a CARv2 to use for testing.
+	v2path := filepath.Join(t.TempDir(), "wrapped-for-extract-test-v2.car")
+	require.NoError(t, WrapV1File(v1Src, v2path))
+
+	// Assert extracting from the CARv2 file works as expected.
+ dstPath := filepath.Join(t.TempDir(), "extract-file-test-v1.car") + require.NoError(t, ExtractV1File(v2path, dstPath)) + gotFromFile, err := os.ReadFile(dstPath) + require.NoError(t, err) + require.Equal(t, wantV1, gotFromFile) + + // Assert extract from CARv2 file in-place is as expected + require.NoError(t, ExtractV1File(v2path, v2path)) + gotFromInPlaceFile, err := os.ReadFile(v2path) + require.NoError(t, err) + require.Equal(t, wantV1, gotFromInPlaceFile) +} + +func TestExtractV1WithUnknownVersionIsError(t *testing.T) { + dstPath := filepath.Join(t.TempDir(), "extract-dst-file-test-v42.car") + err := ExtractV1File("testdata/sample-rootless-v42.car", dstPath) + require.EqualError(t, err, "source version must be 2; got: 42") +} + +func TestExtractV1FromACarV1IsError(t *testing.T) { + dstPath := filepath.Join(t.TempDir(), "extract-dst-file-test-v1.car") + err := ExtractV1File("testdata/sample-v1.car", dstPath) + require.Equal(t, ErrAlreadyV1, err) +} + +func generateRootCid(t *testing.T, adder format.NodeAdder) []cid.Cid { + // TODO convert this into a utility testing lib that takes an rng and generates a random DAG with some threshold for depth/breadth. + this := merkledag.NewRawNode([]byte("fish")) + that := merkledag.NewRawNode([]byte("lobster")) + other := merkledag.NewRawNode([]byte("🌊")) + + one := &merkledag.ProtoNode{} + assertAddNodeLink(t, one, this, "fishmonger") + + another := &merkledag.ProtoNode{} + assertAddNodeLink(t, another, one, "barreleye") + assertAddNodeLink(t, another, that, "🐡") + + andAnother := &merkledag.ProtoNode{} + assertAddNodeLink(t, andAnother, another, "🍤") + + assertAddNodes(t, adder, this, that, other, one, another, andAnother) + return []cid.Cid{andAnother.Cid()} +} + +func assertAddNodeLink(t *testing.T, pn *merkledag.ProtoNode, fn format.Node, name string) { + assert.NoError(t, pn.AddNodeLink(name, fn)) +} + +func assertAddNodes(t *testing.T, adder format.NodeAdder, nds ...format.Node) { + for _, nd := range nds { + assert.NoError(t, adder.Add(context.Background(), nd)) + } +} + +func TestReplaceRootsInFile(t *testing.T) { + tests := []struct { + name string + path string + roots []cid.Cid + wantErrMsg string + }{ + { + name: "CorruptPragmaIsRejected", + path: "testdata/sample-corrupt-pragma.car", + wantErrMsg: "unexpected EOF", + }, + { + name: "CARv42IsRejected", + path: "testdata/sample-rootless-v42.car", + wantErrMsg: "invalid car version: 42", + }, + { + name: "CARv1RootsOfDifferentSizeAreNotReplaced", + path: "testdata/sample-v1.car", + wantErrMsg: "current header size (61) must match replacement header size (18)", + }, + { + name: "CARv2RootsOfDifferentSizeAreNotReplaced", + path: "testdata/sample-wrapped-v2.car", + wantErrMsg: "current header size (61) must match replacement header size (18)", + }, + { + name: "CARv1NonEmptyRootsOfDifferentSizeAreNotReplaced", + path: "testdata/sample-v1.car", + roots: []cid.Cid{requireDecodedCid(t, "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")}, + wantErrMsg: "current header size (61) must match replacement header size (57)", + }, + { + name: "CARv1ZeroLenNonEmptyRootsOfDifferentSizeAreNotReplaced", + path: "testdata/sample-v1-with-zero-len-section.car", + roots: []cid.Cid{merkledag.NewRawNode([]byte("fish")).Cid()}, + wantErrMsg: "current header size (61) must match replacement header size (59)", + }, + { + name: "CARv2NonEmptyRootsOfDifferentSizeAreNotReplaced", + path: "testdata/sample-wrapped-v2.car", + roots: []cid.Cid{merkledag.NewRawNode([]byte("fish")).Cid()}, + wantErrMsg: "current header size 
(61) must match replacement header size (59)",
+		},
+		{
+			name:       "CARv2IndexlessNonEmptyRootsOfDifferentSizeAreNotReplaced",
+			path:       "testdata/sample-v2-indexless.car",
+			roots:      []cid.Cid{merkledag.NewRawNode([]byte("fish")).Cid()},
+			wantErrMsg: "current header size (61) must match replacement header size (59)",
+		},
+		{
+			name:  "CARv1SameSizeRootsAreReplaced",
+			path:  "testdata/sample-v1.car",
+			roots: []cid.Cid{requireDecodedCid(t, "bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5od")},
+		},
+		{
+			name:  "CARv2SameSizeRootsAreReplaced",
+			path:  "testdata/sample-wrapped-v2.car",
+			roots: []cid.Cid{requireDecodedCid(t, "bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oi")},
+		},
+		{
+			name:  "CARv2IndexlessSameSizeRootsAreReplaced",
+			path:  "testdata/sample-v2-indexless.car",
+			roots: []cid.Cid{requireDecodedCid(t, "bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5oi")},
+		},
+		{
+			name:  "CARv1ZeroLenSameSizeRootsAreReplaced",
+			path:  "testdata/sample-v1-with-zero-len-section.car",
+			roots: []cid.Cid{requireDecodedCid(t, "bafy2bzaced4ueelaegfs5fqu4tzsh6ywbbpfk3cxppupmxfdhbpbhzawfw5o5")},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Make a copy of the input file to preserve the original for comparison.
+			// This also avoids modifying files in testdata.
+			tmpCopy := requireTmpCopy(t, tt.path)
+			err := ReplaceRootsInFile(tmpCopy, tt.roots)
+			if tt.wantErrMsg != "" {
+				require.EqualError(t, err, tt.wantErrMsg)
+				return
+			}
+			require.NoError(t, err)
+
+			original, err := os.Open(tt.path)
+			require.NoError(t, err)
+			defer func() { require.NoError(t, original.Close()) }()
+
+			target, err := os.Open(tmpCopy)
+			require.NoError(t, err)
+			defer func() { require.NoError(t, target.Close()) }()
+
+			// Assert the file size has not changed.
+			wantStat, err := original.Stat()
+			require.NoError(t, err)
+			gotStat, err := target.Stat()
+			require.NoError(t, err)
+			require.Equal(t, wantStat.Size(), gotStat.Size())
+
+			wantReader, err := NewBlockReader(original, ZeroLengthSectionAsEOF(true))
+			require.NoError(t, err)
+			gotReader, err := NewBlockReader(target, ZeroLengthSectionAsEOF(true))
+			require.NoError(t, err)
+
+			// Assert roots are replaced.
+			require.Equal(t, tt.roots, gotReader.Roots)
+
+			// Assert data blocks are identical.
+			for {
+				wantNext, wantErr := wantReader.Next()
+				gotNext, gotErr := gotReader.Next()
+				if wantErr == io.EOF {
+					require.Equal(t, io.EOF, gotErr)
+					break
+				}
+				require.NoError(t, wantErr)
+				require.NoError(t, gotErr)
+				require.Equal(t, wantNext, gotNext)
+			}
+		})
+	}
+}
+
+func requireDecodedCid(t *testing.T, s string) cid.Cid {
+	decoded, err := cid.Decode(s)
+	require.NoError(t, err)
+	return decoded
+}
+
+func requireTmpCopy(t *testing.T, src string) string {
+	srcF, err := os.Open(src)
+	require.NoError(t, err)
+	defer func() { require.NoError(t, srcF.Close()) }()
+	stats, err := srcF.Stat()
+	require.NoError(t, err)
+
+	dst := filepath.Join(t.TempDir(), stats.Name())
+	dstF, err := os.Create(dst)
+	require.NoError(t, err)
+	defer func() { require.NoError(t, dstF.Close()) }()
+
+	_, err = io.Copy(dstF, srcF)
+	require.NoError(t, err)
+	return dst
+}
diff --git a/ipld/merkledag/coding.go b/ipld/merkledag/coding.go
new file mode 100644
index 0000000000..e88f2dd325
--- /dev/null
+++ b/ipld/merkledag/coding.go
@@ -0,0 +1,218 @@
+package merkledag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	format "github.com/ipfs/go-ipld-format"
+	pb "github.com/ipfs/boxo/ipld/merkledag/pb"
+	dagpb "github.com/ipld/go-codec-dagpb"
+	ipld "github.com/ipld/go-ipld-prime"
+	"github.com/ipld/go-ipld-prime/fluent/qp"
+	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+)
+
+// Make sure the user doesn't upgrade this file.
+// We need to check *here* as well as inside the `pb` package *just* in case the
+// user replaces *all* go files in that package.
+const _ = pb.DoNotUpgradeFileEverItWillChangeYourHashes
+
+// For now, we use a PBNode intermediate form, because native Go objects are
+// nice to work with.
+
+// pbLinkSlice is a slice of pb.PBLink, similar to LinkSlice but for sorting the
+// PB form.
+type pbLinkSlice []*pb.PBLink
+
+func (pbls pbLinkSlice) Len() int           { return len(pbls) }
+func (pbls pbLinkSlice) Swap(a, b int)      { pbls[a], pbls[b] = pbls[b], pbls[a] }
+func (pbls pbLinkSlice) Less(a, b int) bool { return *pbls[a].Name < *pbls[b].Name }
+
+// unmarshal decodes raw data into a *Node instance.
+// The conversion uses an intermediate PBNode.
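+// A round trip through the exported wrappers looks like this (a sketch; both
+// functions are defined in this file):
+//
+//	enc, _ := nd.Marshal()
+//	back, err := DecodeProtobuf(enc) // DecodeProtobuf calls unmarshal internally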
+func unmarshal(encodedBytes []byte) (*ProtoNode, error) {
+	nb := dagpb.Type.PBNode.NewBuilder()
+	if err := dagpb.DecodeBytes(nb, encodedBytes); err != nil {
+		return nil, err
+	}
+	nd := nb.Build()
+	return fromImmutableNode(&immutableProtoNode{encodedBytes, nd.(dagpb.PBNode)}), nil
+}
+
+func fromImmutableNode(encoded *immutableProtoNode) *ProtoNode {
+	n := new(ProtoNode)
+	n.encoded = encoded
+	if n.encoded.PBNode.Data.Exists() {
+		n.data = n.encoded.PBNode.Data.Must().Bytes()
+	}
+	numLinks := n.encoded.PBNode.Links.Length()
+	// links may not be sorted after deserialization, but we don't change
+	// them until we mutate this node since we're representing the current,
+	// as-serialized state
+	n.links = make([]*format.Link, numLinks)
+	linkAllocs := make([]format.Link, numLinks)
+	for i := int64(0); i < numLinks; i++ {
+		next := n.encoded.PBNode.Links.Lookup(i)
+		name := ""
+		if next.FieldName().Exists() {
+			name = next.FieldName().Must().String()
+		}
+		c := next.FieldHash().Link().(cidlink.Link).Cid
+		size := uint64(0)
+		if next.FieldTsize().Exists() {
+			size = uint64(next.FieldTsize().Must().Int())
+		}
+		link := &linkAllocs[i]
+		link.Name = name
+		link.Size = size
+		link.Cid = c
+		n.links[i] = link
+	}
+	// we don't set n.linksDirty because the order of the links list from
+	// serialized form needs to be stable, until we start mutating the ProtoNode
+	return n
+}
+
+func (n *ProtoNode) marshalImmutable() (*immutableProtoNode, error) {
+	links := n.Links()
+	nd, err := qp.BuildMap(dagpb.Type.PBNode, 2, func(ma ipld.MapAssembler) {
+		qp.MapEntry(ma, "Links", qp.List(int64(len(links)), func(la ipld.ListAssembler) {
+			for _, link := range links {
+				// it shouldn't be possible to get here with an undefined CID, but in
+				// case it is we're going to drop this link from the encoded form
+				// entirely
+				if link.Cid.Defined() {
+					qp.ListEntry(la, qp.Map(3, func(ma ipld.MapAssembler) {
+						qp.MapEntry(ma, "Hash", qp.Link(cidlink.Link{Cid: link.Cid}))
+						qp.MapEntry(ma, "Name", qp.String(link.Name))
+						sz := int64(link.Size)
+						if sz < 0 { // overflow, >MaxInt64 is almost certainly an error
+							sz = 0
+						}
+						qp.MapEntry(ma, "Tsize", qp.Int(sz))
+					}))
+				}
+			}
+		}))
+		if n.data != nil {
+			qp.MapEntry(ma, "Data", qp.Bytes(n.data))
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// 1KiB can be allocated on the stack, and covers most small nodes
+	// without having to grow the buffer and cause allocations.
+	enc := make([]byte, 0, 1024)
+
+	enc, err = dagpb.AppendEncode(enc, nd)
+	if err != nil {
+		return nil, err
+	}
+	return &immutableProtoNode{enc, nd.(dagpb.PBNode)}, nil
+}
+
+// Marshal encodes a *Node instance into a new byte slice.
+// The conversion uses an intermediate PBNode.
+func (n *ProtoNode) Marshal() ([]byte, error) {
+	enc, err := n.marshalImmutable()
+	if err != nil {
+		return nil, err
+	}
+	return enc.encoded, nil
+}
+
+// GetPBNode converts *ProtoNode into its protocol buffer variant.
+// If you plan on mutating the data of the original node, it is recommended
+// that you call ProtoNode.Copy() before calling ProtoNode.GetPBNode().
+func (n *ProtoNode) GetPBNode() *pb.PBNode {
+	pbn := &pb.PBNode{}
+	if len(n.links) > 0 {
+		pbn.Links = make([]*pb.PBLink, len(n.links))
+	}
+
+	for i, l := range n.links {
+		pbn.Links[i] = &pb.PBLink{}
+		pbn.Links[i].Name = &l.Name
+		pbn.Links[i].Tsize = &l.Size
+		if l.Cid.Defined() {
+			pbn.Links[i].Hash = l.Cid.Bytes()
+		}
+	}
+
+	// Ensure links are sorted prior to encode, regardless of `linksDirty`.
They + // may not have come sorted if we deserialized a badly encoded form that + // didn't have links already sorted. + sort.Stable(pbLinkSlice(pbn.Links)) + + if len(n.data) > 0 { + pbn.Data = n.data + } + return pbn +} + +// EncodeProtobuf returns the encoded raw data version of a Node instance. +// It may use a cached encoded version, unless the force flag is given. +func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) { + if n.encoded == nil || n.linksDirty || force { + if n.linksDirty { + // there was a mutation involving links, make sure we sort before we build + // and cache a `Node` form that captures the current state + sort.Stable(LinkSlice(n.links)) + n.linksDirty = false + } + n.cached = cid.Undef + var err error + n.encoded, err = n.marshalImmutable() + if err != nil { + return nil, err + } + } + + if !n.cached.Defined() { + c, err := n.CidBuilder().Sum(n.encoded.encoded) + if err != nil { + return nil, err + } + + n.cached = c + } + + return n.encoded.encoded, nil +} + +// DecodeProtobuf decodes raw data and returns a new Node instance. +func DecodeProtobuf(encoded []byte) (*ProtoNode, error) { + n, err := unmarshal(encoded) + if err != nil { + return nil, fmt.Errorf("incorrectly formatted merkledag node: %s", err) + } + return n, nil +} + +// DecodeProtobufBlock is a block decoder for protobuf IPLD nodes conforming to +// node.DecodeBlockFunc +func DecodeProtobufBlock(b blocks.Block) (format.Node, error) { + c := b.Cid() + if c.Type() != cid.DagProtobuf { + return nil, fmt.Errorf("this function can only decode protobuf nodes") + } + + decnd, err := DecodeProtobuf(b.RawData()) + if err != nil { + if strings.Contains(err.Error(), "Unmarshal failed") { + return nil, fmt.Errorf("the block referred to by '%s' was not a valid merkledag node", c) + } + return nil, fmt.Errorf("failed to decode Protocol Buffers: %v", err) + } + + decnd.cached = c + decnd.builder = c.Prefix() + return decnd, nil +} + +// Type assertion +var _ format.DecodeBlockFunc = DecodeProtobufBlock diff --git a/ipld/merkledag/coding_test.go b/ipld/merkledag/coding_test.go new file mode 100644 index 0000000000..7e94990566 --- /dev/null +++ b/ipld/merkledag/coding_test.go @@ -0,0 +1,52 @@ +package merkledag_test + +import ( + "bytes" + "fmt" + "testing" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/boxo/ipld/merkledag" +) + +var benchInput []byte + +func init() { + someData := bytes.Repeat([]byte("some plaintext data\n"), 10) + // make a test CID -- doesn't matter just to add as a link + someCid, _ := cid.Cast([]byte{1, 85, 0, 5, 0, 1, 2, 3, 4}) + + node := &merkledag.ProtoNode{} + node.SetData(someData) + for i := 0; i < 10; i++ { + node.AddRawLink(fmt.Sprintf("%d", i), &ipld.Link{ + Size: 10, + Cid: someCid, + }) + } + + enc, err := node.EncodeProtobuf(true) + if err != nil { + panic(err) + } + benchInput = enc +} + +func BenchmarkRoundtrip(b *testing.B) { + b.ReportAllocs() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + node, err := merkledag.DecodeProtobuf(benchInput) + if err != nil { + b.Fatal(err) + } + + enc, err := node.EncodeProtobuf(true) + if err != nil { + b.Fatal(err) + } + _ = enc + } + }) +} diff --git a/ipld/merkledag/dagutils/diff.go b/ipld/merkledag/dagutils/diff.go new file mode 100644 index 0000000000..49a3ae627b --- /dev/null +++ b/ipld/merkledag/dagutils/diff.go @@ -0,0 +1,204 @@ +package dagutils + +import ( + "context" + "fmt" + "path" + + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + + dag 
"github.com/ipfs/boxo/ipld/merkledag" +) + +// ChangeType denotes type of change in Change +type ChangeType int + +// These constants define the changes that can be applied to a DAG. +const ( + Add ChangeType = iota + Remove + Mod +) + +// Change represents a change to a DAG and contains a reference to the old and +// new CIDs. +type Change struct { + Type ChangeType + Path string + Before cid.Cid + After cid.Cid +} + +// String prints a human-friendly line about a change. +func (c *Change) String() string { + switch c.Type { + case Add: + return fmt.Sprintf("Added %s at %s", c.After.String(), c.Path) + case Remove: + return fmt.Sprintf("Removed %s from %s", c.Before.String(), c.Path) + case Mod: + return fmt.Sprintf("Changed %s to %s at %s", c.Before.String(), c.After.String(), c.Path) + default: + panic("nope") + } +} + +// ApplyChange applies the requested changes to the given node in the given dag. +func ApplyChange(ctx context.Context, ds ipld.DAGService, nd *dag.ProtoNode, cs []*Change) (*dag.ProtoNode, error) { + e := NewDagEditor(nd, ds) + for _, c := range cs { + switch c.Type { + case Add: + child, err := ds.Get(ctx, c.After) + if err != nil { + return nil, err + } + + childpb, ok := child.(*dag.ProtoNode) + if !ok { + return nil, dag.ErrNotProtobuf + } + + err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil) + if err != nil { + return nil, err + } + + case Remove: + err := e.RmLink(ctx, c.Path) + if err != nil { + return nil, err + } + + case Mod: + err := e.RmLink(ctx, c.Path) + if err != nil { + return nil, err + } + child, err := ds.Get(ctx, c.After) + if err != nil { + return nil, err + } + + childpb, ok := child.(*dag.ProtoNode) + if !ok { + return nil, dag.ErrNotProtobuf + } + + err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil) + if err != nil { + return nil, err + } + } + } + + return e.Finalize(ctx, ds) +} + +// Diff returns a set of changes that transform node 'a' into node 'b'. +// It only traverses links in the following cases: +// 1. two node's links number are greater than 0. +// 2. both of two nodes are ProtoNode. +// Otherwise, it compares the cid and emits a Mod change object. +func Diff(ctx context.Context, ds ipld.DAGService, a, b ipld.Node) ([]*Change, error) { + if a.Cid() == b.Cid() { + return []*Change{}, nil + } + + cleanA, okA := a.Copy().(*dag.ProtoNode) + cleanB, okB := b.Copy().(*dag.ProtoNode) + + linksA := a.Links() + linksB := b.Links() + + if !okA || !okB || (len(linksA) == 0 && len(linksB) == 0) { + return []*Change{{Type: Mod, Before: a.Cid(), After: b.Cid()}}, nil + } + + var out []*Change + for _, linkA := range linksA { + linkB, _, err := b.ResolveLink([]string{linkA.Name}) + if err != nil { + continue + } + + cleanA.RemoveNodeLink(linkA.Name) + cleanB.RemoveNodeLink(linkA.Name) + + if linkA.Cid == linkB.Cid { + continue + } + + nodeA, err := linkA.GetNode(ctx, ds) + if err != nil { + return nil, err + } + + nodeB, err := linkB.GetNode(ctx, ds) + if err != nil { + return nil, err + } + + sub, err := Diff(ctx, ds, nodeA, nodeB) + if err != nil { + return nil, err + } + + for _, c := range sub { + c.Path = path.Join(linkA.Name, c.Path) + } + + out = append(out, sub...) + } + + for _, l := range cleanA.Links() { + out = append(out, &Change{Type: Remove, Path: l.Name, Before: l.Cid}) + } + + for _, l := range cleanB.Links() { + out = append(out, &Change{Type: Add, Path: l.Name, After: l.Cid}) + } + + return out, nil +} + +// Conflict represents two incompatible changes and is returned by MergeDiffs(). 
+type Conflict struct {
+	A *Change
+	B *Change
+}
+
+// MergeDiffs takes two slices of changes and merges them into a single slice.
+// When a Change from b affects the same path as an existing change in a,
+// a conflict is created and b is not added to the merged slice.
+// A slice of Conflicts is returned and contains pointers to the
+// Changes involved (which share the same path).
+func MergeDiffs(a, b []*Change) ([]*Change, []Conflict) {
+	paths := make(map[string]*Change)
+	for _, c := range b {
+		paths[c.Path] = c
+	}
+
+	var changes []*Change
+	var conflicts []Conflict
+
+	// NOTE: we avoid iterating over maps here to ensure iteration order is deterministic. We
+	// include changes from a first, then b.
+	for _, changeA := range a {
+		if changeB, ok := paths[changeA.Path]; ok {
+			conflicts = append(conflicts, Conflict{changeA, changeB})
+		} else {
+			changes = append(changes, changeA)
+		}
+		delete(paths, changeA.Path)
+	}
+
+	for _, c := range b {
+		if _, ok := paths[c.Path]; ok {
+			changes = append(changes, c)
+		}
+	}
+
+	return changes, conflicts
+}
diff --git a/ipld/merkledag/dagutils/diff_test.go b/ipld/merkledag/dagutils/diff_test.go
new file mode 100644
index 0000000000..692e8cc704
--- /dev/null
+++ b/ipld/merkledag/dagutils/diff_test.go
@@ -0,0 +1,127 @@
+package dagutils
+
+import (
+	"context"
+	"testing"
+
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+	mdtest "github.com/ipfs/boxo/ipld/merkledag/test"
+)
+
+func TestMergeDiffs(t *testing.T) {
+	node1 := dag.NodeWithData([]byte("one"))
+	node2 := dag.NodeWithData([]byte("two"))
+	node3 := dag.NodeWithData([]byte("three"))
+	node4 := dag.NodeWithData([]byte("four"))
+
+	changesA := []*Change{
+		{Add, "one", cid.Cid{}, node1.Cid()},
+		{Remove, "two", node2.Cid(), cid.Cid{}},
+		{Mod, "three", node3.Cid(), node4.Cid()},
+	}
+
+	changesB := []*Change{
+		{Mod, "two", node2.Cid(), node3.Cid()},
+		{Add, "four", cid.Cid{}, node4.Cid()},
+	}
+
+	changes, conflicts := MergeDiffs(changesA, changesB)
+	if len(changes) != 3 {
+		t.Fatal("unexpected merge changes")
+	}
+
+	expect := []*Change{
+		changesA[0],
+		changesA[2],
+		changesB[1],
+	}
+
+	for i, change := range changes {
+		if change.Type != expect[i].Type {
+			t.Error("unexpected diff change type")
+		}
+
+		if change.Path != expect[i].Path {
+			t.Error("unexpected diff change path")
+		}
+
+		if change.Before != expect[i].Before {
+			t.Error("unexpected diff change before")
+		}
+
+		if change.After != expect[i].After {
+			t.Error("unexpected diff change after")
+		}
+	}
+
+	if len(conflicts) != 1 {
+		t.Fatal("unexpected merge conflicts")
+	}
+
+	if conflicts[0].A != changesA[1] {
+		t.Error("unexpected merge conflict a")
+	}
+
+	if conflicts[0].B != changesB[0] {
+		t.Error("unexpected merge conflict b")
+	}
+}
+
+func TestDiff(t *testing.T) {
+	ctx := context.Background()
+	ds := mdtest.Mock()
+
+	rootA := &dag.ProtoNode{}
+	rootB := &dag.ProtoNode{}
+
+	child1 := dag.NodeWithData([]byte("one"))
+	child2 := dag.NodeWithData([]byte("two"))
+	child3 := dag.NodeWithData([]byte("three"))
+	child4 := dag.NodeWithData([]byte("four"))
+
+	rootA.AddNodeLink("one", child1)
+	rootA.AddNodeLink("two", child2)
+
+	rootB.AddNodeLink("one", child3)
+	rootB.AddNodeLink("four", child4)
+
+	nodes := []ipld.Node{child1, child2, child3, child4, rootA, rootB}
+	if err := ds.AddMany(ctx, nodes); err != nil {
+		t.Fatal("failed to add nodes")
+	}
+
+	changes, err := Diff(ctx, ds, rootA, rootB)
+	if err != nil {
+		t.Fatal("unexpected diff error")
}
+
+	if len(changes) != 3 {
+		t.Fatal("unexpected diff changes")
+	}
+
+	expect := []Change{
+		{Mod, "one", child1.Cid(), child3.Cid()},
+		{Remove, "two", child2.Cid(), cid.Cid{}},
+		{Add, "four", cid.Cid{}, child4.Cid()},
+	}
+
+	for i, change := range changes {
+		if change.Type != expect[i].Type {
+			t.Error("unexpected diff change type")
+		}
+
+		if change.Path != expect[i].Path {
+			t.Error("unexpected diff change path")
+		}
+
+		if change.Before != expect[i].Before {
+			t.Error("unexpected diff change before")
+		}
+
+		if change.After != expect[i].After {
+			t.Error("unexpected diff change after")
+		}
+	}
+}
diff --git a/ipld/merkledag/dagutils/diffenum.go b/ipld/merkledag/dagutils/diffenum.go
new file mode 100644
index 0000000000..75f86b952d
--- /dev/null
+++ b/ipld/merkledag/dagutils/diffenum.go
@@ -0,0 +1,99 @@
+package dagutils
+
+import (
+	"context"
+	"fmt"
+
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+
+	mdag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+// DiffEnumerate fetches every object in the graph pointed to by 'to' that is
+// not in 'from'. This can be used to more efficiently fetch a graph if you can
+// guarantee you already have the entirety of 'from'.
+func DiffEnumerate(ctx context.Context, dserv ipld.NodeGetter, from, to cid.Cid) error {
+	fnd, err := dserv.Get(ctx, from)
+	if err != nil {
+		return fmt.Errorf("get %s: %s", from, err)
+	}
+
+	tnd, err := dserv.Get(ctx, to)
+	if err != nil {
+		return fmt.Errorf("get %s: %s", to, err)
+	}
+
+	diff := getLinkDiff(fnd, tnd)
+
+	sset := cid.NewSet()
+	for _, c := range diff {
+		// Since we're already assuming we have everything in the 'from' graph,
+		// add all those cids to our 'already seen' set to avoid potentially
+		// enumerating them later
+		if c.bef.Defined() {
+			sset.Add(c.bef)
+		}
+	}
+	for _, c := range diff {
+		if !c.bef.Defined() {
+			if sset.Has(c.aft) {
+				continue
+			}
+			err := mdag.Walk(ctx, mdag.GetLinksDirect(dserv), c.aft, sset.Visit, mdag.Concurrent())
+			if err != nil {
+				return err
+			}
+		} else {
+			err := DiffEnumerate(ctx, dserv, c.bef, c.aft)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// If both bef and aft are defined, that signifies bef was replaced with aft.
+// If bef is undefined and aft is not, that means aft was newly added.
+// If aft is undefined and bef is not, that means bef was deleted.
+type diffpair struct {
+	bef, aft cid.Cid
+}
+
+// getLinkDiff returns a changeset between nodes 'a' and 'b'. Currently it does
+// not log deletions, as our use case doesn't call for this.
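+//
+// For example (a sketch): if node a has links to {c1, c2} and node b has links
+// to {c1, c3}, the result is the single pair {bef: c2, aft: c3}. Unmatched
+// links are paired up by CID and position, not by link name.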
+func getLinkDiff(a, b ipld.Node) []diffpair { + ina := make(map[string]*ipld.Link) + inb := make(map[string]*ipld.Link) + var aonly []cid.Cid + for _, l := range b.Links() { + inb[l.Cid.KeyString()] = l + } + for _, l := range a.Links() { + var key = l.Cid.KeyString() + ina[key] = l + if inb[key] == nil { + aonly = append(aonly, l.Cid) + } + } + + var out []diffpair + var aindex int + + for _, l := range b.Links() { + if ina[l.Cid.KeyString()] != nil { + continue + } + + if aindex < len(aonly) { + out = append(out, diffpair{bef: aonly[aindex], aft: l.Cid}) + aindex++ + } else { + out = append(out, diffpair{aft: l.Cid}) + continue + } + } + return out +} diff --git a/ipld/merkledag/dagutils/diffenum_test.go b/ipld/merkledag/dagutils/diffenum_test.go new file mode 100644 index 0000000000..b96e6f759b --- /dev/null +++ b/ipld/merkledag/dagutils/diffenum_test.go @@ -0,0 +1,249 @@ +package dagutils + +import ( + "context" + "fmt" + "testing" + + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + + dag "github.com/ipfs/boxo/ipld/merkledag" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" +) + +func buildNode(name string, desc map[string]ndesc, out map[string]ipld.Node) ipld.Node { + this := desc[name] + nd := new(dag.ProtoNode) + nd.SetData([]byte(name)) + for k, v := range this { + child, ok := out[v] + if !ok { + child = buildNode(v, desc, out) + out[v] = child + } + + if err := nd.AddNodeLink(k, child); err != nil { + panic(err) + } + } + + return nd +} + +type ndesc map[string]string + +func mkGraph(desc map[string]ndesc) map[string]ipld.Node { + out := make(map[string]ipld.Node) + for name := range desc { + if _, ok := out[name]; ok { + continue + } + + out[name] = buildNode(name, desc, out) + } + return out +} + +var tg1 = map[string]ndesc{ + "a1": { + "foo": "b", + }, + "b": {}, + "a2": { + "foo": "b", + "bar": "c", + }, + "c": {}, +} + +var tg2 = map[string]ndesc{ + "a1": { + "foo": "b", + }, + "b": {}, + "a2": { + "foo": "b", + "bar": "c", + }, + "c": {"baz": "d"}, + "d": {}, +} + +var tg3 = map[string]ndesc{ + "a1": { + "foo": "b", + "bar": "c", + }, + "b": {}, + "a2": { + "foo": "b", + "bar": "d", + }, + "c": {}, + "d": {}, +} + +var tg4 = map[string]ndesc{ + "a1": { + "key1": "b", + "key2": "c", + }, + "a2": { + "key1": "b", + "key2": "d", + }, +} + +var tg5 = map[string]ndesc{ + "a1": { + "key1": "a", + "key2": "b", + }, + "a2": { + "key1": "c", + "key2": "d", + }, +} + +func TestNameMatching(t *testing.T) { + nds := mkGraph(tg4) + + diff := getLinkDiff(nds["a1"], nds["a2"]) + if len(diff) != 1 { + t.Fatal(fmt.Errorf("node diff didn't match by name")) + } +} + +func TestNameMatching2(t *testing.T) { + nds := mkGraph(tg5) + + diff := getLinkDiff(nds["a1"], nds["a2"]) + if len(diff) != 2 { + t.Fatal(fmt.Errorf("incorrect number of link diff elements")) + } + if !(diff[0].bef.Equals(nds["a1"].Links()[0].Cid) && diff[0].aft.Equals(nds["a2"].Links()[0].Cid)) { + t.Fatal(fmt.Errorf("node diff didn't match by name")) + } +} + +func TestDiffEnumBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nds := mkGraph(tg1) + + ds := mdtest.Mock() + lgds := &getLogger{ds: ds} + + for _, nd := range nds { + err := ds.Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + } + + err := DiffEnumerate(ctx, lgds, nds["a1"].Cid(), nds["a2"].Cid()) + if err != nil { + t.Fatal(err) + } + + err = assertCidList(lgds.log, []cid.Cid{nds["a1"].Cid(), nds["a2"].Cid(), nds["c"].Cid()}) + if err != nil { + t.Fatal(err) + } +} + +type getLogger struct { + 
ds  ipld.NodeGetter
+	log []cid.Cid
+}
+
+func (gl *getLogger) Get(ctx context.Context, c cid.Cid) (ipld.Node, error) {
+	nd, err := gl.ds.Get(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+	gl.log = append(gl.log, c)
+	return nd, nil
+}
+
+func (gl *getLogger) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption {
+	outCh := make(chan *ipld.NodeOption, len(cids))
+	nds := gl.ds.GetMany(ctx, cids)
+	for no := range nds {
+		if no.Err == nil {
+			gl.log = append(gl.log, no.Node.Cid())
+		}
+		select {
+		case outCh <- no:
+		default:
+			panic("too many responses")
+		}
+	}
+	// Close and return the forwarding channel; returning the already-drained
+	// source channel would yield no results to the caller.
+	close(outCh)
+	return outCh
+}
+
+func assertCidList(a, b []cid.Cid) error {
+	if len(a) != len(b) {
+		return fmt.Errorf("got different number of cids than expected")
+	}
+	for i, c := range a {
+		if !c.Equals(b[i]) {
+			return fmt.Errorf("expected %s, got %s", b[i], c)
+		}
+	}
+	return nil
+}
+
+func TestDiffEnumFail(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nds := mkGraph(tg2)
+
+	ds := mdtest.Mock()
+	lgds := &getLogger{ds: ds}
+
+	for _, s := range []string{"a1", "a2", "b", "c"} {
+		err := ds.Add(ctx, nds[s])
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	err := DiffEnumerate(ctx, lgds, nds["a1"].Cid(), nds["a2"].Cid())
+	if !ipld.IsNotFound(err) {
+		t.Fatal("expected err not found")
+	}
+
+	err = assertCidList(lgds.log, []cid.Cid{nds["a1"].Cid(), nds["a2"].Cid(), nds["c"].Cid()})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestDiffEnumRecurse(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nds := mkGraph(tg3)
+
+	ds := mdtest.Mock()
+	lgds := &getLogger{ds: ds}
+
+	for _, s := range []string{"a1", "a2", "b", "c", "d"} {
+		err := ds.Add(ctx, nds[s])
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	err := DiffEnumerate(ctx, lgds, nds["a1"].Cid(), nds["a2"].Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = assertCidList(lgds.log, []cid.Cid{nds["a1"].Cid(), nds["a2"].Cid(), nds["c"].Cid(), nds["d"].Cid()})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/ipld/merkledag/dagutils/utils.go b/ipld/merkledag/dagutils/utils.go
new file mode 100644
index 0000000000..4f6500b25c
--- /dev/null
+++ b/ipld/merkledag/dagutils/utils.go
@@ -0,0 +1,234 @@
+package dagutils
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	bserv "github.com/ipfs/boxo/blockservice"
+	bstore "github.com/ipfs/boxo/blockstore"
+	offline "github.com/ipfs/boxo/exchange/offline"
+	ds "github.com/ipfs/go-datastore"
+	syncds "github.com/ipfs/go-datastore/sync"
+	ipld "github.com/ipfs/go-ipld-format"
+
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+// Editor represents a ProtoNode tree editor and provides methods to
+// modify it.
+type Editor struct {
+	root *dag.ProtoNode
+
+	// tmp is a temporary in-memory (for now) dagstore for all of the
+	// intermediary nodes to be stored in
+	tmp ipld.DAGService
+
+	// src is the dagstore with *all* of the data on it; it is used to pull
+	// nodes from for modification (nil is a valid value)
+	src ipld.DAGService
+}
+
+// NewMemoryDagService returns a new, thread-safe in-memory DAGService.
+func NewMemoryDagService() ipld.DAGService {
+	// build mem-datastore for editor's intermediary nodes
+	bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
+	bsrv := bserv.New(bs, offline.Exchange(bs))
+	return dag.NewDAGService(bsrv)
+}
+
+// NewDagEditor returns a ProtoNode editor.
+//
+// * root is the node to be modified
+// * source is the dagstore to pull nodes from (optional)
+func NewDagEditor(root *dag.ProtoNode, source ipld.DAGService) *Editor {
+	return &Editor{
+		root: root,
+		tmp:  NewMemoryDagService(),
+		src:  source,
+	}
+}
+
+// GetNode returns a copy of the root node being edited.
+func (e *Editor) GetNode() *dag.ProtoNode {
+	return e.root.Copy().(*dag.ProtoNode)
+}
+
+// GetDagService returns the DAGService used by this editor.
+func (e *Editor) GetDagService() ipld.DAGService {
+	return e.tmp
+}
+
+func addLink(ctx context.Context, ds ipld.DAGService, root *dag.ProtoNode, childname string, childnd ipld.Node) (*dag.ProtoNode, error) {
+	if childname == "" {
+		return nil, errors.New("cannot create link with no name")
+	}
+
+	// ensure that the node we are adding is in the dagservice
+	err := ds.Add(ctx, childnd)
+	if err != nil {
+		return nil, err
+	}
+
+	_ = ds.Remove(ctx, root.Cid())
+
+	// ensure no link with that name already exists
+	_ = root.RemoveNodeLink(childname) // ignore error, only option is ErrNotFound
+
+	if err := root.AddNodeLink(childname, childnd); err != nil {
+		return nil, err
+	}
+
+	if err := ds.Add(ctx, root); err != nil {
+		return nil, err
+	}
+	return root, nil
+}
+
+// InsertNodeAtPath inserts a new node in the tree and replaces the current root with the new one.
+func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert ipld.Node, create func() *dag.ProtoNode) error {
+	splpath := strings.Split(pth, "/")
+	nd, err := e.insertNodeAtPath(ctx, e.root, splpath, toinsert, create)
+	if err != nil {
+		return err
+	}
+	e.root = nd
+	return nil
+}
+
+func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.ProtoNode, path []string, toinsert ipld.Node, create func() *dag.ProtoNode) (*dag.ProtoNode, error) {
+	if len(path) == 1 {
+		return addLink(ctx, e.tmp, root, path[0], toinsert)
+	}
+
+	nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0])
+	if err != nil {
+		// if a 'create' callback is given, we create directories on the way down as needed
+		if err == dag.ErrLinkNotFound && create != nil {
+			nd = create()
+			err = nil // no longer an error case
+		} else if ipld.IsNotFound(err) {
+			// try finding it in our source dagstore
+			nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0])
+		}
+
+		// if the second 'GetLinkedProtoNode' call also failed, we want to
+		// error out
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ndprime, err := e.insertNodeAtPath(ctx, nd, path[1:], toinsert, create)
+	if err != nil {
+		return nil, err
+	}
+
+	_ = e.tmp.Remove(ctx, root.Cid())
+
+	_ = root.RemoveNodeLink(path[0])
+	err = root.AddNodeLink(path[0], ndprime)
+	if err != nil {
+		return nil, err
+	}
+
+	err = e.tmp.Add(ctx, root)
+	if err != nil {
+		return nil, err
+	}
+
+	return root, nil
+}
+
+// RmLink removes the link with the given name and updates the root node of
+// the editor.
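+//
+// For example (a sketch): e.RmLink(ctx, "a/b/c") removes the link named "c"
+// from the node at path "a/b" and rewrites each ancestor up to the root.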
+func (e *Editor) RmLink(ctx context.Context, pth string) error {
+	splpath := strings.Split(pth, "/")
+	nd, err := e.rmLink(ctx, e.root, splpath)
+	if err != nil {
+		return err
+	}
+	e.root = nd
+	return nil
+}
+
+func (e *Editor) rmLink(ctx context.Context, root *dag.ProtoNode, path []string) (*dag.ProtoNode, error) {
+	if len(path) == 1 {
+		// base case, remove node in question
+		err := root.RemoveNodeLink(path[0])
+		if err != nil {
+			return nil, err
+		}
+
+		err = e.tmp.Add(ctx, root)
+		if err != nil {
+			return nil, err
+		}
+
+		return root, nil
+	}
+
+	// search for the node in both the tmp dagstore and the source dagstore
+	nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0])
+	if ipld.IsNotFound(err) {
+		nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0])
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	nnode, err := e.rmLink(ctx, nd, path[1:])
+	if err != nil {
+		return nil, err
+	}
+
+	_ = e.tmp.Remove(ctx, root.Cid())
+
+	_ = root.RemoveNodeLink(path[0])
+	err = root.AddNodeLink(path[0], nnode)
+	if err != nil {
+		return nil, err
+	}
+
+	err = e.tmp.Add(ctx, root)
+	if err != nil {
+		return nil, err
+	}
+
+	return root, nil
+}
+
+// Finalize writes the new DAG to the given DAGService and returns the modified
+// root node.
+func (e *Editor) Finalize(ctx context.Context, ds ipld.DAGService) (*dag.ProtoNode, error) {
+	nd := e.GetNode()
+	err := copyDag(ctx, nd, e.tmp, ds)
+	return nd, err
+}
+
+func copyDag(ctx context.Context, nd ipld.Node, from, to ipld.DAGService) error {
+	// TODO(#4609): make this batch.
+	err := to.Add(ctx, nd)
+	if err != nil {
+		return err
+	}
+
+	for _, lnk := range nd.Links() {
+		child, err := lnk.GetNode(ctx, from)
+		if err != nil {
+			if ipld.IsNotFound(err) {
+				// not found means we didn't modify it, and it should
+				// already be in the target datastore
+				continue
+			}
+			return err
+		}
+
+		err = copyDag(ctx, child, from, to)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/ipld/merkledag/dagutils/utils_test.go b/ipld/merkledag/dagutils/utils_test.go
new file mode 100644
index 0000000000..3fd3e823fe
--- /dev/null
+++ b/ipld/merkledag/dagutils/utils_test.go
@@ -0,0 +1,114 @@
+package dagutils
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+	mdtest "github.com/ipfs/boxo/ipld/merkledag/test"
+)
+
+func TestAddLink(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ds := mdtest.Mock()
+	fishnode := dag.NodeWithData([]byte("fishcakes!"))
+
+	err := ds.Add(ctx, fishnode)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nd := new(dag.ProtoNode)
+	nnode, err := addLink(ctx, ds, nd, "fish", fishnode)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fnprime, err := nnode.GetLinkedNode(ctx, ds, "fish")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fnpkey := fnprime.Cid()
+	if !fnpkey.Equals(fishnode.Cid()) {
+		t.Fatal("wrong child node found!")
+	}
+}
+
+func assertNodeAtPath(t *testing.T, ds ipld.DAGService, root *dag.ProtoNode, pth string, exp cid.Cid) {
+	parts := strings.Split(pth, "/")
+	cur := root
+	for _, e := range parts {
+		nxt, err := cur.GetLinkedProtoNode(context.Background(), ds, e)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		cur = nxt
+	}
+
+	curc := cur.Cid()
+	if !curc.Equals(exp) {
+		t.Fatal("node not as expected at end of path")
+	}
+}
+
+func TestInsertNode(t *testing.T) {
+	root := new(dag.ProtoNode)
+	e := NewDagEditor(root, nil)
+
+	testInsert(t, e, "a", "anodefortesting", false, "")
+	testInsert(t, e, "a/b", "data", false, "")
+	testInsert(t, e, "a/b/c/d/e", "blah", false, "no link by that name")
+	testInsert(t, e, "a/b/c/d/e", "foo", true, "")
+	testInsert(t, e, "a/b/c/d/f", "baz", true, "")
+	testInsert(t, e, "a/b/c/d/f", "bar", true, "")
+
+	testInsert(t, e, "", "bar", true, "cannot create link with no name")
+	testInsert(t, e, "////", "slashes", true, "cannot create link with no name")
+
+	c := e.GetNode().Cid()
+
+	if c.String() != "QmZ8yeT9uD6ouJPNAYt62XffYuXBT6b4mP4obRSE9cJrSt" {
+		t.Fatal("output was different than expected: ", c)
+	}
+}
+
+func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr string) {
+	child := dag.NodeWithData([]byte(data))
+	err := e.tmp.Add(context.Background(), child)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var c func() *dag.ProtoNode
+	if create {
+		c = func() *dag.ProtoNode {
+			return &dag.ProtoNode{}
+		}
+	}
+
+	err = e.InsertNodeAtPath(context.Background(), path, child, c)
+	if experr != "" {
+		var got string
+		if err != nil {
+			got = err.Error()
+		}
+		if got != experr {
+			t.Fatalf("expected '%s' but got '%s'", experr, got)
+		}
+		return
+	}
+
+	if err != nil {
+		t.Fatal(err, path, data, create, experr)
+	}
+
+	assertNodeAtPath(t, e.tmp, e.root, path, child.Cid())
+}
diff --git a/ipld/merkledag/errservice.go b/ipld/merkledag/errservice.go
new file mode 100644
index 0000000000..f4607615a4
--- /dev/null
+++ b/ipld/merkledag/errservice.go
@@ -0,0 +1,47 @@
+package merkledag
+
+import (
+	"context"
+
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// ErrorService implements ipld.DAGService, returning 'Err' for every call.
+type ErrorService struct {
+	Err error
+}
+
+var _ ipld.DAGService = (*ErrorService)(nil)
+
+// Add returns the cs.Err.
+func (cs *ErrorService) Add(ctx context.Context, nd ipld.Node) error {
+	return cs.Err
+}
+
+// AddMany returns the cs.Err.
+func (cs *ErrorService) AddMany(ctx context.Context, nds []ipld.Node) error {
+	return cs.Err
+}
+
+// Get returns the cs.Err.
+func (cs *ErrorService) Get(ctx context.Context, c cid.Cid) (ipld.Node, error) {
+	return nil, cs.Err
+}
+
+// GetMany returns an empty, closed channel; unlike the other methods it does
+// not surface cs.Err.
+func (cs *ErrorService) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption {
+	ch := make(chan *ipld.NodeOption)
+	close(ch)
+	return ch
+}
+
+// Remove returns the cs.Err.
+func (cs *ErrorService) Remove(ctx context.Context, c cid.Cid) error {
+	return cs.Err
+}
+
+// RemoveMany returns the cs.Err.
+func (cs *ErrorService) RemoveMany(ctx context.Context, cids []cid.Cid) error {
+	return cs.Err
+}
diff --git a/ipld/merkledag/merkledag.go b/ipld/merkledag/merkledag.go
new file mode 100644
index 0000000000..3b18917948
--- /dev/null
+++ b/ipld/merkledag/merkledag.go
@@ -0,0 +1,580 @@
+// Package merkledag implements the IPFS Merkle DAG data structures.
+package merkledag
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	bserv "github.com/ipfs/boxo/blockservice"
+	cid "github.com/ipfs/go-cid"
+	ipldcbor "github.com/ipfs/go-ipld-cbor"
+	format "github.com/ipfs/go-ipld-format"
+	legacy "github.com/ipfs/go-ipld-legacy"
+	dagpb "github.com/ipld/go-codec-dagpb"
+
+	// blank import is used to register the IPLD raw codec
+	_ "github.com/ipld/go-ipld-prime/codec/raw"
+	basicnode "github.com/ipld/go-ipld-prime/node/basic"
+)
+
+// TODO: We should move these registrations elsewhere. Really, most of the IPLD
+// functionality should go in a `go-ipld` repo but that will take a lot of work
+// and design.
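+//
+// Once the registrations in init below have run, a block whose codec is one
+// of dag-pb, raw, or dag-cbor can (illustratively) be decoded through the
+// go-ipld-format registry without knowing its codec up front; blk here is any
+// blocks.Block:
+//
+//	nd, err := format.Decode(blk) // dispatches on blk.Cid().Prefix().Codec
+//	if err == nil {
+//		fmt.Println(len(nd.Links()), "links")
+//	}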
+func init() {
+	format.Register(cid.DagProtobuf, DecodeProtobufBlock)
+	format.Register(cid.Raw, DecodeRawBlock)
+	format.Register(cid.DagCBOR, ipldcbor.DecodeBlock)
+
+	legacy.RegisterCodec(cid.DagProtobuf, dagpb.Type.PBNode, ProtoNodeConverter)
+	legacy.RegisterCodec(cid.Raw, basicnode.Prototype.Bytes, RawNodeConverter)
+}
+
+// contextKey is a type to use as value for the ProgressTracker contexts.
+type contextKey string
+
+const progressContextKey contextKey = "progress"
+
+// NewDAGService constructs a new DAGService (using the default implementation).
+// Note that the default implementation is also an ipld.LinkGetter.
+func NewDAGService(bs bserv.BlockService) *dagService {
+	return &dagService{Blocks: bs}
+}
+
+// dagService is an IPFS Merkle DAG service.
+// - the root is virtual (like a forest)
+// - stores nodes' data in a BlockService
+//
+// TODO: should cache Nodes that are in memory, and be able to free some of
+// them when vm pressure is high
+type dagService struct {
+	Blocks bserv.BlockService
+}
+
+// Add adds a node to the dagService, storing the block in the BlockService
+func (n *dagService) Add(ctx context.Context, nd format.Node) error {
+	if n == nil { // FIXME remove this assertion. protect with constructor invariant
+		return fmt.Errorf("dagService is nil")
+	}
+
+	return n.Blocks.AddBlock(ctx, nd)
+}
+
+func (n *dagService) AddMany(ctx context.Context, nds []format.Node) error {
+	blks := make([]blocks.Block, len(nds))
+	for i, nd := range nds {
+		blks[i] = nd
+	}
+	return n.Blocks.AddBlocks(ctx, blks)
+}
+
+// Get retrieves a node from the dagService, fetching the block in the BlockService
+func (n *dagService) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
+	if n == nil {
+		return nil, fmt.Errorf("dagService is nil")
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	b, err := n.Blocks.GetBlock(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+
+	return legacy.DecodeNode(ctx, b)
+}
+
+// GetLinks returns the links for the node; the node doesn't necessarily have
+// to exist locally.
+func (n *dagService) GetLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
+	if c.Type() == cid.Raw {
+		return nil, nil
+	}
+	node, err := n.Get(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+	return node.Links(), nil
+}
+
+func (n *dagService) Remove(ctx context.Context, c cid.Cid) error {
+	return n.Blocks.DeleteBlock(ctx, c)
+}
+
+// RemoveMany removes multiple nodes from the DAG. It will likely be faster than
+// removing them individually.
+//
+// This operation is not atomic. If it returns an error, some nodes may or may
+// not have been removed.
+func (n *dagService) RemoveMany(ctx context.Context, cids []cid.Cid) error {
+	// TODO(#4608): make this batch all the way down.
+	for _, c := range cids {
+		if err := n.Blocks.DeleteBlock(ctx, c); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetLinksDirect creates a function to get the links for a node, from
+// the node, bypassing the LinkService. If the node does not exist
+// locally (and cannot be retrieved) an error will be returned.
+func GetLinksDirect(serv format.NodeGetter) GetLinks {
+	return func(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
+		nd, err := serv.Get(ctx, c)
+		if err != nil {
+			return nil, err
+		}
+		return nd.Links(), nil
+	}
+}
+
+type sesGetter struct {
+	bs *bserv.Session
+}
+
+// Get gets a single node from the DAG.
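+// A session getter is normally obtained through Session or WrapSession below;
+// an illustrative sketch (ds is the *dagService, c a cid.Cid):
+//
+//	ng := ds.Session(ctx) // one blockservice session shared by all gets
+//	nd, err := ng.Get(ctx, c)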
+func (sg *sesGetter) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
+	blk, err := sg.bs.GetBlock(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+
+	return legacy.DecodeNode(ctx, blk)
+}
+
+// GetMany gets many nodes at once, batching the request if possible.
+func (sg *sesGetter) GetMany(ctx context.Context, keys []cid.Cid) <-chan *format.NodeOption {
+	return getNodesFromBG(ctx, sg.bs, keys)
+}
+
+// WrapSession wraps a blockservice session to satisfy the format.NodeGetter interface
+func WrapSession(s *bserv.Session) format.NodeGetter {
+	return &sesGetter{s}
+}
+
+// Session returns a NodeGetter using a new session for block fetches.
+func (n *dagService) Session(ctx context.Context) format.NodeGetter {
+	return WrapSession(bserv.NewSession(ctx, n.Blocks))
+}
+
+// FetchGraph fetches all nodes that are children of the given node
+func FetchGraph(ctx context.Context, root cid.Cid, serv format.DAGService) error {
+	return FetchGraphWithDepthLimit(ctx, root, -1, serv)
+}
+
+// FetchGraphWithDepthLimit fetches all nodes that are children to the given
+// node down to the given depth. maxDepth=0 means "only fetch root",
+// maxDepth=1 means "fetch root and its direct children" and so on...
+// maxDepth=-1 means unlimited.
+func FetchGraphWithDepthLimit(ctx context.Context, root cid.Cid, depthLim int, serv format.DAGService) error {
+	var ng format.NodeGetter = NewSession(ctx, serv)
+
+	set := make(map[cid.Cid]int)
+
+	// Visit function returns true when:
+	// * The element is not in the set and we're not over depthLim
+	// * The element is in the set but recorded depth is deeper
+	//   than currently seen (if we find it higher in the tree we'll need
+	//   to explore deeper than before).
+	// depthLim = -1 means we only return true if the element is not in the
+	// set.
+	visit := func(c cid.Cid, depth int) bool {
+		oldDepth, ok := set[c]
+
+		if (ok && depthLim < 0) || (depthLim >= 0 && depth > depthLim) {
+			return false
+		}
+
+		if !ok || oldDepth > depth {
+			set[c] = depth
+			return true
+		}
+		return false
+	}
+
+	// If we have a ProgressTracker, we wrap the visit function to handle it
+	v, _ := ctx.Value(progressContextKey).(*ProgressTracker)
+	if v == nil {
+		return WalkDepth(ctx, GetLinksDirect(ng), root, visit, Concurrent())
+	}
+
+	visitProgress := func(c cid.Cid, depth int) bool {
+		if visit(c, depth) {
+			v.Increment()
+			return true
+		}
+		return false
+	}
+	return WalkDepth(ctx, GetLinksDirect(ng), root, visitProgress, Concurrent())
+}
+
+// GetMany gets many nodes from the DAG at once.
+//
+// This method may not return all requested nodes (and may or may not return an
+// error indicating that it failed to do so). It is up to the caller to verify
+// that it received all nodes.
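+//
+// An illustrative consumption pattern (note that duplicate keys are deduped
+// internally, so a caller passing duplicates should account for that):
+//
+//	got := 0
+//	for opt := range ds.GetMany(ctx, keys) {
+//		if opt.Err != nil {
+//			return opt.Err
+//		}
+//		got++ // caller decides what "all nodes" means for its keys
+//	}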
+func (n *dagService) GetMany(ctx context.Context, keys []cid.Cid) <-chan *format.NodeOption {
+	return getNodesFromBG(ctx, n.Blocks, keys)
+}
+
+func dedupKeys(keys []cid.Cid) []cid.Cid {
+	set := cid.NewSet()
+	for _, c := range keys {
+		set.Add(c)
+	}
+	if set.Len() == len(keys) {
+		return keys
+	}
+	return set.Keys()
+}
+
+func getNodesFromBG(ctx context.Context, bs bserv.BlockGetter, keys []cid.Cid) <-chan *format.NodeOption {
+	keys = dedupKeys(keys)
+
+	out := make(chan *format.NodeOption, len(keys))
+	blocks := bs.GetBlocks(ctx, keys)
+	var count int
+
+	go func() {
+		defer close(out)
+		for {
+			select {
+			case b, ok := <-blocks:
+				if !ok {
+					if count != len(keys) {
+						out <- &format.NodeOption{Err: fmt.Errorf("failed to fetch all nodes")}
+					}
+					return
+				}
+
+				nd, err := legacy.DecodeNode(ctx, b)
+				if err != nil {
+					out <- &format.NodeOption{Err: err}
+					return
+				}
+
+				out <- &format.NodeOption{Node: nd}
+				count++
+
+			case <-ctx.Done():
+				out <- &format.NodeOption{Err: ctx.Err()}
+				return
+			}
+		}
+	}()
+	return out
+}
+
+// GetLinks is the type of function passed to the EnumerateChildren function(s)
+// for getting the children of an IPLD node.
+type GetLinks func(context.Context, cid.Cid) ([]*format.Link, error)
+
+// GetLinksWithDAG returns a GetLinks function that tries to use the given
+// NodeGetter as a LinkGetter to get the children of a given IPLD node. This may
+// allow us to traverse the DAG without actually loading and parsing the node in
+// question (if we already have the links cached).
+func GetLinksWithDAG(ng format.NodeGetter) GetLinks {
+	return func(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
+		return format.GetLinks(ctx, ng, c)
+	}
+}
+
+// defaultConcurrentFetch is the default maximum number of concurrent fetches
+// that the concurrent walk will start at a time
+const defaultConcurrentFetch = 32
+
+// walkOptions represents the parameters of a graph walking algorithm
+type walkOptions struct {
+	SkipRoot     bool
+	Concurrency  int
+	ErrorHandler func(c cid.Cid, err error) error
+}
+
+// WalkOption is a setter for walkOptions
+type WalkOption func(*walkOptions)
+
+func (wo *walkOptions) addHandler(handler func(c cid.Cid, err error) error) {
+	if wo.ErrorHandler != nil {
+		// capture the current handler; referencing wo.ErrorHandler inside the
+		// closure would make the closure call itself recursively
+		prev := wo.ErrorHandler
+		wo.ErrorHandler = func(c cid.Cid, err error) error {
+			return handler(c, prev(c, err))
+		}
+	} else {
+		wo.ErrorHandler = handler
+	}
+}
+
+// SkipRoot is a WalkOption indicating that the root node should be skipped
+func SkipRoot() WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.SkipRoot = true
+	}
+}
+
+// Concurrent is a WalkOption indicating that node fetching should be done in
+// parallel, with the default concurrency factor.
+// NOTE: When using this option, the walk order is *not* guaranteed.
+// NOTE: It *does not* make multiple concurrent calls to the passed `visit` function.
+func Concurrent() WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.Concurrency = defaultConcurrentFetch
+	}
+}
+
+// Concurrency is a WalkOption indicating that node fetching should be done in
+// parallel, with a specific concurrency factor.
+// NOTE: When using this option, the walk order is *not* guaranteed.
+// NOTE: It *does not* make multiple concurrent calls to the passed `visit` function.
+func Concurrency(worker int) WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.Concurrency = worker
+	}
+}
+
+// IgnoreErrors is a WalkOption indicating that the walk should attempt to
+// continue even when an error occurs.
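+// An illustrative combination with other walk options:
+//
+//	err := Walk(ctx, GetLinksDirect(ng), root, visit,
+//		Concurrency(8), IgnoreErrors())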
+func IgnoreErrors() WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.addHandler(func(c cid.Cid, err error) error {
+			return nil
+		})
+	}
+}
+
+// IgnoreMissing is a WalkOption indicating that the walk should continue when
+// a node is missing.
+func IgnoreMissing() WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.addHandler(func(c cid.Cid, err error) error {
+			if format.IsNotFound(err) {
+				return nil
+			}
+			return err
+		})
+	}
+}
+
+// OnMissing is a WalkOption adding a callback that will be triggered on a missing
+// node.
+func OnMissing(callback func(c cid.Cid)) WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.addHandler(func(c cid.Cid, err error) error {
+			if format.IsNotFound(err) {
+				callback(c)
+			}
+			return err
+		})
+	}
+}
+
+// OnError is a WalkOption adding a custom error handler.
+// If this handler returns a nil error, the walk will continue.
+func OnError(handler func(c cid.Cid, err error) error) WalkOption {
+	return func(walkOptions *walkOptions) {
+		walkOptions.addHandler(handler)
+	}
+}
+
+// Walk will walk the dag in order (depth first) starting at the given root.
+func Walk(ctx context.Context, getLinks GetLinks, c cid.Cid, visit func(cid.Cid) bool, options ...WalkOption) error {
+	visitDepth := func(c cid.Cid, depth int) bool {
+		return visit(c)
+	}
+
+	return WalkDepth(ctx, getLinks, c, visitDepth, options...)
+}
+
+// WalkDepth walks the dag starting at the given root and passes the current
+// depth to a given visit function. The visit function can be used to limit DAG
+// exploration.
+func WalkDepth(ctx context.Context, getLinks GetLinks, c cid.Cid, visit func(cid.Cid, int) bool, options ...WalkOption) error {
+	opts := &walkOptions{}
+	for _, opt := range options {
+		opt(opts)
+	}
+
+	if opts.Concurrency > 1 {
+		return parallelWalkDepth(ctx, getLinks, c, visit, opts)
+	} else {
+		return sequentialWalkDepth(ctx, getLinks, c, 0, visit, opts)
+	}
+}
+
+func sequentialWalkDepth(ctx context.Context, getLinks GetLinks, root cid.Cid, depth int, visit func(cid.Cid, int) bool, options *walkOptions) error {
+	if !(options.SkipRoot && depth == 0) {
+		if !visit(root, depth) {
+			return nil
+		}
+	}
+
+	links, err := getLinks(ctx, root)
+	if err != nil && options.ErrorHandler != nil {
+		err = options.ErrorHandler(root, err)
+	}
+	if err != nil {
+		return err
+	}
+
+	for _, lnk := range links {
+		if err := sequentialWalkDepth(ctx, getLinks, lnk.Cid, depth+1, visit, options); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ProgressTracker is used to show progress when fetching nodes.
+type ProgressTracker struct {
+	Total int
+	lk    sync.Mutex
+}
+
+// DeriveContext returns a new context with value "progress" derived from
+// the given one.
+func (p *ProgressTracker) DeriveContext(ctx context.Context) context.Context {
+	return context.WithValue(ctx, progressContextKey, p)
+}
+
+// Increment adds one to the total progress.
+func (p *ProgressTracker) Increment() {
+	p.lk.Lock()
+	defer p.lk.Unlock()
+	p.Total++
+}
+
+// Value returns the current progress.
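+//
+// Illustrative use together with FetchGraph (ds is any format.DAGService):
+//
+//	pt := new(ProgressTracker)
+//	pctx := pt.DeriveContext(ctx)
+//	if err := FetchGraph(pctx, root, ds); err != nil {
+//		return err
+//	}
+//	fmt.Println("visited", pt.Value(), "nodes")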
+func (p *ProgressTracker) Value() int { + p.lk.Lock() + defer p.lk.Unlock() + return p.Total +} + +func parallelWalkDepth(ctx context.Context, getLinks GetLinks, root cid.Cid, visit func(cid.Cid, int) bool, options *walkOptions) error { + type cidDepth struct { + cid cid.Cid + depth int + } + + type linksDepth struct { + links []*format.Link + depth int + } + + feed := make(chan cidDepth) + out := make(chan linksDepth) + done := make(chan struct{}) + + var visitlk sync.Mutex + var wg sync.WaitGroup + + errChan := make(chan error) + fetchersCtx, cancel := context.WithCancel(ctx) + defer wg.Wait() + defer cancel() + for i := 0; i < options.Concurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for cdepth := range feed { + ci := cdepth.cid + depth := cdepth.depth + + var shouldVisit bool + + // bypass the root if needed + if !(options.SkipRoot && depth == 0) { + visitlk.Lock() + shouldVisit = visit(ci, depth) + visitlk.Unlock() + } else { + shouldVisit = true + } + + if shouldVisit { + links, err := getLinks(ctx, ci) + if err != nil && options.ErrorHandler != nil { + err = options.ErrorHandler(root, err) + } + if err != nil { + select { + case errChan <- err: + case <-fetchersCtx.Done(): + } + return + } + + outLinks := linksDepth{ + links: links, + depth: depth + 1, + } + + select { + case out <- outLinks: + case <-fetchersCtx.Done(): + return + } + } + select { + case done <- struct{}{}: + case <-fetchersCtx.Done(): + } + } + }() + } + defer close(feed) + + send := feed + var todoQueue []cidDepth + var inProgress int + + next := cidDepth{ + cid: root, + depth: 0, + } + + for { + select { + case send <- next: + inProgress++ + if len(todoQueue) > 0 { + next = todoQueue[0] + todoQueue = todoQueue[1:] + } else { + next = cidDepth{} + send = nil + } + case <-done: + inProgress-- + if inProgress == 0 && !next.cid.Defined() { + return nil + } + case linksDepth := <-out: + for _, lnk := range linksDepth.links { + cd := cidDepth{ + cid: lnk.Cid, + depth: linksDepth.depth, + } + + if !next.cid.Defined() { + next = cd + send = feed + } else { + todoQueue = append(todoQueue, cd) + } + } + case err := <-errChan: + return err + + case <-ctx.Done(): + return ctx.Err() + } + } +} + +var _ format.LinkGetter = &dagService{} +var _ format.NodeGetter = &dagService{} +var _ format.NodeGetter = &sesGetter{} +var _ format.DAGService = &dagService{} diff --git a/ipld/merkledag/merkledag_test.go b/ipld/merkledag/merkledag_test.go new file mode 100644 index 0000000000..58c8d1f95b --- /dev/null +++ b/ipld/merkledag/merkledag_test.go @@ -0,0 +1,1232 @@ +package merkledag_test + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "strings" + "sync" + "testing" + "time" + + . "github.com/ipfs/boxo/ipld/merkledag" + mdpb "github.com/ipfs/boxo/ipld/merkledag/pb" + dstest "github.com/ipfs/boxo/ipld/merkledag/test" + + blocks "github.com/ipfs/boxo/blocks" + bserv "github.com/ipfs/boxo/blockservice" + bstest "github.com/ipfs/boxo/blockservice/test" + cid "github.com/ipfs/go-cid" + offline "github.com/ipfs/boxo/exchange/offline" + u "github.com/ipfs/boxo/util" + ipld "github.com/ipfs/go-ipld-format" + prime "github.com/ipld/go-ipld-prime" + mh "github.com/multiformats/go-multihash" +) + +var someCid cid.Cid = func() cid.Cid { + c, _ := cid.Cast([]byte{1, 85, 0, 5, 0, 1, 2, 3, 4}) + return c +}() + +// makeDepthTestingGraph makes a small DAG with two levels. The level-two +// nodes are both children of the root and of one of the level 1 nodes. 
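+// Schematically (l23 is linked from two places):
+//
+//	root ─┬─ l11 ─┬─ l21
+//	      │       ├─ l22
+//	      │       └─ l23
+//	      ├─ l12
+//	      └─ l23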
+// This is meant to test the Walk*Depth functions. +func makeDepthTestingGraph(t *testing.T, ds ipld.DAGService) ipld.Node { + root := NodeWithData(nil) + l11 := NodeWithData([]byte("leve1_node1")) + l12 := NodeWithData([]byte("leve1_node2")) + l21 := NodeWithData([]byte("leve2_node1")) + l22 := NodeWithData([]byte("leve2_node2")) + l23 := NodeWithData([]byte("leve2_node3")) + + l11.AddNodeLink(l21.Cid().String(), l21) + l11.AddNodeLink(l22.Cid().String(), l22) + l11.AddNodeLink(l23.Cid().String(), l23) + + root.AddNodeLink(l11.Cid().String(), l11) + root.AddNodeLink(l12.Cid().String(), l12) + root.AddNodeLink(l23.Cid().String(), l23) + + ctx := context.Background() + for _, n := range []ipld.Node{l23, l22, l21, l12, l11, root} { + err := ds.Add(ctx, n) + if err != nil { + t.Fatal(err) + } + } + + return root +} + +// Check that all children of root are in the given set and in the datastore +func traverseAndCheck(t *testing.T, root ipld.Node, ds ipld.DAGService, hasF func(c cid.Cid) bool) { + // traverse dag and check + for _, lnk := range root.Links() { + c := lnk.Cid + if !hasF(c) { + t.Fatal("missing key in set! ", lnk.Cid.String()) + } + child, err := ds.Get(context.Background(), c) + if err != nil { + t.Fatal(err) + } + traverseAndCheck(t, child, ds, hasF) + } +} + +type brokenBuilder struct{} + +func (brokenBuilder) Sum([]byte) (cid.Cid, error) { return cid.Undef, errors.New("Nope!") } +func (brokenBuilder) GetCodec() uint64 { return 0 } +func (b brokenBuilder) WithCodec(uint64) cid.Builder { return b } + +// builder that will pass the basic SetCidBuilder tests but fail otherwise +type sneakyBrokenBuilder struct{} + +func (sneakyBrokenBuilder) Sum(data []byte) (cid.Cid, error) { + if len(data) == 256 { + return V1CidPrefix().Sum(data) + } + return cid.Undef, errors.New("Nope!") +} +func (sneakyBrokenBuilder) GetCodec() uint64 { return 0 } +func (b sneakyBrokenBuilder) WithCodec(uint64) cid.Builder { return b } + +func TestBadBuilderEncode(t *testing.T) { + n := NodeWithData([]byte("boop")) + + t.Run("good builder sanity check", func(t *testing.T) { + if _, err := n.EncodeProtobuf(false); err != nil { + t.Fatal(err) + } + if err := n.SetCidBuilder( + &cid.Prefix{ + MhType: mh.SHA2_256, + MhLength: -1, + Version: 1, + Codec: cid.DagProtobuf, + }, + ); err != nil { + t.Fatal(err) + } + }) + + t.Run("hasher we can't use, should error", func(t *testing.T) { + if err := n.SetCidBuilder( + &cid.Prefix{ + MhType: mh.SHA2_256_TRUNC254_PADDED, + MhLength: 256, + Version: 1, + Codec: cid.DagProtobuf, + }, + ); err == nil { + t.Fatal("expected SetCidBuilder to error on unusable hasher") + } + if _, err := n.EncodeProtobuf(false); err != nil { + t.Fatalf("expected EncodeProtobuf to use safe CidBuilder: %v", err) + } + }) + + t.Run("broken custom builder, should error", func(t *testing.T) { + if err := n.SetCidBuilder(brokenBuilder{}); err == nil { + t.Fatal("expected SetCidBuilder to error on unusable hasher") + } + if _, err := n.EncodeProtobuf(false); err != nil { + t.Fatalf("expected EncodeProtobuf to use safe CidBuilder: %v", err) + } + }) + + t.Run("broken custom builder as pointer, should error", func(t *testing.T) { + if err := n.SetCidBuilder(&brokenBuilder{}); err == nil { + t.Fatal("expected SetCidBuilder to error on unusable hasher") + } + if _, err := n.EncodeProtobuf(false); err != nil { + t.Fatalf("expected EncodeProtobuf to use safe CidBuilder: %v", err) + } + }) + + t.Run("broken sneaky custom builder, should error", func(t *testing.T) { + if err := 
n.SetCidBuilder(sneakyBrokenBuilder{}); err != nil { + t.Fatalf("expected SetCidBuilder to not error with sneaky custom builder: %v", err) + } + if _, err := n.EncodeProtobuf(false); err == nil { + t.Fatal("expected EncodeProtobuf to fail using the sneaky custom builder") + } + if len(n.RawData()) != 0 { + t.Fatal("expected RawData to return zero-byte slice") + } + if n.Cid().String() != "bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" { + t.Fatal("expected Cid to return the zero dag-pb CID") + } + if n.String() != "bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" { + t.Fatal("expected String to return the zero dag-pb CID string") + } + }) +} + +func TestLinkChecking(t *testing.T) { + cases := []struct { + name string + fn func(*ProtoNode) error + }{ + { + name: "AddRawLink overflow Tsize", + fn: func(n *ProtoNode) error { + return n.AddRawLink("foo", &ipld.Link{Size: math.MaxUint64, Cid: someCid}) + }, + }, + + { + name: "AddRawLink undefined CID", + fn: func(n *ProtoNode) error { + return n.AddRawLink("foo", &ipld.Link{Cid: cid.Undef}) + }, + }, + + { + name: "SetLinks overflow Tsize", + fn: func(n *ProtoNode) error { + return n.SetLinks([]*ipld.Link{{Size: math.MaxUint64, Cid: someCid}}) + }, + }, + + { + name: "SetLinks undefined CID", + fn: func(n *ProtoNode) error { + return n.SetLinks([]*ipld.Link{{Cid: cid.Undef}}) + }, + }, + + { + name: "UnmarshalJSON overflow Tsize", + fn: func(n *ProtoNode) error { + return n.UnmarshalJSON([]byte(`{"data":null,"links":[{"Name":"","Size":18446744073709549568,"Cid":{"/":"QmNPWHBrVQiiV8FpyNuEPhB9E2rbvdy9Yx79EY1EJuyf9o"}}]}`)) + }, + }, + + { + name: "UnmarshalJSON undefined CID", + fn: func(n *ProtoNode) error { + return n.UnmarshalJSON([]byte(`{"data":null,"links":[{"Name":"","Size":100}]}`)) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + n := NodeWithData([]byte("boop")) + err := tc.fn(n) + if err == nil { + t.Fatal("expected error") + } + }) + } + + t.Run("round-trip block with bad Tsize", func(t *testing.T) { + badblock, _ := hex.DecodeString("122f0a22122000bb3604d2ecd386227007c548249521fbb9a394e1e26460091d0a692888e7361880f0ffffffffffffff01") + n, err := DecodeProtobuf(badblock) + if err != nil { + t.Fatal(err) + } + // sanity + if len(n.Links()) != 1 { + t.Fatal("expected a link") + } + // sanity + if n.Links()[0].Size <= math.MaxInt64 { + t.Fatal("expected link Tsize to be oversized") + } + + // forced round-trip + byts, err := n.EncodeProtobuf(true) + if err != nil { + t.Fatal(err) + } + n, err = DecodeProtobuf(byts) + if err != nil { + t.Fatal(err) + } + if len(n.Links()) != 1 { + t.Fatal("expected a link") + } + if n.Links()[0].Size != 0 { + t.Fatal("expected link Tsize to be truncated on reencode") + } + }) +} + +func TestNode(t *testing.T) { + + n1 := NodeWithData([]byte("beep")) + n2 := NodeWithData([]byte("boop")) + n3 := NodeWithData([]byte("beep boop")) + if err := n3.AddNodeLink("beep-link", n1); err != nil { + t.Error(err) + } + if err := n3.AddNodeLink("boop-link", n2); err != nil { + t.Error(err) + } + + printn := func(name string, n *ProtoNode) { + fmt.Println(">", name) + fmt.Println("data:", string(n.Data())) + + fmt.Println("links:") + for _, l := range n.Links() { + fmt.Println("-", l.Name, l.Size, l.Cid) + } + + e, err := n.EncodeProtobuf(false) + if err != nil { + t.Error(err) + } else { + fmt.Println("encoded:", e) + } + + h := n.Multihash() + k := n.Cid().Hash() + if k.String() != h.String() { + t.Error("Key is not equivalent to multihash") + } else { + 
fmt.Println("key: ", k) + } + + SubtestNodeStat(t, n) + } + + printn("beep", n1) + printn("boop", n2) + printn("beep boop", n3) +} + +func SubtestNodeStat(t *testing.T, n *ProtoNode) { + enc, err := n.EncodeProtobuf(true) + if err != nil { + t.Error("n.EncodeProtobuf(true) failed") + return + } + + cumSize, err := n.Size() + if err != nil { + t.Error("n.Size() failed") + return + } + + k := n.Cid() + + expected := ipld.NodeStat{ + NumLinks: len(n.Links()), + BlockSize: len(enc), + LinksSize: len(enc) - len(n.Data()), // includes framing. + DataSize: len(n.Data()), + CumulativeSize: int(cumSize), + Hash: k.String(), + } + + actual, err := n.Stat() + if err != nil { + t.Error("n.Stat() failed") + return + } + + if expected != *actual { + t.Errorf("n.Stat incorrect.\nexpect: %s\nactual: %s", expected, actual) + } else { + fmt.Printf("n.Stat correct: %s\n", actual) + } +} + +type devZero struct{} + +func (devZero) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +func TestBatchFetch(t *testing.T) { + read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + runBatchFetchTest(t, read) +} + +func TestBatchFetchDupBlock(t *testing.T) { + read := io.LimitReader(devZero{}, 1024*32) + runBatchFetchTest(t, read) +} + +// makeTestDAG creates a simple DAG from the data in a reader. +// First, a node is created from each 512 bytes of data from the reader +// (like a the Size chunker would do). Then all nodes are added as children +// to a root node, which is returned. +func makeTestDAG(t *testing.T, read io.Reader, ds ipld.DAGService) ipld.Node { + p := make([]byte, 512) + nodes := []*ProtoNode{} + + for { + n, err := io.ReadFull(read, p) + if err == io.EOF { + break + } + + if err != nil { + t.Fatal(err) + } + + if n != len(p) { + t.Fatal("should have read 512 bytes from the reader") + } + + protoNode := NodeWithData(p) + nodes = append(nodes, protoNode) + } + + ctx := context.Background() + // Add a root referencing all created nodes + root := NodeWithData(nil) + for _, n := range nodes { + err := root.AddNodeLink(n.Cid().String(), n) + if err != nil { + t.Fatal(err) + } + err = ds.Add(ctx, n) + if err != nil { + t.Fatal(err) + } + } + err := ds.Add(ctx, root) + if err != nil { + t.Fatal(err) + } + return root +} + +// makeTestDAGReader takes the root node as returned by makeTestDAG and +// provides a reader that reads all the RawData from that node and its children. 
+func makeTestDAGReader(t *testing.T, root ipld.Node, ds ipld.DAGService) io.Reader { + ctx := context.Background() + buf := new(bytes.Buffer) + buf.Write(root.RawData()) + for _, l := range root.Links() { + n, err := ds.Get(ctx, l.Cid) + if err != nil { + t.Fatal(err) + } + _, err = buf.Write(n.RawData()) + if err != nil { + t.Fatal(err) + } + } + return buf +} + +func runBatchFetchTest(t *testing.T, read io.Reader) { + ctx := context.Background() + var dagservs []ipld.DAGService + for _, bsi := range bstest.Mocks(5) { + dagservs = append(dagservs, NewDAGService(bsi)) + } + + root := makeTestDAG(t, read, dagservs[0]) + + t.Log("finished setup.") + + dagr := makeTestDAGReader(t, root, dagservs[0]) + + expected, err := io.ReadAll(dagr) + if err != nil { + t.Fatal(err) + } + + err = dagservs[0].Add(ctx, root) + if err != nil { + t.Fatal(err) + } + + t.Log("Added file to first node.") + + c := root.Cid() + + wg := sync.WaitGroup{} + errs := make(chan error) + + for i := 1; i < len(dagservs); i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + first, err := dagservs[i].Get(ctx, c) + if err != nil { + errs <- err + } + fmt.Println("Got first node back.") + + firstpb, ok := first.(*ProtoNode) + if !ok { + errs <- ErrNotProtobuf + } + read := makeTestDAGReader(t, firstpb, dagservs[i]) + datagot, err := io.ReadAll(read) + if err != nil { + errs <- err + } + + if !bytes.Equal(datagot, expected) { + errs <- errors.New("got bad data back") + } + }(i) + } + + go func() { + wg.Wait() + close(errs) + }() + + for err := range errs { + if err != nil { + t.Fatal(err) + } + } +} + +func TestCantGet(t *testing.T) { + ds := dstest.Mock() + a := NodeWithData([]byte("A")) + + c := a.Cid() + _, err := ds.Get(context.Background(), c) + if !strings.Contains(err.Error(), "not found") { + t.Fatal("expected err not found, got: ", err) + } +} + +func TestFetchGraph(t *testing.T) { + var dservs []ipld.DAGService + bsis := bstest.Mocks(2) + for _, bsi := range bsis { + dservs = append(dservs, NewDAGService(bsi)) + } + + read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + root := makeTestDAG(t, read, dservs[0]) + + err := FetchGraph(context.TODO(), root.Cid(), dservs[1]) + if err != nil { + t.Fatal(err) + } + + // create an offline dagstore and ensure all blocks were fetched + bs := bserv.New(bsis[1].Blockstore(), offline.Exchange(bsis[1].Blockstore())) + + offlineDS := NewDAGService(bs) + + err = Walk(context.Background(), offlineDS.GetLinks, root.Cid(), func(_ cid.Cid) bool { return true }) + if err != nil { + t.Fatal(err) + } +} + +func TestFetchGraphWithDepthLimit(t *testing.T) { + type testcase struct { + depthLim int + setLen int + } + + tests := []testcase{ + {1, 4}, + {0, 1}, + {-1, 6}, + {2, 6}, + {3, 6}, + } + + testF := func(t *testing.T, tc testcase) { + var dservs []ipld.DAGService + bsis := bstest.Mocks(2) + for _, bsi := range bsis { + dservs = append(dservs, NewDAGService(bsi)) + } + + root := makeDepthTestingGraph(t, dservs[0]) + + err := FetchGraphWithDepthLimit(context.TODO(), root.Cid(), tc.depthLim, dservs[1]) + if err != nil { + t.Fatal(err) + } + + // create an offline dagstore and ensure all blocks were fetched + bs := bserv.New(bsis[1].Blockstore(), offline.Exchange(bsis[1].Blockstore())) + + offlineDS := NewDAGService(bs) + + set := make(map[string]int) + visitF := func(c cid.Cid, depth int) bool { + if tc.depthLim < 0 || depth <= tc.depthLim { + set[string(c.Bytes())] = depth + return true + } + return false + + } + + err = WalkDepth(context.Background(), offlineDS.GetLinks, 
root.Cid(), visitF) + if err != nil { + t.Fatal(err) + } + + if len(set) != tc.setLen { + t.Fatalf("expected %d nodes but visited %d", tc.setLen, len(set)) + } + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("depth limit %d", tc.depthLim), func(t *testing.T) { + testF(t, tc) + }) + } +} + +func TestWalk(t *testing.T) { + bsi := bstest.Mocks(1) + ds := NewDAGService(bsi[0]) + + read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024) + root := makeTestDAG(t, read, ds) + + set := cid.NewSet() + + err := Walk(context.Background(), ds.GetLinks, root.Cid(), set.Visit) + if err != nil { + t.Fatal(err) + } + + traverseAndCheck(t, root, ds, set.Has) +} + +func TestFetchFailure(t *testing.T) { + ctx := context.Background() + + ds := dstest.Mock() + ds_bad := dstest.Mock() + + top := new(ProtoNode) + for i := 0; i < 10; i++ { + nd := NodeWithData([]byte{byte('a' + i)}) + err := ds.Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + err = top.AddNodeLink(fmt.Sprintf("AA%d", i), nd) + if err != nil { + t.Fatal(err) + } + } + + for i := 0; i < 10; i++ { + nd := NodeWithData([]byte{'f', 'a' + byte(i)}) + err := ds_bad.Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + err = top.AddNodeLink(fmt.Sprintf("BB%d", i), nd) + if err != nil { + t.Fatal(err) + } + } + + getters := ipld.GetDAG(ctx, ds, top) + for i, getter := range getters { + _, err := getter.Get(ctx) + if err != nil && i < 10 { + t.Fatal(err) + } + if err == nil && i >= 10 { + t.Fatal("should have failed request") + } + } +} + +func TestUnmarshalFailure(t *testing.T) { + badData := []byte("hello world") + + _, err := DecodeProtobuf(badData) + if err == nil { + t.Fatal("shouldnt succeed to parse this") + } + + // now with a bad link + pbn := &mdpb.PBNode{Links: []*mdpb.PBLink{{Hash: []byte("not a multihash")}}} + badlink, err := pbn.Marshal() + if err != nil { + t.Fatal(err) + } + + _, err = DecodeProtobuf(badlink) + if err == nil { + t.Fatal("should have failed to parse node with bad link") + } + + n := &ProtoNode{} + n.Marshal() +} + +func TestBasicAddGet(t *testing.T) { + ctx := context.Background() + + ds := dstest.Mock() + nd := new(ProtoNode) + + err := ds.Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + out, err := ds.Get(ctx, nd.Cid()) + if err != nil { + t.Fatal(err) + } + + if !nd.Cid().Equals(out.Cid()) { + t.Fatal("output didnt match input") + } +} + +func TestGetRawNodes(t *testing.T) { + ctx := context.Background() + + rn := NewRawNode([]byte("test")) + + ds := dstest.Mock() + + err := ds.Add(ctx, rn) + if err != nil { + t.Fatal(err) + } + + if !rn.Cid().Equals(rn.Cid()) { + t.Fatal("output cids didnt match") + } + + out, err := ds.Get(ctx, rn.Cid()) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(out.RawData(), []byte("test")) { + t.Fatal("raw block should match input data") + } + + if out.Links() != nil { + t.Fatal("raw blocks shouldnt have links") + } + + if out.Tree("", -1) != nil { + t.Fatal("tree should return no paths in a raw block") + } + + size, err := out.Size() + if err != nil { + t.Fatal(err) + } + if size != 4 { + t.Fatal("expected size to be 4") + } + + ns, err := out.Stat() + if err != nil { + t.Fatal(err) + } + + if ns.DataSize != 4 { + t.Fatal("expected size to be 4, got: ", ns.DataSize) + } + + _, _, err = out.Resolve([]string{"foo"}) + if err != ErrLinkNotFound { + t.Fatal("shouldnt find links under raw blocks") + } +} + +func TestProtoNodeResolve(t *testing.T) { + + nd := new(ProtoNode) + nd.SetLinks([]*ipld.Link{{Name: "foo", Cid: someCid}}) + + lnk, left, err := 
nd.ResolveLink([]string{"foo", "bar"}) + if err != nil { + t.Fatal(err) + } + + if len(left) != 1 || left[0] != "bar" { + t.Fatal("expected the single path element 'bar' to remain") + } + + if lnk.Name != "foo" { + t.Fatal("how did we get anything else?") + } + + tvals := nd.Tree("", -1) + if len(tvals) != 1 || tvals[0] != "foo" { + t.Fatal("expected tree to return []{\"foo\"}") + } +} + +func TestCidRetention(t *testing.T) { + ctx := context.Background() + + nd := new(ProtoNode) + nd.SetData([]byte("fooooo")) + + pref := nd.Cid().Prefix() + pref.Version = 1 + + c2, err := pref.Sum(nd.RawData()) + if err != nil { + t.Fatal(err) + } + + blk, err := blocks.NewBlockWithCid(nd.RawData(), c2) + if err != nil { + t.Fatal(err) + } + + bs := dstest.Bserv() + err = bs.AddBlock(ctx, blk) + if err != nil { + t.Fatal(err) + } + + ds := NewDAGService(bs) + out, err := ds.Get(ctx, c2) + if err != nil { + t.Fatal(err) + } + + if !out.Cid().Equals(c2) { + t.Fatal("output cid didnt match") + } +} + +func TestCidRawDoesnNeedData(t *testing.T) { + srv := NewDAGService(dstest.Bserv()) + nd := NewRawNode([]byte("somedata")) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // there is no data for this node in the blockservice + // so dag service can't load it + links, err := srv.GetLinks(ctx, nd.Cid()) + if err != nil { + t.Fatal(err) + } + if len(links) != 0 { + t.Fatal("raw node shouldn't have any links") + } +} + +func TestRawToJson(t *testing.T) { + rawData := []byte{1, 2, 3, 4} + nd := NewRawNode(rawData) + encoded, err := nd.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var res interface{} + err = json.Unmarshal(encoded, &res) + if err != nil { + t.Fatal(err) + } + resBytes, ok := res.(string) + if !ok { + t.Fatal("expected to marshal to a string") + } + if string(rawData) != resBytes { + t.Fatal("failed to round-trip bytes") + } +} + +func TestGetManyDuplicate(t *testing.T) { + ctx := context.Background() + + srv := NewDAGService(dstest.Bserv()) + + nd := NodeWithData([]byte("foo")) + if err := srv.Add(ctx, nd); err != nil { + t.Fatal(err) + } + nds := srv.GetMany(ctx, []cid.Cid{nd.Cid(), nd.Cid(), nd.Cid()}) + out, ok := <-nds + if !ok { + t.Fatal("expecting node foo") + } + if out.Err != nil { + t.Fatal(out.Err) + } + if !out.Node.Cid().Equals(nd.Cid()) { + t.Fatal("got wrong node") + } + out, ok = <-nds + if ok { + if out.Err != nil { + t.Fatal(out.Err) + } else { + t.Fatal("expecting no more nodes") + } + } +} + +func TestEnumerateAsyncFailsNotFound(t *testing.T) { + ctx := context.Background() + + a := NodeWithData([]byte("foo1")) + b := NodeWithData([]byte("foo2")) + c := NodeWithData([]byte("foo3")) + d := NodeWithData([]byte("foo4")) + e := NodeWithData([]byte("foo5")) + + ds := dstest.Mock() + for _, n := range []ipld.Node{a, b, c} { + err := ds.Add(ctx, n) + if err != nil { + t.Fatal(err) + } + } + + parent := new(ProtoNode) + if err := parent.AddNodeLink("a", a); err != nil { + t.Fatal(err) + } + + if err := parent.AddNodeLink("b", b); err != nil { + t.Fatal(err) + } + + if err := parent.AddNodeLink("c", c); err != nil { + t.Fatal(err) + } + + if err := parent.AddNodeLink("d", d); err != nil { + t.Fatal(err) + } + + if err := parent.AddNodeLink("e", e); err != nil { + t.Fatal(err) + } + + err := ds.Add(ctx, parent) + if err != nil { + t.Fatal(err) + } + + cset := cid.NewSet() + err = Walk(ctx, GetLinksDirect(ds), parent.Cid(), cset.Visit) + if err == nil { + t.Fatal("this should have failed") + } +} + +func TestLinkSorting(t *testing.T) { 
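+	// This package sorts dag-pb links by Name when a node is mutated or
+	// re-encoded (see node.go); the test below builds a deliberately
+	// out-of-order node and checks that read-only operations preserve the
+	// original bytes while mutations yield the sorted form.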
+	az := "az"
+	aaaa := "aaaa"
+	bbbb := "bbbb"
+	cccc := "cccc"
+
+	azBlk := NewRawNode([]byte(az))
+	aaaaBlk := NewRawNode([]byte(aaaa))
+	bbbbBlk := NewRawNode([]byte(bbbb))
+	ccccBlk := NewRawNode([]byte(cccc))
+	pbn := &mdpb.PBNode{
+		Links: []*mdpb.PBLink{
+			{Hash: bbbbBlk.Cid().Bytes(), Name: &bbbb},
+			{Hash: azBlk.Cid().Bytes(), Name: &az},
+			{Hash: aaaaBlk.Cid().Bytes(), Name: &aaaa},
+			{Hash: ccccBlk.Cid().Bytes(), Name: &cccc},
+		},
+	}
+	byts, err := pbn.Marshal()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	mustLookupNodeString := func(t *testing.T, node prime.Node, name string) prime.Node {
+		subNode, err := node.LookupByString(name)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return subNode
+	}
+
+	mustLookupNodeIndex := func(t *testing.T, node prime.Node, idx int64) prime.Node {
+		subNode, err := node.LookupByIndex(idx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return subNode
+	}
+
+	mustNodeAsString := func(t *testing.T, node prime.Node) string {
+		str, err := node.AsString()
+		if err != nil {
+			t.Fatal(err)
+		}
+		return str
+	}
+
+	verifyUnsortedNode := func(t *testing.T, node *ProtoNode) {
+		links := node.Links()
+		if len(links) != 4 {
+			t.Errorf("wrong number of links, expected 4 but got %d", len(links))
+		}
+		if links[0].Name != bbbb {
+			t.Errorf("expected link 0 to be 'bbbb', got %s", links[0].Name)
+		}
+		if links[1].Name != az {
+			t.Errorf("expected link 1 to be 'az', got %s", links[1].Name)
+		}
+		if links[2].Name != aaaa {
+			t.Errorf("expected link 2 to be 'aaaa', got %s", links[2].Name)
+		}
+		if links[3].Name != cccc {
+			t.Errorf("expected link 3 to be 'cccc', got %s", links[3].Name)
+		}
+
+		// check the go-ipld-prime form
+		linksNode := mustLookupNodeString(t, node, "Links")
+		if linksNode.Length() != 4 {
+			t.Errorf("(Node) wrong number of links, expected 4 but got %d", linksNode.Length())
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 0), "Name")); name != bbbb {
+			t.Errorf("(Node) expected link 0 to be 'bbbb', got %s", name)
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 1), "Name")); name != az {
+			t.Errorf("(Node) expected link 1 to be 'az', got %s", name)
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 2), "Name")); name != aaaa {
+			t.Errorf("(Node) expected link 2 to be 'aaaa', got %s", name)
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 3), "Name")); name != cccc {
+			t.Errorf("(Node) expected link 3 to be 'cccc', got %s", name)
+		}
+	}
+
+	verifySortedNode := func(t *testing.T, node *ProtoNode) {
+		links := node.Links()
+		if len(links) != 4 {
+			t.Errorf("wrong number of links, expected 4 but got %d", len(links))
+		}
+		if links[0].Name != aaaa {
+			t.Errorf("expected link 0 to be 'aaaa', got %s", links[0].Name)
+		}
+		if links[1].Name != az {
+			t.Errorf("expected link 1 to be 'az', got %s", links[1].Name)
+		}
+		if links[2].Name != bbbb {
+			t.Errorf("expected link 2 to be 'bbbb', got %s", links[2].Name)
+		}
+		if links[3].Name != cccc {
+			t.Errorf("expected link 3 to be 'cccc', got %s", links[3].Name)
+		}
+
+		// check the go-ipld-prime form
+		linksNode := mustLookupNodeString(t, node, "Links")
+		if linksNode.Length() != 4 {
+			t.Errorf("(Node) wrong number of links, expected 4 but got %d", linksNode.Length())
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 0), "Name")); name != aaaa {
+			t.Errorf("(Node) expected link 0 to be 'aaaa', got %s", name)
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 1), "Name")); name != az {
+			t.Errorf("(Node) expected link 1 to be 'az', got %s", name)
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 2), "Name")); name != bbbb {
+			t.Errorf("(Node) expected link 2 to be 'bbbb', got %s", name)
+		}
+		if name := mustNodeAsString(t, mustLookupNodeString(t, mustLookupNodeIndex(t, linksNode, 3), "Name")); name != cccc {
+			t.Errorf("(Node) expected link 3 to be 'cccc', got %s", name)
+		}
+	}
+
+	t.Run("decode", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		verifyUnsortedNode(t, node)
+	})
+
+	t.Run("RawData() should not mutate, should return original form", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		rawData := node.RawData()
+		verifyUnsortedNode(t, node)
+		if !bytes.Equal(rawData, byts) {
+			t.Error("RawData() did not return original bytes")
+		}
+	})
+
+	t.Run("Size() should not mutate", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		sz, err := node.Size()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if sz != 182 {
+			t.Errorf("expected size to be 182, got %d", sz)
+		}
+		verifyUnsortedNode(t, node)
+	})
+
+	t.Run("GetPBNode() should not mutate, returned PBNode should be sorted", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		rtPBNode := node.GetPBNode()
+		rtByts, err := rtPBNode.Marshal()
+		if err != nil {
+			t.Fatal(err)
+		}
+		verifyUnsortedNode(t, node)
+		rtNode, err := DecodeProtobuf(rtByts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		verifySortedNode(t, rtNode)
+	})
+
+	t.Run("add and remove link should mutate", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err = node.AddRawLink("foo", &ipld.Link{
+			Size: 10,
+			Cid:  someCid,
+		}); err != nil {
+			t.Fatal(err)
+		}
+		if err = node.RemoveNodeLink("foo"); err != nil {
+			t.Fatal(err)
+		}
+		verifySortedNode(t, node)
+	})
+
+	t.Run("update link should not mutate, returned ProtoNode should be sorted", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		newNode, err := node.UpdateNodeLink("self", node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err = newNode.RemoveNodeLink("self"); err != nil {
+			t.Fatal(err)
+		}
+		verifySortedNode(t, newNode)
+		verifyUnsortedNode(t, node)
+	})
+
+	t.Run("SetLinks() should mutate", func(t *testing.T) {
+		node, err := DecodeProtobuf(byts)
+		if err != nil {
+			t.Fatal(err)
+		}
+		links := node.Links() // clone
+		node.SetLinks(links)
+		verifySortedNode(t, node)
+	})
+}
+
+func TestProgressIndicator(t *testing.T) {
+	testProgressIndicator(t, 5)
+}
+
+func TestProgressIndicatorNoChildren(t *testing.T) {
+	testProgressIndicator(t, 0)
+}
+
+func testProgressIndicator(t *testing.T, depth int) {
+	ds := dstest.Mock()
+
+	top, numChildren := mkDag(ds, depth)
+
+	v := new(ProgressTracker)
+	ctx := v.DeriveContext(context.Background())
+
+	err := FetchGraph(ctx, top, ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if v.Value() != numChildren+1 {
+		t.Errorf("wrong number of children reported in progress indicator, expected %d, got %d",
+			numChildren+1, v.Value())
+	}
+}
+
+func mkDag(ds ipld.DAGService, depth int) (cid.Cid, int) {
+	ctx := context.Background()
+
+	totalChildren := 0
+	f := func() *ProtoNode {
+		p := new(ProtoNode)
+		buf := make([]byte, 16)
+		rand.Read(buf)
+
+		p.SetData(buf)
+		err := ds.Add(ctx, p)
+		if err != nil {
+			panic(err)
+		}
+		return p
+	}
+
+	for i := 0; i < depth; i++ {
+		thisf := f
+		f = func() *ProtoNode {
+			pn := mkNodeWithChildren(thisf, 10)
+			err := ds.Add(ctx, pn)
+			if err != nil {
+				panic(err)
+			}
+			totalChildren += 10
+			return pn
+		}
+	}
+
+	nd := f()
+	err := ds.Add(ctx, nd)
+	if err != nil {
+		panic(err)
+	}
+
+	return nd.Cid(), totalChildren
+}
+
+func mkNodeWithChildren(getChild func() *ProtoNode, width int) *ProtoNode {
+	cur := new(ProtoNode)
+
+	for i := 0; i < width; i++ {
+		c := getChild()
+		if err := cur.AddNodeLink(fmt.Sprint(i), c); err != nil {
+			panic(err)
+		}
+	}
+
+	return cur
+}
diff --git a/ipld/merkledag/node.go b/ipld/merkledag/node.go
new file mode 100644
index 0000000000..24ad8e1654
--- /dev/null
+++ b/ipld/merkledag/node.go
@@ -0,0 +1,575 @@
+package merkledag
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"sort"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	cid "github.com/ipfs/go-cid"
+	format "github.com/ipfs/go-ipld-format"
+	legacy "github.com/ipfs/go-ipld-legacy"
+	logging "github.com/ipfs/go-log/v2"
+	dagpb "github.com/ipld/go-codec-dagpb"
+	ipld "github.com/ipld/go-ipld-prime"
+	mh "github.com/multiformats/go-multihash"
+	mhcore "github.com/multiformats/go-multihash/core"
+)
+
+// Common errors
+var (
+	ErrNotProtobuf  = fmt.Errorf("expected protobuf dag node")
+	ErrNotRawNode   = fmt.Errorf("expected raw bytes node")
+	ErrLinkNotFound = fmt.Errorf("no link by that name")
+)
+
+var log = logging.Logger("merkledag")
+
+// for testing custom CidBuilders
+var zeros [256]byte
+var zeroCid = mustZeroCid()
+
+type immutableProtoNode struct {
+	encoded []byte
+	dagpb.PBNode
+}
+
+// ProtoNode represents a node in the IPFS Merkle DAG.
+// nodes have opaque data and a set of navigable links.
+// ProtoNode is a go-ipld-legacy.UniversalNode, meaning it is both
+// a go-ipld-prime node and a go-ipld-format node.
+// ProtoNode maintains compatibility with its original implementation
+// as a go-ipld-format only node, which included some mutability, namely
+// the ability to add/remove links in place
+//
+// TODO: We should be able to eventually replace this implementation with
+// * go-codec-dagpb for basic DagPB encode/decode to go-ipld-prime
+// * go-unixfsnode ADLs for higher level DAGPB functionality
+// For the time being however, go-unixfsnode is read only and
+// this mutable protonode implementation is needed to support go-unixfs,
+// the only library that implements both read and write for UnixFS v1.
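+//
+// Illustrative mutable use (other is any existing format.Node):
+//
+//	n := NodeWithData([]byte("hello"))
+//	if err := n.AddNodeLink("child", other); err != nil {
+//		return err
+//	}
+//	c := n.Cid() // encodes (links sorted) and caches the CID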
+type ProtoNode struct {
+	links      []*format.Link
+	linksDirty bool
+	data       []byte
+
+	// cache encoded/marshaled value, kept to make the go-ipld-prime Node
+	// interface work (see prime.go) and to provide a cached []byte encoded form
+	encoded *immutableProtoNode
+	cached  cid.Cid
+
+	// builder specifies cid version and hashing function
+	builder cid.Builder
+}
+
+var v0CidPrefix = cid.Prefix{
+	Codec:    cid.DagProtobuf,
+	MhLength: -1,
+	MhType:   mh.SHA2_256,
+	Version:  0,
+}
+
+var v1CidPrefix = cid.Prefix{
+	Codec:    cid.DagProtobuf,
+	MhLength: -1,
+	MhType:   mh.SHA2_256,
+	Version:  1,
+}
+
+// V0CidPrefix returns a prefix for CIDv0
+func V0CidPrefix() cid.Prefix { return v0CidPrefix }
+
+// V1CidPrefix returns a prefix for CIDv1 with the default settings
+func V1CidPrefix() cid.Prefix { return v1CidPrefix }
+
+// PrefixForCidVersion returns the Protobuf prefix for a given CID version
+func PrefixForCidVersion(version int) (cid.Prefix, error) {
+	switch version {
+	case 0:
+		return v0CidPrefix, nil
+	case 1:
+		return v1CidPrefix, nil
+	default:
+		return cid.Prefix{}, fmt.Errorf("unknown CID version: %d", version)
+	}
+}
+
+// CidBuilder returns the CID Builder for this ProtoNode; it is never nil
+func (n *ProtoNode) CidBuilder() cid.Builder {
+	if n.builder == nil {
+		n.builder = v0CidPrefix
+	}
+	return n.builder
+}
+
+// SetCidBuilder sets the CID builder if it is non-nil; if nil, the builder
+// is reset to the default value. An error will be returned if the builder
+// is not usable.
+func (n *ProtoNode) SetCidBuilder(builder cid.Builder) error {
+	if builder == nil {
+		n.builder = v0CidPrefix
+		return nil
+	}
+	switch b := builder.(type) {
+	case cid.Prefix:
+		if err := checkHasher(b.MhType, b.MhLength); err != nil {
+			return err
+		}
+	case *cid.Prefix:
+		if err := checkHasher(b.MhType, b.MhLength); err != nil {
+			return err
+		}
+	default:
+		// We have to test it's a usable hasher by invoking it and checking it
+		// doesn't error. This is only a basic check, there are still ways it may
+		// break
+		if _, err := builder.Sum(zeros[:]); err != nil {
+			return err
+		}
+	}
+	n.builder = builder.WithCodec(cid.DagProtobuf)
+	n.cached = cid.Undef
+	return nil
+}
+
+// check whether the hasher is likely to be a usable one
+func checkHasher(indicator uint64, sizeHint int) error {
+	mhLen := sizeHint
+	if mhLen <= 0 {
+		mhLen = -1
+	}
+	_, err := mhcore.GetVariableHasher(indicator, mhLen)
+	return err
+}
+
+// LinkSlice is a slice of format.Links
+type LinkSlice []*format.Link
+
+func (ls LinkSlice) Len() int           { return len(ls) }
+func (ls LinkSlice) Swap(a, b int)      { ls[a], ls[b] = ls[b], ls[a] }
+func (ls LinkSlice) Less(a, b int) bool { return ls[a].Name < ls[b].Name }
+
+// NodeWithData builds a new ProtoNode with the given data.
+func NodeWithData(d []byte) *ProtoNode {
+	return &ProtoNode{data: d}
+}
+
+// AddNodeLink adds a link to another node. The link will be added in
+// sorted order.
+//
+// If sorting has not already been applied to this node (because
+// it was deserialized from a form that did not have sorted links), the links
+// list will be sorted. If a ProtoNode was deserialized from a badly encoded
+// form that did not already have its links sorted, calling AddNodeLink and then
+// RemoveNodeLink for the same link will not result in an identically encoded
+// form as the links will have been sorted.
+func (n *ProtoNode) AddNodeLink(name string, that format.Node) error {
+	lnk, err := format.MakeLink(that)
+	if err != nil {
+		return err
+	}
+
+	lnk.Name = name
+
+	return n.AddRawLink(name, lnk)
+}
+
+// AddRawLink adds a copy of a link to this node. The link will be added in
+// sorted order.
+//
+// If sorting has not already been applied to this node (because
+// it was deserialized from a form that did not have sorted links), the links
+// list will be sorted. If a ProtoNode was deserialized from a badly encoded
+// form that did not already have its links sorted, calling AddRawLink and then
+// RemoveNodeLink for the same link will not result in an identically encoded
+// form as the links will have been sorted.
+func (n *ProtoNode) AddRawLink(name string, l *format.Link) error {
+	lnk := &format.Link{
+		Name: name,
+		Size: l.Size,
+		Cid:  l.Cid,
+	}
+	if err := checkLink(lnk); err != nil {
+		return err
+	}
+	n.links = append(n.links, lnk)
+	n.linksDirty = true // needs a sort
+	n.encoded = nil
+	return nil
+}
+
+// RemoveNodeLink removes a link on this node by the given name. If there are
+// no links with this name, ErrLinkNotFound will be returned. If there is more
+// than one link with this name, they will all be removed.
+func (n *ProtoNode) RemoveNodeLink(name string) error {
+	ref := n.links[:0]
+	found := false
+
+	for _, v := range n.links {
+		if v.Name != name {
+			ref = append(ref, v)
+		} else {
+			found = true
+		}
+	}
+
+	if !found {
+		return ErrLinkNotFound
+	}
+
+	n.links = ref
+	// Even though a removal won't change sorting, this node may have come from
+	// a deserialized state with badly sorted links. Now that we are mutating,
+	// we need to ensure the resulting link list is sorted when it gets consumed.
+	n.linksDirty = true
+	n.encoded = nil
+
+	return nil
+}
+
+// GetNodeLink returns a copy of the link with the given name.
+func (n *ProtoNode) GetNodeLink(name string) (*format.Link, error) {
+	for _, l := range n.links {
+		if l.Name == name {
+			return &format.Link{
+				Name: l.Name,
+				Size: l.Size,
+				Cid:  l.Cid,
+			}, nil
+		}
+	}
+	return nil, ErrLinkNotFound
+}
+
+// GetLinkedProtoNode returns the ProtoNode linked to under the given name.
+func (n *ProtoNode) GetLinkedProtoNode(ctx context.Context, ds format.DAGService, name string) (*ProtoNode, error) {
+	nd, err := n.GetLinkedNode(ctx, ds, name)
+	if err != nil {
+		return nil, err
+	}
+
+	pbnd, ok := nd.(*ProtoNode)
+	if !ok {
+		return nil, ErrNotProtobuf
+	}
+
+	return pbnd, nil
+}
+
+// GetLinkedNode returns the IPLD Node linked to under the given name.
+func (n *ProtoNode) GetLinkedNode(ctx context.Context, ds format.DAGService, name string) (format.Node, error) {
+	lnk, err := n.GetNodeLink(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return lnk.GetNode(ctx, ds)
+}
+
+// Copy returns a copy of the node. The resulting node will have a properly
+// sorted Links list regardless of whether the original came from a badly
+// serialized form that didn't have a sorted list.
+// NOTE: This does not make copies of Node objects in the links.
+func (n *ProtoNode) Copy() format.Node {
+	nnode := new(ProtoNode)
+	if len(n.data) > 0 {
+		nnode.data = make([]byte, len(n.data))
+		copy(nnode.data, n.data)
+	}
+
+	if len(n.links) > 0 {
+		nnode.links = append([]*format.Link(nil), n.links...)
+		// Sort links regardless of linksDirty state, this may have come from a
+		// serialized form that had badly sorted links, in which case linksDirty
+		// will not be true.
+		sort.Stable(LinkSlice(nnode.links))
+	}
+
+	nnode.builder = n.builder
+
+	return nnode
+}
+
+// RawData returns the encoded byte form of this node.
+//
+// Note that this method may return an empty byte slice if there is an error
+// performing the encode. To check whether such an error may have occurred, use
+// node.EncodeProtobuf(false) instead (or prior to calling RawData) and check
+// for its returned error value; the result of EncodeProtobuf is cached so there
+// is minimal overhead when invoking both methods.
+func (n *ProtoNode) RawData() []byte {
+	out, err := n.EncodeProtobuf(false)
+	if err != nil {
+		log.Errorf("failed to encode dag-pb block: %s", err.Error())
+		return nil
+	}
+	return out
+}
+
+// Data returns the data stored by this node.
+func (n *ProtoNode) Data() []byte {
+	return n.data
+}
+
+// SetData stores data in this node.
+func (n *ProtoNode) SetData(d []byte) {
+	n.encoded = nil
+	n.cached = cid.Undef
+	n.data = d
+}
+
+// UpdateNodeLink returns a copy of the node with the link name set to point to
+// that. The link will be added in sorted order. If a link of the same name
+// existed, it is removed.
+//
+// If sorting has not already been applied to this node (because
+// it was deserialized from a form that did not have sorted links), the links
+// list will be sorted in the returned copy.
+func (n *ProtoNode) UpdateNodeLink(name string, that *ProtoNode) (*ProtoNode, error) {
+	newnode := n.Copy().(*ProtoNode)
+	_ = newnode.RemoveNodeLink(name) // ignore error
+	err := newnode.AddNodeLink(name, that)
+	return newnode, err
+}
+
+// Size returns the total size of the data addressed by node,
+// including the total sizes of references.
+func (n *ProtoNode) Size() (uint64, error) {
+	b, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return 0, err
+	}
+
+	s := uint64(len(b))
+	for _, l := range n.links {
+		s += l.Size
+	}
+	return s, nil
+}
+
+// Stat returns statistics on the node.
+func (n *ProtoNode) Stat() (*format.NodeStat, error) {
+	enc, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return nil, err
+	}
+
+	cumSize, err := n.Size()
+	if err != nil {
+		return nil, err
+	}
+
+	return &format.NodeStat{
+		Hash:           n.Cid().String(),
+		NumLinks:       len(n.links),
+		BlockSize:      len(enc),
+		LinksSize:      len(enc) - len(n.data), // includes framing.
+		DataSize:       len(n.data),
+		CumulativeSize: int(cumSize),
+	}, nil
+}
+
+// Loggable implements the ipfs/go-log.Loggable interface.
+func (n *ProtoNode) Loggable() map[string]interface{} {
+	return map[string]interface{}{
+		"node": n.String(),
+	}
+}
+
+// UnmarshalJSON reads the node fields from a JSON-encoded byte slice.
+func (n *ProtoNode) UnmarshalJSON(b []byte) error {
+	s := struct {
+		Data  []byte         `json:"data"`
+		Links []*format.Link `json:"links"`
+	}{}
+
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	n.data = s.Data
+	// Links may not be sorted after deserialization, but we don't change
+	// them until we mutate this node since we're representing the current,
+	// as-serialized state. So n.linksDirty is not set here.
+	n.links = s.Links
+	for _, lnk := range s.Links {
+		if err := checkLink(lnk); err != nil {
+			return err
+		}
+	}
+
+	n.encoded = nil
+	return nil
+}
+
+func checkLink(lnk *format.Link) error {
+	if lnk.Size > math.MaxInt64 {
+		return fmt.Errorf("value of Tsize is too large: %d", lnk.Size)
+	}
+	if !lnk.Cid.Defined() {
+		return errors.New("link must have a defined Cid")
+	}
+	return nil
+}
+
+// MarshalJSON returns a JSON representation of the node.
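+// The (illustrative) output shape, with data base64-encoded by encoding/json
+// and CIDs in the {"/": "..."} form used elsewhere in this package's tests:
+//
+//	{"data":"aGVsbG8=","links":[{"Name":"a","Size":4,"Cid":{"/":"Qm..."}}]}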
+func (n *ProtoNode) MarshalJSON() ([]byte, error) {
+	if n.linksDirty {
+		// there was a mutation involving links, make sure we sort
+		sort.Stable(LinkSlice(n.links))
+		n.linksDirty = false
+		n.encoded = nil
+	}
+
+	out := map[string]interface{}{
+		"data":  n.data,
+		"links": n.links,
+	}
+
+	return json.Marshal(out)
+}
+
+// Cid returns the node's Cid, calculated according to its prefix
+// and raw data contents.
+//
+// Note that this method may return a CID representing a zero-length byte slice
+// if there is an error performing the encode. To check whether such an error
+// may have occurred, use node.EncodeProtobuf(false) instead (or prior to
+// calling Cid) and check its returned error value; the result of
+// EncodeProtobuf is cached, so there is minimal overhead when invoking both
+// methods.
+func (n *ProtoNode) Cid() cid.Cid {
+	// re-encode if necessary and we'll get a new cached CID
+	if _, err := n.EncodeProtobuf(false); err != nil {
+		log.Errorf("failed to encode dag-pb block: %s", err.Error())
+		// error, return a zero-CID
+		c, err := n.CidBuilder().Sum([]byte{})
+		if err != nil {
+			// CidBuilder was a source of error, return _the_ dag-pb zero CIDv1
+			return zeroCid
+		}
+		return c
+	}
+	return n.cached
+}
+
+// String prints the node's Cid.
+//
+// Note that this method may return a CID representing a zero-length byte slice
+// if there is an error performing the encode. To check whether such an error
+// may have occurred, use node.EncodeProtobuf(false) instead (or prior to
+// calling String) and check its returned error value; the result of
+// EncodeProtobuf is cached, so there is minimal overhead when invoking both
+// methods.
+func (n *ProtoNode) String() string {
+	return n.Cid().String()
+}
+
+// Multihash hashes the encoded data of this node.
+//
+// Note that this method may return a multihash representing a zero-length byte
+// slice if there is an error performing the encode. To check whether such an
+// error may have occurred, use node.EncodeProtobuf(false) instead (or prior to
+// calling Multihash) and check its returned error value; the result of
+// EncodeProtobuf is cached, so there is minimal overhead when invoking both
+// methods.
+func (n *ProtoNode) Multihash() mh.Multihash {
+	return n.Cid().Hash()
+}
+
+// Links returns a copy of the node's links.
+func (n *ProtoNode) Links() []*format.Link {
+	if n.linksDirty {
+		// there was a mutation involving links, make sure we sort
+		sort.Stable(LinkSlice(n.links))
+		n.linksDirty = false
+		n.encoded = nil
+	}
+	return append([]*format.Link(nil), n.links...)
+}
+
+// SetLinks replaces the node's links with a copy of the provided links.
+// Sorting will be applied to the list.
+func (n *ProtoNode) SetLinks(links []*format.Link) error {
+	for _, lnk := range links {
+		if err := checkLink(lnk); err != nil {
+			return err
+		}
+	}
+	n.links = append([]*format.Link(nil), links...)
+	n.linksDirty = true // needs a sort
+	n.encoded = nil
+	return nil
+}
+
+// Resolve is an alias for ResolveLink.
+func (n *ProtoNode) Resolve(path []string) (interface{}, []string, error) {
+	return n.ResolveLink(path)
+}
+
+// ResolveLink consumes the first element of the path and obtains the link
+// corresponding to it from the node. It returns the link and the path without
+// the consumed element.
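+//
+// A minimal usage sketch (names are hypothetical):
+//
+//	lnk, rest, err := nd.ResolveLink([]string{"a", "b"})
+//	// lnk is the link named "a", rest == []string{"b"}, and err is non-nil
+//	// if the path is empty or no link named "a" exists.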
+func (n *ProtoNode) ResolveLink(path []string) (*format.Link, []string, error) { + if len(path) == 0 { + return nil, nil, fmt.Errorf("end of path, no more links to resolve") + } + + lnk, err := n.GetNodeLink(path[0]) + if err != nil { + return nil, nil, err + } + + return lnk, path[1:], nil +} + +// Tree returns the link names of the ProtoNode. +// ProtoNodes are only ever one path deep, so anything different than an empty +// string for p results in nothing. The depth parameter is ignored. +func (n *ProtoNode) Tree(p string, depth int) []string { + if p != "" { + return nil + } + + if n.linksDirty { + // there was a mutation involving links, make sure we sort + sort.Stable(LinkSlice(n.links)) + n.linksDirty = false + n.encoded = nil + } + + out := make([]string, 0, len(n.links)) + for _, lnk := range n.links { + out = append(out, lnk.Name) + } + return out +} + +func ProtoNodeConverter(b blocks.Block, nd ipld.Node) (legacy.UniversalNode, error) { + pbNode, ok := nd.(dagpb.PBNode) + if !ok { + return nil, ErrNotProtobuf + } + encoded := &immutableProtoNode{b.RawData(), pbNode} + pn := fromImmutableNode(encoded) + pn.cached = b.Cid() + pn.builder = b.Cid().Prefix() + return pn, nil +} + +// TODO: replace with cid.MustParse() when we bump go-cid +func mustZeroCid() cid.Cid { + c, err := cid.Parse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku") + if err != nil { + panic(err) + } + return c +} + +var _ legacy.UniversalNode = &ProtoNode{} diff --git a/ipld/merkledag/node_test.go b/ipld/merkledag/node_test.go new file mode 100644 index 0000000000..87fb484e87 --- /dev/null +++ b/ipld/merkledag/node_test.go @@ -0,0 +1,194 @@ +package merkledag_test + +import ( + "bytes" + "context" + "testing" + + . "github.com/ipfs/boxo/ipld/merkledag" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +var sampleCid cid.Cid + +func init() { + var err error + // make a test CID -- doesn't matter just to add as a link + sampleCid, err = cid.Cast([]byte{1, 85, 0, 5, 0, 1, 2, 3, 4}) + if err != nil { + panic(err) + } +} + +func TestStableCID(t *testing.T) { + nd := &ProtoNode{} + nd.SetData([]byte("foobar")) + nd.SetLinks([]*ipld.Link{ + {Name: "a", Cid: sampleCid}, + {Name: "b", Cid: sampleCid}, + {Name: "c", Cid: sampleCid}, + }) + expected, err := cid.Decode("QmciCHWD9Q47VPX6naY3XsPZGnqVqbedAniGCcaHjBaCri") + if err != nil { + t.Fatal(err) + } + if !nd.Cid().Equals(expected) { + t.Fatalf("Got CID %s, expected CID %s", nd.Cid(), expected) + } +} + +func TestRemoveLink(t *testing.T) { + nd := &ProtoNode{} + nd.SetLinks([]*ipld.Link{ + {Name: "a", Cid: sampleCid}, + {Name: "b", Cid: sampleCid}, + {Name: "a", Cid: sampleCid}, + {Name: "a", Cid: sampleCid}, + {Name: "c", Cid: sampleCid}, + {Name: "a", Cid: sampleCid}, + }) + + err := nd.RemoveNodeLink("a") + if err != nil { + t.Fatal(err) + } + + if len(nd.Links()) != 2 { + t.Fatal("number of links incorrect") + } + + if nd.Links()[0].Name != "b" { + t.Fatal("link order wrong") + } + + if nd.Links()[1].Name != "c" { + t.Fatal("link order wrong") + } + + // should fail + err = nd.RemoveNodeLink("a") + if err != ErrLinkNotFound { + t.Fatal("should have failed to remove link") + } + + // ensure nothing else got touched + if len(nd.Links()) != 2 { + t.Fatal("number of links incorrect") + } + + if nd.Links()[0].Name != "b" { + t.Fatal("link order wrong") + } + + if nd.Links()[1].Name != "c" { + t.Fatal("link order wrong") + } +} + +func TestFindLink(t *testing.T) { + ctx := 
context.Background() + + ds := mdtest.Mock() + ndEmpty := new(ProtoNode) + err := ds.Add(ctx, ndEmpty) + if err != nil { + t.Fatal(err) + } + + kEmpty := ndEmpty.Cid() + + nd := &ProtoNode{} + nd.SetLinks([]*ipld.Link{ + {Name: "a", Cid: kEmpty}, + {Name: "c", Cid: kEmpty}, + {Name: "b", Cid: kEmpty}, + }) + + err = ds.Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + lnk, err := nd.GetNodeLink("b") + if err != nil { + t.Fatal(err) + } + + if lnk.Name != "b" { + t.Fatal("got wrong link back") + } + + _, err = nd.GetNodeLink("f") + if err != ErrLinkNotFound { + t.Fatal("shouldnt have found link") + } + + _, err = nd.GetLinkedNode(context.Background(), ds, "b") + if err != nil { + t.Fatal(err) + } + + outnd, err := nd.UpdateNodeLink("b", nd) + if err != nil { + t.Fatal(err) + } + + olnk, err := outnd.GetNodeLink("b") + if err != nil { + t.Fatal(err) + } + + if olnk.Cid.String() == kEmpty.String() { + t.Fatal("new link should have different hash") + } +} + +func TestNodeCopy(t *testing.T) { + nd := &ProtoNode{} + nd.SetLinks([]*ipld.Link{ + {Name: "a", Cid: sampleCid}, + {Name: "c", Cid: sampleCid}, + {Name: "b", Cid: sampleCid}, + }) + + nd.SetData([]byte("testing")) + + ond := nd.Copy().(*ProtoNode) + ond.SetData(nil) + + if nd.Data() == nil { + t.Fatal("should be different objects") + } +} + +func TestJsonRoundtrip(t *testing.T) { + nd := new(ProtoNode) + nd.SetLinks([]*ipld.Link{ + {Name: "a", Cid: sampleCid}, + {Name: "c", Cid: sampleCid}, + {Name: "b", Cid: sampleCid}, + }) + nd.SetData([]byte("testing")) + + jb, err := nd.MarshalJSON() + if err != nil { + t.Fatal(err) + } + + nn := new(ProtoNode) + err = nn.UnmarshalJSON(jb) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(nn.Data(), nd.Data()) { + t.Fatal("data wasnt the same") + } + + if !nn.Cid().Equals(nd.Cid()) { + t.Fatal("objects differed after marshaling") + } +} diff --git a/ipld/merkledag/pb/compat_test.go b/ipld/merkledag/pb/compat_test.go new file mode 100644 index 0000000000..529cee0cad --- /dev/null +++ b/ipld/merkledag/pb/compat_test.go @@ -0,0 +1,299 @@ +package merkledag_pb + +// mirrored in JavaScript @ https://github.com/ipld/js-dag-pb/blob/master/test/test-compat.js + +import ( + "encoding/hex" + "encoding/json" + "testing" +) + +var dataZero []byte = make([]byte, 0) +var dataSome []byte = []byte{0, 1, 2, 3, 4} +var cidBytes []byte = []byte{1, 85, 0, 5, 0, 1, 2, 3, 4} +var zeroName string = "" +var someName string = "some name" +var zeroTsize uint64 = 0 +var someTsize uint64 = 1010 +var largeTsize uint64 = 9007199254740991 // JavaScript Number.MAX_SAFE_INTEGER + +type testCase struct { + name string + node *PBNode + expectedBytes string + expectedForm string +} + +var testCases = []testCase{ + { + name: "empty", + node: &PBNode{}, + expectedBytes: "", + expectedForm: "{}", + }, + { + name: "Data zero", + node: &PBNode{Data: dataZero}, + expectedBytes: "0a00", + expectedForm: `{ + "Data": "" +}`, + }, + { + name: "Data some", + node: &PBNode{Data: dataSome}, + expectedBytes: "0a050001020304", + expectedForm: `{ + "Data": "0001020304" +}`, + }, + { + name: "Links zero", + node: &PBNode{Links: make([]*PBLink, 0)}, + expectedBytes: "", + expectedForm: "{}", + }, + { + name: "Data some Links zero", + node: &PBNode{Data: dataSome, Links: make([]*PBLink, 0)}, + expectedBytes: "0a050001020304", + expectedForm: `{ + "Data": "0001020304" +}`, + }, + { + name: "Links empty", + node: &PBNode{Links: []*PBLink{{}}}, + expectedBytes: "1200", + expectedForm: `{ + "Links": [ + {} + ] +}`, + }, + { + name: "Data some 
Links empty", + node: &PBNode{Data: dataSome, Links: []*PBLink{{}}}, + expectedBytes: "12000a050001020304", + expectedForm: `{ + "Data": "0001020304", + "Links": [ + {} + ] +}`, + }, + { + name: "Links Hash zero", + node: &PBNode{Links: []*PBLink{{Hash: dataZero}}}, + expectedBytes: "12020a00", + expectedForm: `{ + "Links": [ + { + "Hash": "" + } + ] +}`, + }, + { + name: "Links Hash some", + node: &PBNode{Links: []*PBLink{{Hash: cidBytes}}}, + expectedBytes: "120b0a09015500050001020304", + expectedForm: `{ + "Links": [ + { + "Hash": "015500050001020304" + } + ] +}`, + }, + { + name: "Links Name zero", + node: &PBNode{Links: []*PBLink{{Name: &zeroName}}}, + expectedBytes: "12021200", + expectedForm: `{ + "Links": [ + { + "Name": "" + } + ] +}`, + }, + { + name: "Links Hash some Name zero", + node: &PBNode{Links: []*PBLink{{Hash: cidBytes, Name: &zeroName}}}, + expectedBytes: "120d0a090155000500010203041200", + expectedForm: `{ + "Links": [ + { + "Hash": "015500050001020304", + "Name": "" + } + ] +}`, + }, + { + name: "Links Name some", + node: &PBNode{Links: []*PBLink{{Name: &someName}}}, + expectedBytes: "120b1209736f6d65206e616d65", + expectedForm: `{ + "Links": [ + { + "Name": "some name" + } + ] +}`, + }, + { + name: "Links Hash some Name some", + node: &PBNode{Links: []*PBLink{{Hash: cidBytes, Name: &someName}}}, + expectedBytes: "12160a090155000500010203041209736f6d65206e616d65", + expectedForm: `{ + "Links": [ + { + "Hash": "015500050001020304", + "Name": "some name" + } + ] +}`, + }, + { + name: "Links Tsize zero", + node: &PBNode{Links: []*PBLink{{Tsize: &zeroTsize}}}, + expectedBytes: "12021800", + expectedForm: `{ + "Links": [ + { + "Tsize": 0 + } + ] +}`, + }, + { + name: "Links Hash some Tsize zero", + node: &PBNode{Links: []*PBLink{{Hash: cidBytes, Tsize: &zeroTsize}}}, + expectedBytes: "120d0a090155000500010203041800", + expectedForm: `{ + "Links": [ + { + "Hash": "015500050001020304", + "Tsize": 0 + } + ] +}`, + }, + { + name: "Links Tsize some", + node: &PBNode{Links: []*PBLink{{Tsize: &someTsize}}}, + expectedBytes: "120318f207", + expectedForm: `{ + "Links": [ + { + "Tsize": 1010 + } + ] +}`, + }, + { + name: "Links Hash some Tsize some", + node: &PBNode{Links: []*PBLink{{Hash: cidBytes, Tsize: &largeTsize}}}, + expectedBytes: "12140a0901550005000102030418ffffffffffffff0f", + expectedForm: `{ + "Links": [ + { + "Hash": "015500050001020304", + "Tsize": 9007199254740991 + } + ] +}`, + }, +} + +func TestCompat(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + verifyRoundTrip(t, tc) + }) + } +} + +func verifyRoundTrip(t *testing.T, tc testCase) { + actualBytes, actualForm, err := nodeRoundTripToString(t, tc.node) + if err != nil { + t.Fatal(err) + } + + if actualBytes != tc.expectedBytes { + t.Logf( + "Expected bytes: [%v]\nGot: [%v]\n", + tc.expectedBytes, + actualBytes) + t.Error("Did not match") + } + + if actualForm != tc.expectedForm { + t.Logf( + "Expected form: [%v]\nGot: [%v]\n", + tc.expectedForm, + actualForm) + t.Error("Did not match") + } +} + +func nodeRoundTripToString(t *testing.T, n *PBNode) (string, string, error) { + bytes, err := n.Marshal() + if err != nil { + return "", "", err + } + t.Logf("[%v]\n", hex.EncodeToString(bytes)) + rt := new(PBNode) + if err := rt.Unmarshal(bytes); err != nil { + return "", "", err + } + str, err := json.MarshalIndent(cleanPBNode(t, rt), "", "\t") + if err != nil { + return "", "", err + } + return hex.EncodeToString(bytes), string(str), nil +} + +// convert a PBLink into a map for 
clean JSON marshalling +func cleanPBLink(t *testing.T, link *PBLink) map[string]interface{} { + if link == nil { + return nil + } + // this would be a bad pb decode + if link.XXX_unrecognized != nil { + t.Fatal("Got unexpected XXX_unrecognized") + } + nl := make(map[string]interface{}) + if link.Hash != nil { + nl["Hash"] = hex.EncodeToString(link.Hash) + } + if link.Name != nil { + nl["Name"] = link.Name + } + if link.Tsize != nil { + nl["Tsize"] = link.Tsize + } + return nl +} + +// convert a PBNode into a map for clean JSON marshalling +func cleanPBNode(t *testing.T, node *PBNode) map[string]interface{} { + // this would be a bad pb decode + if node.XXX_unrecognized != nil { + t.Fatal("Got unexpected XXX_unrecognized") + } + nn := make(map[string]interface{}) + if node.Data != nil { + nn["Data"] = hex.EncodeToString(node.Data) + } + if node.Links != nil { + links := make([]map[string]interface{}, len(node.Links)) + for i, l := range node.Links { + links[i] = cleanPBLink(t, l) + } + nn["Links"] = links + } + return nn +} diff --git a/ipld/merkledag/pb/merkledag.pb.go b/ipld/merkledag/pb/merkledag.pb.go new file mode 100644 index 0000000000..45806ce92d --- /dev/null +++ b/ipld/merkledag/pb/merkledag.pb.go @@ -0,0 +1,1062 @@ +// Code originally generated by protoc-gen-gogo from merkledag.proto, +// now manually managed + +package merkledag_pb + +import ( + bytes "bytes" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// DoNotUpgradeFileEverItWillChangeYourHashes warns users about not breaking +// their file hashes. +const DoNotUpgradeFileEverItWillChangeYourHashes = ` +This file does not produce canonical protobufs. Unfortunately, if we change it, +we'll change the hashes of the files we produce. + +Do *not regenerate this file. +` + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// An IPFS MerkleDAG Link +type PBLink struct { + // multihash of the target object + Hash []byte `protobuf:"bytes,1,opt,name=Hash" json:"Hash,omitempty"` + // utf string name. 
should be unique per object + Name *string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` + // cumulative size of target object + Tsize *uint64 `protobuf:"varint,3,opt,name=Tsize" json:"Tsize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PBLink) Reset() { *m = PBLink{} } +func (*PBLink) ProtoMessage() {} +func (*PBLink) Descriptor() ([]byte, []int) { + return fileDescriptor_10837cc3557cec00, []int{0} +} +func (m *PBLink) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PBLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PBLink.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PBLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_PBLink.Merge(m, src) +} +func (m *PBLink) XXX_Size() int { + return m.Size() +} +func (m *PBLink) XXX_DiscardUnknown() { + xxx_messageInfo_PBLink.DiscardUnknown(m) +} + +var xxx_messageInfo_PBLink proto.InternalMessageInfo + +func (m *PBLink) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *PBLink) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *PBLink) GetTsize() uint64 { + if m != nil && m.Tsize != nil { + return *m.Tsize + } + return 0 +} + +// An IPFS MerkleDAG Node +type PBNode struct { + // refs to other objects + Links []*PBLink `protobuf:"bytes,2,rep,name=Links" json:"Links,omitempty"` + // opaque user data + Data []byte `protobuf:"bytes,1,opt,name=Data" json:"Data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PBNode) Reset() { *m = PBNode{} } +func (*PBNode) ProtoMessage() {} +func (*PBNode) Descriptor() ([]byte, []int) { + return fileDescriptor_10837cc3557cec00, []int{1} +} +func (m *PBNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PBNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PBNode.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PBNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_PBNode.Merge(m, src) +} +func (m *PBNode) XXX_Size() int { + return m.Size() +} +func (m *PBNode) XXX_DiscardUnknown() { + xxx_messageInfo_PBNode.DiscardUnknown(m) +} + +var xxx_messageInfo_PBNode proto.InternalMessageInfo + +func (m *PBNode) GetLinks() []*PBLink { + if m != nil { + return m.Links + } + return nil +} + +func (m *PBNode) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*PBLink)(nil), "merkledag.pb.PBLink") + proto.RegisterType((*PBNode)(nil), "merkledag.pb.PBNode") +} + +func init() { proto.RegisterFile("merkledag.proto", fileDescriptor_10837cc3557cec00) } + +var fileDescriptor_10837cc3557cec00 = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcf, 0x4d, 0x2d, 0xca, + 0xce, 0x49, 0x4d, 0x49, 0x4c, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x12, 0x48, + 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, + 0xcf, 0xd7, 0x07, 0x2b, 0x4a, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 
0x1c, 0x30, 0x0b, 0xa2, 0x59, 0xc9, + 0x8d, 0x8b, 0x2d, 0xc0, 0xc9, 0x27, 0x33, 0x2f, 0x5b, 0x48, 0x88, 0x8b, 0xc5, 0x23, 0xb1, 0x38, + 0x43, 0x82, 0x51, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x06, 0x89, 0xf9, 0x25, 0xe6, 0xa6, 0x4a, + 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x21, 0xc5, 0x99, 0x55, + 0xa9, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x2c, 0x41, 0x10, 0x8e, 0x92, 0x07, 0xc8, 0x1c, 0xbf, 0xfc, + 0x94, 0x54, 0x21, 0x2d, 0x2e, 0x56, 0x90, 0x79, 0xc5, 0x12, 0x4c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, + 0x22, 0x7a, 0xc8, 0xce, 0xd3, 0x83, 0x58, 0x16, 0x04, 0x51, 0x02, 0x32, 0xdf, 0x25, 0xb1, 0x24, + 0x11, 0x66, 0x27, 0x88, 0xed, 0xa4, 0x73, 0xe3, 0xa1, 0x1c, 0xc3, 0x83, 0x87, 0x72, 0x8c, 0x1f, + 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, + 0x1d, 0x8f, 0xe4, 0x18, 0x0f, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, + 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x56, 0xb5, 0x6e, 0x0e, + 0x01, 0x00, 0x00, +} + +func (pbLink *PBLink) VerboseEqual(that interface{}) error { + if that == nil { + if pbLink == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*PBLink) + if !ok { + that2, ok := that.(PBLink) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *PBLink") + } + } + if that1 == nil { + if pbLink == nil { + return nil + } + return fmt.Errorf("that is type *PBLink but is nil && this != nil") + } else if pbLink == nil { + return fmt.Errorf("that is type *PBLink but is not nil && this == nil") + } + if !bytes.Equal(pbLink.Hash, that1.Hash) { + return fmt.Errorf("this.Hash(%v) is not equal to that.Hash(%v)", pbLink.Hash, that1.Hash) + } + if pbLink.Name != nil && that1.Name != nil { + if *pbLink.Name != *that1.Name { + return fmt.Errorf("this.Name(%v) is not equal to that.Name(%v)", *pbLink.Name, *that1.Name) + } + } else if pbLink.Name != nil { + return fmt.Errorf("this.Name == nil && that.Name != nil") + } else if that1.Name != nil { + return fmt.Errorf("this.Name(%v) is not equal to that.Name(%v)", pbLink.Name, that1.Name) + } + if pbLink.Tsize != nil && that1.Tsize != nil { + if *pbLink.Tsize != *that1.Tsize { + return fmt.Errorf("this.Tsize(%v) is not equal to that.Tsize(%v)", *pbLink.Tsize, *that1.Tsize) + } + } else if pbLink.Tsize != nil { + return fmt.Errorf("this.Tsize == nil && that.Tsize != nil") + } else if that1.Tsize != nil { + return fmt.Errorf("this.Tsize(%v) is not equal to that.Tsize(%v)", pbLink.Tsize, that1.Tsize) + } + if !bytes.Equal(pbLink.XXX_unrecognized, that1.XXX_unrecognized) { + return fmt.Errorf("XXX_unrecognized this(%v) is not equal to that(%v)", pbLink.XXX_unrecognized, that1.XXX_unrecognized) + } + return nil +} +func (pbLink *PBLink) Equal(that interface{}) bool { + if that == nil { + return pbLink == nil + } + + that1, ok := that.(*PBLink) + if !ok { + that2, ok := that.(PBLink) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return pbLink == nil + } else if pbLink == nil { + return false + } + if !bytes.Equal(pbLink.Hash, that1.Hash) { + return false + } + if pbLink.Name != nil && that1.Name != nil { + if *pbLink.Name != *that1.Name { + return false + } + } else if pbLink.Name != nil { + return false + } else if that1.Name != nil { + return false + } + if pbLink.Tsize != nil && that1.Tsize != nil { + if *pbLink.Tsize != *that1.Tsize { + return false + } + } else if pbLink.Tsize != nil { + return false + } else if that1.Tsize 
!= nil { + return false + } + if !bytes.Equal(pbLink.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (pbLink *PBNode) VerboseEqual(that interface{}) error { + if that == nil { + if pbLink == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*PBNode) + if !ok { + that2, ok := that.(PBNode) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *PBNode") + } + } + if that1 == nil { + if pbLink == nil { + return nil + } + return fmt.Errorf("that is type *PBNode but is nil && this != nil") + } else if pbLink == nil { + return fmt.Errorf("that is type *PBNode but is not nil && this == nil") + } + if len(pbLink.Links) != len(that1.Links) { + return fmt.Errorf("len(this.Links)(%v) is not equal to len(that.Links)(%v)", len(pbLink.Links), len(that1.Links)) + } + for i := range pbLink.Links { + if !pbLink.Links[i].Equal(that1.Links[i]) { + return fmt.Errorf("this.Links[%v](%v) is not equal to that.Links[%v](%v)", i, pbLink.Links[i], i, that1.Links[i]) + } + } + if !bytes.Equal(pbLink.Data, that1.Data) { + return fmt.Errorf("this.Data(%v) is not equal to that.Data(%v)", pbLink.Data, that1.Data) + } + if !bytes.Equal(pbLink.XXX_unrecognized, that1.XXX_unrecognized) { + return fmt.Errorf("this.XXX_unrecognized(%v) is not equal to that.XXX_unrecognized(%v)", pbLink.XXX_unrecognized, that1.XXX_unrecognized) + } + return nil +} + +func (pbNode *PBNode) Equal(that interface{}) bool { + if that == nil { + return pbNode == nil + } + + that1, ok := that.(*PBNode) + if !ok { + that2, ok := that.(PBNode) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return pbNode == nil + } else if pbNode == nil { + return false + } + if len(pbNode.Links) != len(that1.Links) { + return false + } + for i := range pbNode.Links { + if !pbNode.Links[i].Equal(that1.Links[i]) { + return false + } + } + if !bytes.Equal(pbNode.Data, that1.Data) { + return false + } + if !bytes.Equal(pbNode.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (pbLink *PBLink) GoString() string { + if pbLink == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&merkledag_pb.PBLink{") + if pbLink.Hash != nil { + s = append(s, "Hash: "+valueToGoStringMerkledag(pbLink.Hash, "byte")+",\n") + } + if pbLink.Name != nil { + s = append(s, "Name: "+valueToGoStringMerkledag(pbLink.Name, "string")+",\n") + } + if pbLink.Tsize != nil { + s = append(s, "Tsize: "+valueToGoStringMerkledag(pbLink.Tsize, "uint64")+",\n") + } + if pbLink.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", pbLink.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (pbNode *PBNode) GoString() string { + if pbNode == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&merkledag_pb.PBNode{") + if pbNode.Links != nil { + s = append(s, "Links: "+fmt.Sprintf("%#v", pbNode.Links)+",\n") + } + if pbNode.Data != nil { + s = append(s, "Data: "+valueToGoStringMerkledag(pbNode.Data, "byte")+",\n") + } + if pbNode.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", pbNode.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringMerkledag(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, 
pv) +} +func (m *PBLink) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PBLink) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PBLink) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Tsize != nil { + i = encodeVarintMerkledag(dAtA, i, uint64(*m.Tsize)) + i-- + dAtA[i] = 0x18 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintMerkledag(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Hash != nil { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintMerkledag(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PBNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PBNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PBNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintMerkledag(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + if len(m.Links) > 0 { + for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMerkledag(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func encodeVarintMerkledag(dAtA []byte, offset int, v uint64) int { + offset -= sovMerkledag(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedPBLink(r randyMerkledag, easy bool) *PBLink { + this := &PBLink{} + if r.Intn(5) != 0 { + v1 := r.Intn(100) + this.Hash = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Hash[i] = byte(r.Intn(256)) + } + } + if r.Intn(5) != 0 { + v2 := string(randStringMerkledag(r)) + this.Name = &v2 + } + if r.Intn(5) != 0 { + v3 := uint64(uint64(r.Uint32())) + this.Tsize = &v3 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkledag(r, 4) + } + return this +} + +func NewPopulatedPBNode(r randyMerkledag, easy bool) *PBNode { + this := &PBNode{} + if r.Intn(5) != 0 { + v4 := r.Intn(100) + this.Data = make([]byte, v4) + for i := 0; i < v4; i++ { + this.Data[i] = byte(r.Intn(256)) + } + } + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Links = make([]*PBLink, v5) + for i := 0; i < v5; i++ { + this.Links[i] = NewPopulatedPBLink(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkledag(r, 3) + } + return this +} + +type randyMerkledag interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneMerkledag(r randyMerkledag) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return 
rune(ru + 55) + } + return rune(ru + 61) +} +func randStringMerkledag(r randyMerkledag) string { + v6 := r.Intn(100) + tmps := make([]rune, v6) + for i := 0; i < v6; i++ { + tmps[i] = randUTF8RuneMerkledag(r) + } + return string(tmps) +} +func randUnrecognizedMerkledag(r randyMerkledag, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldMerkledag(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldMerkledag(dAtA []byte, r randyMerkledag, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateMerkledag(dAtA, uint64(key)) + v7 := r.Int63() + if r.Intn(2) == 0 { + v7 *= -1 + } + dAtA = encodeVarintPopulateMerkledag(dAtA, uint64(v7)) + case 1: + dAtA = encodeVarintPopulateMerkledag(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateMerkledag(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateMerkledag(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateMerkledag(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateMerkledag(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *PBLink) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Hash != nil { + l = len(m.Hash) + n += 1 + l + sovMerkledag(uint64(l)) + } + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMerkledag(uint64(l)) + } + if m.Tsize != nil { + n += 1 + sovMerkledag(uint64(*m.Tsize)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PBNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovMerkledag(uint64(l)) + } + if len(m.Links) > 0 { + for _, e := range m.Links { + l = e.Size() + n += 1 + l + sovMerkledag(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMerkledag(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func (pbLink *PBLink) String() string { + if pbLink == nil { + return "nil" + } + s := strings.Join([]string{`&PBLink{`, + `Hash:` + valueToStringMerkledag(pbLink.Hash) + `,`, + `Name:` + valueToStringMerkledag(pbLink.Name) + `,`, + `Tsize:` + valueToStringMerkledag(pbLink.Tsize) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", pbLink.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (pbNode *PBNode) String() string { + if pbNode == nil { + return "nil" + } + repeatedStringForLinks := "[]*PBLink{" + for _, f := range pbNode.Links { + repeatedStringForLinks += strings.Replace(f.String(), "PBLink", "PBLink", 1) + "," + } + repeatedStringForLinks += "}" + s := strings.Join([]string{`&PBNode{`, + `Data:` + valueToStringMerkledag(pbNode.Data) + `,`, + `Links:` + repeatedStringForLinks + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", pbNode.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringMerkledag(v interface{}) string { + rv := 
reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PBLink) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PBLink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PBLink: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkledag + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMerkledag + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMerkledag + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMerkledag + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tsize", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tsize = &v + default: + iNdEx = preIndex + skippy, err := skipMerkledag(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkledag + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMerkledag + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PBNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PBNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PBNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkledag + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMerkledag + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkledag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMerkledag + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMerkledag + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Links = append(m.Links, &PBLink{}) + if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkledag(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkledag + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMerkledag + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMerkledag(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkledag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkledag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkledag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMerkledag + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMerkledag + } + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMerkledag + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMerkledag = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMerkledag = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMerkledag = fmt.Errorf("proto: unexpected end of group") +) diff --git a/ipld/merkledag/pb/merkledag.proto b/ipld/merkledag/pb/merkledag.proto new file mode 100644 index 0000000000..ec540e681e --- /dev/null +++ b/ipld/merkledag/pb/merkledag.proto @@ -0,0 +1,41 @@ +syntax = "proto2"; + +package merkledag.pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.gostring_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.verbose_equal_all) = true; +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.populate_all) = true; +option (gogoproto.testgen_all) = true; +option (gogoproto.benchgen_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; + + +// An IPFS MerkleDAG Link +message PBLink { + + // multihash of the target object + optional bytes Hash = 1; + + // utf string name. should be unique per object + optional string Name = 2; + + // cumulative size of target object + optional uint64 Tsize = 3; +} + +// An IPFS MerkleDAG Node +message PBNode { + + // refs to other objects + repeated PBLink Links = 2; + + // opaque user data + optional bytes Data = 1; +} diff --git a/ipld/merkledag/pb/merkledagpb_test.go b/ipld/merkledag/pb/merkledagpb_test.go new file mode 100644 index 0000000000..f72b306da8 --- /dev/null +++ b/ipld/merkledag/pb/merkledagpb_test.go @@ -0,0 +1,486 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: merkledag.proto + +package merkledag_pb + +import ( + fmt "fmt" + go_parser "go/parser" + math "math" + math_rand "math/rand" + testing "testing" + time "time" + + _ "github.com/gogo/protobuf/gogoproto" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func TestPBLinkProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBLink(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PBLink{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestPBLinkMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBLink(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PBLink{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func BenchmarkPBLinkProtoMarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + pops := make([]*PBLink, 10000) + for i := 0; i < 10000; i++ { + pops[i] = NewPopulatedPBLink(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000]) + if err != nil { + panic(err) + } + total += len(dAtA) + } + b.SetBytes(int64(total / b.N)) +} + +func BenchmarkPBLinkProtoUnmarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + datas := make([][]byte, 10000) + for i := 0; i < 10000; i++ { + dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedPBLink(popr, false)) + if err != nil { + panic(err) + } + datas[i] = dAtA + } + msg := &PBLink{} + b.ResetTimer() + for i := 0; i < b.N; i++ { + total += len(datas[i%10000]) + if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil { + panic(err) + } + } + b.SetBytes(int64(total / b.N)) +} + +func TestPBNodeProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBNode(popr, false) + dAtA, 
err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PBNode{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestPBNodeMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBNode(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PBNode{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func BenchmarkPBNodeProtoMarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + pops := make([]*PBNode, 10000) + for i := 0; i < 10000; i++ { + pops[i] = NewPopulatedPBNode(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000]) + if err != nil { + panic(err) + } + total += len(dAtA) + } + b.SetBytes(int64(total / b.N)) +} + +func BenchmarkPBNodeProtoUnmarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + datas := make([][]byte, 10000) + for i := 0; i < 10000; i++ { + dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedPBNode(popr, false)) + if err != nil { + panic(err) + } + datas[i] = dAtA + } + msg := &PBNode{} + b.ResetTimer() + for i := 0; i < b.N; i++ { + total += len(datas[i%10000]) + if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil { + panic(err) + } + } + b.SetBytes(int64(total / b.N)) +} + +func TestPBLinkJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBLink(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PBLink{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestPBNodeJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBNode(popr, true) + 
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PBNode{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestPBLinkProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBLink(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &PBLink{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPBLinkProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBLink(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &PBLink{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPBNodeProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBNode(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &PBNode{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPBNodeProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBNode(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &PBNode{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPBLinkVerboseEqual(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedPBLink(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + panic(err) + } + msg := &PBLink{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err) + } +} +func TestPBNodeVerboseEqual(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedPBNode(popr, false) + dAtA, err := 
github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + panic(err) + } + msg := &PBNode{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err) + } +} +func TestPBLinkGoString(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedPBLink(popr, false) + s1 := p.GoString() + s2 := fmt.Sprintf("%#v", p) + if s1 != s2 { + t.Fatalf("GoString want %v got %v", s1, s2) + } + _, err := go_parser.ParseExpr(s1) + if err != nil { + t.Fatal(err) + } +} +func TestPBNodeGoString(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedPBNode(popr, false) + s1 := p.GoString() + s2 := fmt.Sprintf("%#v", p) + if s1 != s2 { + t.Fatalf("GoString want %v got %v", s1, s2) + } + _, err := go_parser.ParseExpr(s1) + if err != nil { + t.Fatal(err) + } +} +func TestPBLinkSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBLink(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func BenchmarkPBLinkSize(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + pops := make([]*PBLink, 1000) + for i := 0; i < 1000; i++ { + pops[i] = NewPopulatedPBLink(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + total += pops[i%1000].Size() + } + b.SetBytes(int64(total / b.N)) +} + +func TestPBNodeSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPBNode(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func BenchmarkPBNodeSize(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + pops := make([]*PBNode, 1000) + for i := 0; i < 1000; i++ { + pops[i] = NewPopulatedPBNode(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + total += pops[i%1000].Size() + } + b.SetBytes(int64(total / b.N)) +} + +func TestPBLinkStringer(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedPBLink(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestPBNodeStringer(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedPBNode(popr, false) + s1 := p.String() + s2 := 
fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} + +//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/ipld/merkledag/pb/stability_test.go b/ipld/merkledag/pb/stability_test.go new file mode 100644 index 0000000000..5da833713e --- /dev/null +++ b/ipld/merkledag/pb/stability_test.go @@ -0,0 +1,24 @@ +package merkledag_pb + +import ( + bytes "bytes" + "testing" +) + +func TestStability(t *testing.T) { + correct := []byte("\x12\x87\x01\n;\x81\x869\xacH\xa4Ư\xa2\xf1X\x1a\x8b\x95%\xe2\x0f\xdah\x92\u007f+/\xf86\xf75x\xdb\x0f\xa5L)\xf7\xfd\x92\x8d\x92\xcaC\xf1\x93\xde\xe4\u007fY\x15I\xf5\x97\xa8\x11\xc8\xfag\xab\x03\x1e\xbd\x12B|CMw`mHq{>?|vd{0F7>8m[C`HSg3UcXmGs-qp-z6{Kc.tGX->H07\x18\xeaئ\xd0\b\x129\x121LZ3,V9jnmk^veYEV71EMLt9;6]}bnkU2e7GXmqisoCPV0C+ni\x18Ա\xfe\x8b\f\x12u\n'\xf2#\xc1\xc0nQ\xf9\xb5\x19\x80\xcd\xf8\x06k1\xf6#\x84\x1c\xb6\xbf\xeaY\x9b\xd8O\x84\x04\xdbKq\xe4\xae\xf2\xd6\xe9*\x16B\x12D[gVeg4=t}EGSu82+dmgvQ+Tr>_sLUJ|iZ[P2y2T67ilvEikK}\\iru?IF?mVS[Mv9KG8+\x18\x92\xa0\xf9\xa1\n\n\x11?̎\v\x06ѣ\x80nH\x12\x00\xa7\xd2w͝") + n := new(PBNode) + err := n.Unmarshal(correct) + if err != nil { + t.Fatal(err) + } + d, err := n.Marshal() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(d, correct) { + t.Logf("%q", d) + t.Fatal("protobuf not stable") + } + +} diff --git a/ipld/merkledag/pb/upgrade_check.go b/ipld/merkledag/pb/upgrade_check.go new file mode 100644 index 0000000000..e5a6473c13 --- /dev/null +++ b/ipld/merkledag/pb/upgrade_check.go @@ -0,0 +1,5 @@ +package merkledag_pb + +// Make sure the user doesn't upgrade this package! +// This will fail to build if the user does. +const _ = DoNotUpgradeFileEverItWillChangeYourHashes diff --git a/ipld/merkledag/prime.go b/ipld/merkledag/prime.go new file mode 100644 index 0000000000..5f9bbc691e --- /dev/null +++ b/ipld/merkledag/prime.go @@ -0,0 +1,206 @@ +package merkledag + +import ( + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" +) + +// Protonode was originally implemented as a go-ipld-format node, and included +// functionality that does not fit well into the model for go-ipld-prime, namely +// the ability ot modify the node in place. + +// In order to support the go-ipld-prime interface, all of these prime methods +// serialize and rebuild the go-ipld-prime node as needed, so that it remains up +// to date with mutations made via the add/remove link methods + +// Kind returns a value from the Kind enum describing what the +// essential serializable kind of this node is (map, list, integer, etc). +// Most other handling of a node requires first switching upon the kind. +func (n *ProtoNode) Kind() ipld.Kind { + _, _ = n.EncodeProtobuf(false) + return n.encoded.Kind() +} + +// LookupByString looks up a child object in this node and returns it. +// The returned Node may be any of the Kind: +// a primitive (string, int64, etc), a map, a list, or a link. +// +// If the Kind of this Node is not Kind_Map, a nil node and an error +// will be returned. +// +// If the key does not exist, a nil node and an error will be returned. +func (n *ProtoNode) LookupByString(key string) (ipld.Node, error) { + _, err := n.EncodeProtobuf(false) + if err != nil { + return nil, err + } + return n.encoded.LookupByString(key) +} + +// LookupByNode is the equivalent of LookupByString, but takes a reified Node +// as a parameter instead of a plain string. 
+// This mechanism is useful if working with typed maps (if the key types
+// have constraints, and you already have a reified `schema.TypedNode` value,
+// using that value can save parsing and validation costs);
+// and may simply be convenient if you already have a Node value in hand.
+//
+// (When writing generic functions over Node, a good rule of thumb is:
+// when handling a map, check for `schema.TypedNode`, and in this case prefer
+// the LookupByNode(Node) method; otherwise, favor LookupByString; typically
+// implementations will have their fastest paths thusly.)
+func (n *ProtoNode) LookupByNode(key ipld.Node) (ipld.Node, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return nil, err
+	}
+	return n.encoded.LookupByNode(key)
+}
+
+// LookupByIndex is the equivalent of LookupByString but for indexing into a list.
+// As with LookupByString, the returned Node may be any of the Kind:
+// a primitive (string, int64, etc), a map, a list, or a link.
+//
+// If the Kind of this Node is not Kind_List, a nil node and an error
+// will be returned.
+//
+// If idx is out of range, a nil node and an error will be returned.
+func (n *ProtoNode) LookupByIndex(idx int64) (ipld.Node, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return nil, err
+	}
+	return n.encoded.LookupByIndex(idx)
+}
+
+// LookupBySegment will act as either LookupByString or LookupByIndex,
+// whichever is contextually appropriate.
+//
+// Using LookupBySegment may imply an "atoi" conversion if used on a list node,
+// or an "itoa" conversion if used on a map node. If an "itoa" conversion
+// takes place, it may error, and this method may return that error.
+func (n *ProtoNode) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return nil, err
+	}
+	return n.encoded.LookupBySegment(seg)
+}
+
+// Note that when using codegenerated types, there may be a fifth variant
+// of lookup method on maps: `Get($GeneratedTypeKey) $GeneratedTypeValue`!
+
+// MapIterator returns an iterator which yields key-value pairs
+// traversing the node.
+// If the node kind is anything other than a map, nil will be returned.
+//
+// The iterator will yield every entry in the map; that is, it
+// can be expected that itr.Next will be called node.Length times
+// before itr.Done becomes true.
+func (n *ProtoNode) MapIterator() ipld.MapIterator {
+	_, _ = n.EncodeProtobuf(false)
+	return n.encoded.MapIterator()
+}
+
+// ListIterator returns an iterator which yields index-value pairs
+// traversing the node.
+// If the node kind is anything other than a list, nil will be returned.
+//
+// The iterator will yield every entry in the list; that is, it
+// can be expected that itr.Next will be called node.Length times
+// before itr.Done becomes true.
+func (n *ProtoNode) ListIterator() ipld.ListIterator {
+	_, _ = n.EncodeProtobuf(false)
+	return n.encoded.ListIterator()
+}
+
+// Length returns the length of a list, or the number of entries in a map,
+// or -1 if the node is not of list nor map kind.
+func (n *ProtoNode) Length() int64 {
+	_, _ = n.EncodeProtobuf(false)
+	return n.encoded.Length()
+}
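Every accessor above follows the same pattern: re-encode the (possibly mutated) ProtoNode, then delegate to the cached go-ipld-prime representation. A minimal sketch of what that buys a caller, illustrative only and assuming nothing beyond the ProtoNode API in this package (the dag-pb view typically exposes the "Links" and "Data" fields as map keys):

```go
package main

import (
	"fmt"

	merkledag "github.com/ipfs/boxo/ipld/merkledag"
)

func main() {
	parent := merkledag.NodeWithData([]byte("parent"))
	child := merkledag.NodeWithData([]byte("child"))
	if err := parent.AddNodeLink("a", child); err != nil {
		panic(err)
	}

	// The mutation above is visible through the prime view because
	// MapIterator re-encodes the node before delegating to it.
	it := parent.MapIterator()
	for !it.Done() {
		k, _, err := it.Next()
		if err != nil {
			panic(err)
		}
		name, _ := k.AsString()
		fmt.Println("dag-pb field:", name)
	}
}
```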
+// Absent nodes are returned when traversing a struct field that is
+// defined by a schema but unset in the data. (Absent nodes are not
+// possible otherwise; you'll only see them from `schema.TypedNode`.)
+// The absent flag is necessary so iterating over structs can
+// unambiguously make the distinction between values that are
+// present-and-null versus values that are absent.
+//
+// Absent nodes respond to `Kind()` as `ipld.Kind_Null`,
+// for lack of any better descriptive value; you should therefore
+// always check IsAbsent rather than just a switch on kind
+// when it may be important to handle absent values distinctly.
+func (n *ProtoNode) IsAbsent() bool {
+	_, _ = n.EncodeProtobuf(false)
+	return n.encoded.IsAbsent()
+}
+
+func (n *ProtoNode) IsNull() bool {
+	_, _ = n.EncodeProtobuf(false)
+	return n.encoded.IsNull()
+}
+
+func (n *ProtoNode) AsBool() (bool, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return false, err
+	}
+	return n.encoded.AsBool()
+}
+
+func (n *ProtoNode) AsInt() (int64, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return 0, err
+	}
+	return n.encoded.AsInt()
+}
+
+func (n *ProtoNode) AsFloat() (float64, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return 0, err
+	}
+	return n.encoded.AsFloat()
+}
+
+func (n *ProtoNode) AsString() (string, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return "", err
+	}
+	return n.encoded.AsString()
+}
+
+func (n *ProtoNode) AsBytes() ([]byte, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return nil, err
+	}
+	return n.encoded.AsBytes()
+}
+
+func (n *ProtoNode) AsLink() (ipld.Link, error) {
+	_, err := n.EncodeProtobuf(false)
+	if err != nil {
+		return nil, err
+	}
+	return n.encoded.AsLink()
+}
+
+// Prototype returns a NodePrototype which can describe some properties of this node's implementation,
+// and also be used to get a NodeBuilder,
+// which can be used to create new nodes with the same implementation as this one.
+//
+// For typed nodes, the NodePrototype will also implement schema.Type.
+//
+// For Advanced Data Layouts, the NodePrototype will encapsulate any additional
+// parameters and configuration of the ADL, and will also (usually)
+// implement NodePrototypeSupportingAmend.
+//
+// Calling this method should not cause an allocation.
+func (n *ProtoNode) Prototype() ipld.NodePrototype {
+	return dagpb.Type.PBNode
+}
+
+var _ ipld.Node = &ProtoNode{}
diff --git a/ipld/merkledag/raw.go b/ipld/merkledag/raw.go
new file mode 100644
index 0000000000..68e2473e17
--- /dev/null
+++ b/ipld/merkledag/raw.go
@@ -0,0 +1,126 @@
+package merkledag
+
+import (
+	"encoding/json"
+	"fmt"
+
+	blocks "github.com/ipfs/boxo/blocks"
+	u "github.com/ipfs/boxo/util"
+	legacy "github.com/ipfs/go-ipld-legacy"
+	ipld "github.com/ipld/go-ipld-prime"
+	basicnode "github.com/ipld/go-ipld-prime/node/basic"
+
+	cid "github.com/ipfs/go-cid"
+	format "github.com/ipfs/go-ipld-format"
+)
+
+// RawNode represents a node which only contains data.
+type RawNode struct {
+	blocks.Block
+
+	// Always a node/basic Bytes.
+	// We can't reference a specific type, as it's not exposed there.
+	// If we find that the interface indirection really matters,
+	// then we could possibly use dagpb.Bytes.
+	ipld.Node
+}
+
+var _ legacy.UniversalNode = &RawNode{}
+
+// NewRawNode creates a RawNode using the default sha2-256 hash function.
+func NewRawNode(data []byte) *RawNode {
+	h := u.Hash(data)
+	c := cid.NewCidV1(cid.Raw, h)
+	blk, _ := blocks.NewBlockWithCid(data, c)
+	return &RawNode{blk, basicnode.NewBytes(data)}
+}
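As a quick orientation, here is a hedged sketch of the RawNode round trip: build a raw node, then decode it back through the block decoder defined just below. It assumes only NewRawNode and DecodeRawBlock as defined in this file and the boxo import path used throughout this diff.

```go
package main

import (
	"fmt"

	merkledag "github.com/ipfs/boxo/ipld/merkledag"
)

func main() {
	rn := merkledag.NewRawNode([]byte("hello"))
	fmt.Println(rn.Cid()) // CIDv1 with the raw codec and a sha2-256 multihash

	// RawNode embeds blocks.Block, so it can be fed straight back into
	// the block decoder.
	nd, err := merkledag.DecodeRawBlock(rn)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(nd.RawData())) // 5
}
```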
+// DecodeRawBlock is a block decoder for raw IPLD nodes conforming to `node.DecodeBlockFunc`.
+func DecodeRawBlock(block blocks.Block) (format.Node, error) {
+	if block.Cid().Type() != cid.Raw {
+		return nil, fmt.Errorf("raw nodes cannot be decoded from non-raw blocks: %d", block.Cid().Type())
+	}
+	// Once you "share" a block, it should be immutable. Therefore, we can just use this block as-is.
+	return &RawNode{block, basicnode.NewBytes(block.RawData())}, nil
+}
+
+var _ format.DecodeBlockFunc = DecodeRawBlock
+
+// NewRawNodeWPrefix creates a RawNode using the provided CID builder.
+func NewRawNodeWPrefix(data []byte, builder cid.Builder) (*RawNode, error) {
+	builder = builder.WithCodec(cid.Raw)
+	c, err := builder.Sum(data)
+	if err != nil {
+		return nil, err
+	}
+	blk, err := blocks.NewBlockWithCid(data, c)
+	if err != nil {
+		return nil, err
+	}
+	// Once you "share" a block, it should be immutable. Therefore, we can just use this block as-is.
+	return &RawNode{blk, basicnode.NewBytes(data)}, nil
+}
+
+// Links returns nil.
+func (rn *RawNode) Links() []*format.Link {
+	return nil
+}
+
+// ResolveLink returns an error.
+func (rn *RawNode) ResolveLink(path []string) (*format.Link, []string, error) {
+	return nil, nil, ErrLinkNotFound
+}
+
+// Resolve returns an error.
+func (rn *RawNode) Resolve(path []string) (interface{}, []string, error) {
+	return nil, nil, ErrLinkNotFound
+}
+
+// Tree returns nil.
+func (rn *RawNode) Tree(p string, depth int) []string {
+	return nil
+}
+
+// Copy performs a deep copy of this node and returns it as a format.Node.
+func (rn *RawNode) Copy() format.Node {
+	copybuf := make([]byte, len(rn.RawData()))
+	copy(copybuf, rn.RawData())
+	nblk, err := blocks.NewBlockWithCid(copybuf, rn.Cid())
+	if err != nil {
+		// programmer error
+		panic("failure attempting to clone raw block: " + err.Error())
+	}
+	// Once you "share" a block, it should be immutable. Therefore, we can just use this block as-is.
+	return &RawNode{nblk, basicnode.NewBytes(nblk.RawData())}
+}
+
+// Size returns the size of this node.
+func (rn *RawNode) Size() (uint64, error) {
+	return uint64(len(rn.RawData())), nil
+}
+
+// Stat returns some Stats about this node.
+func (rn *RawNode) Stat() (*format.NodeStat, error) {
+	return &format.NodeStat{
+		Hash:           rn.Cid().String(),
+		NumLinks:       0,
+		BlockSize:      len(rn.RawData()),
+		LinksSize:      0,
+		CumulativeSize: len(rn.RawData()),
+		DataSize:       len(rn.RawData()),
+	}, nil
+}
+
+// MarshalJSON is required for our "ipfs dag" commands.
+func (rn *RawNode) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(rn.RawData()))
+}
+
+func RawNodeConverter(b blocks.Block, nd ipld.Node) (legacy.UniversalNode, error) {
+	if nd.Kind() != ipld.Kind_Bytes {
+		return nil, ErrNotRawNode
+	}
+	return &RawNode{b, nd}, nil
+}
+
+var _ legacy.UniversalNode = (*RawNode)(nil)
diff --git a/ipld/merkledag/readonly.go b/ipld/merkledag/readonly.go
new file mode 100644
index 0000000000..36242fbeb1
--- /dev/null
+++ b/ipld/merkledag/readonly.go
@@ -0,0 +1,20 @@
+package merkledag
+
+import (
+	"fmt"
+
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// ErrReadOnly is used when a read-only data structure is written to.
+var ErrReadOnly = fmt.Errorf("cannot write to readonly DAGService")
+
+// NewReadOnlyDagService takes a NodeGetter, and returns a full DAGService
+// implementation that returns ErrReadOnly when its 'write' methods are
+// invoked.
+func NewReadOnlyDagService(ng ipld.NodeGetter) ipld.DAGService { + return &ComboService{ + Read: ng, + Write: &ErrorService{ErrReadOnly}, + } +} diff --git a/ipld/merkledag/readonly_test.go b/ipld/merkledag/readonly_test.go new file mode 100644 index 0000000000..0d00eebfb5 --- /dev/null +++ b/ipld/merkledag/readonly_test.go @@ -0,0 +1,64 @@ +package merkledag_test + +import ( + "context" + "testing" + + . "github.com/ipfs/boxo/ipld/merkledag" + dstest "github.com/ipfs/boxo/ipld/merkledag/test" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +func TestReadonlyProperties(t *testing.T) { + ds := dstest.Mock() + ro := NewReadOnlyDagService(ds) + + ctx := context.Background() + nds := []ipld.Node{ + NewRawNode([]byte("foo1")), + NewRawNode([]byte("foo2")), + NewRawNode([]byte("foo3")), + NewRawNode([]byte("foo4")), + } + cids := []cid.Cid{ + nds[0].Cid(), + nds[1].Cid(), + nds[2].Cid(), + nds[3].Cid(), + } + + // add to the actual underlying datastore + if err := ds.Add(ctx, nds[2]); err != nil { + t.Fatal(err) + } + if err := ds.Add(ctx, nds[3]); err != nil { + t.Fatal(err) + } + + if err := ro.Add(ctx, nds[0]); err != ErrReadOnly { + t.Fatal("expected ErrReadOnly") + } + if err := ro.Add(ctx, nds[2]); err != ErrReadOnly { + t.Fatal("expected ErrReadOnly") + } + + if err := ro.AddMany(ctx, nds[0:1]); err != ErrReadOnly { + t.Fatal("expected ErrReadOnly") + } + + if err := ro.Remove(ctx, cids[3]); err != ErrReadOnly { + t.Fatal("expected ErrReadOnly") + } + if err := ro.RemoveMany(ctx, cids[1:2]); err != ErrReadOnly { + t.Fatal("expected ErrReadOnly") + } + + if _, err := ro.Get(ctx, cids[0]); !ipld.IsNotFound(err) { + t.Fatal("expected ErrNotFound") + } + if _, err := ro.Get(ctx, cids[3]); err != nil { + t.Fatal(err) + } +} diff --git a/ipld/merkledag/rwservice.go b/ipld/merkledag/rwservice.go new file mode 100644 index 0000000000..a916350a63 --- /dev/null +++ b/ipld/merkledag/rwservice.go @@ -0,0 +1,47 @@ +package merkledag + +import ( + "context" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +// ComboService implements ipld.DAGService, using 'Read' for all fetch methods, +// and 'Write' for all methods that add new objects. +type ComboService struct { + Read ipld.NodeGetter + Write ipld.DAGService +} + +var _ ipld.DAGService = (*ComboService)(nil) + +// Add writes a new node using the Write DAGService. +func (cs *ComboService) Add(ctx context.Context, nd ipld.Node) error { + return cs.Write.Add(ctx, nd) +} + +// AddMany adds nodes using the Write DAGService. +func (cs *ComboService) AddMany(ctx context.Context, nds []ipld.Node) error { + return cs.Write.AddMany(ctx, nds) +} + +// Get fetches a node using the Read DAGService. +func (cs *ComboService) Get(ctx context.Context, c cid.Cid) (ipld.Node, error) { + return cs.Read.Get(ctx, c) +} + +// GetMany fetches nodes using the Read DAGService. +func (cs *ComboService) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption { + return cs.Read.GetMany(ctx, cids) +} + +// Remove deletes a node using the Write DAGService. +func (cs *ComboService) Remove(ctx context.Context, c cid.Cid) error { + return cs.Write.Remove(ctx, c) +} + +// RemoveMany deletes nodes using the Write DAGService. 
+func (cs *ComboService) RemoveMany(ctx context.Context, cids []cid.Cid) error {
+	return cs.Write.RemoveMany(ctx, cids)
+}
diff --git a/ipld/merkledag/session.go b/ipld/merkledag/session.go
new file mode 100644
index 0000000000..c7bbff169c
--- /dev/null
+++ b/ipld/merkledag/session.go
@@ -0,0 +1,21 @@
+package merkledag
+
+import (
+	"context"
+
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// SessionMaker is an object that can generate a new fetching session.
+type SessionMaker interface {
+	Session(context.Context) ipld.NodeGetter
+}
+
+// NewSession returns a session-backed NodeGetter if the given NodeGetter
+// implements SessionMaker.
+func NewSession(ctx context.Context, g ipld.NodeGetter) ipld.NodeGetter {
+	if sm, ok := g.(SessionMaker); ok {
+		return sm.Session(ctx)
+	}
+	return g
+}
diff --git a/ipld/merkledag/test/utils.go b/ipld/merkledag/test/utils.go
new file mode 100644
index 0000000000..302d194d6d
--- /dev/null
+++ b/ipld/merkledag/test/utils.go
@@ -0,0 +1,23 @@
+package mdutils
+
+import (
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+
+	bsrv "github.com/ipfs/boxo/blockservice"
+	blockstore "github.com/ipfs/boxo/blockstore"
+	offline "github.com/ipfs/boxo/exchange/offline"
+	ds "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// Mock returns a new thread-safe, mock DAGService.
+func Mock() ipld.DAGService {
+	return dag.NewDAGService(Bserv())
+}
+
+// Bserv returns a new, thread-safe, mock BlockService.
+func Bserv() bsrv.BlockService {
+	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	return bsrv.New(bstore, offline.Exchange(bstore))
+}
diff --git a/ipld/merkledag/traverse/traverse.go b/ipld/merkledag/traverse/traverse.go
new file mode 100644
index 0000000000..dbc426fa92
--- /dev/null
+++ b/ipld/merkledag/traverse/traverse.go
@@ -0,0 +1,222 @@
+// Package traverse provides merkledag traversal functions
+package traverse
+
+import (
+	"context"
+	"errors"
+
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// Order is an identifier for traversal algorithm orders
+type Order int
+
+// These constants define different traversing methods
+const (
+	// DFSPre defines depth-first pre-order
+	DFSPre Order = iota
+	// DFSPost defines depth-first post-order
+	DFSPost
+	// BFS defines breadth-first order
+	BFS
+)
+
+// Options specifies a series of traversal options
+type Options struct {
+	DAG     ipld.NodeGetter // the dagservice to fetch nodes
+	Order   Order           // what order to traverse in
+	Func    Func            // the function to perform at each step
+	ErrFunc ErrFunc         // see ErrFunc. Optional
+
+	SkipDuplicates bool // whether to skip duplicate nodes
+}
+
+// State is a current traversal state
+type State struct {
+	Node  ipld.Node
+	Depth int
+}
+
+type traversal struct {
+	opts Options
+	seen map[string]struct{}
+}
+
+func (t *traversal) shouldSkip(n ipld.Node) (bool, error) {
+	if t.opts.SkipDuplicates {
+		k := n.Cid()
+		if _, found := t.seen[k.KeyString()]; found {
+			return true, nil
+		}
+		t.seen[k.KeyString()] = struct{}{}
+	}
+
+	return false, nil
+}
+
+func (t *traversal) callFunc(next State) error {
+	return t.opts.Func(next)
+}
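Before the internal helpers below, here is a short sketch of how the package is typically driven: build a small DAG against the mock DAGService from ipld/merkledag/test, then walk it depth-first. Illustrative only; it assumes the boxo import paths used throughout this diff.

```go
package main

import (
	"context"
	"fmt"

	merkledag "github.com/ipfs/boxo/ipld/merkledag"
	mdutils "github.com/ipfs/boxo/ipld/merkledag/test"
	"github.com/ipfs/boxo/ipld/merkledag/traverse"
)

func main() {
	ds := mdutils.Mock()
	root := merkledag.NodeWithData([]byte("root"))
	child := merkledag.NodeWithData([]byte("child"))
	if err := ds.Add(context.Background(), child); err != nil {
		panic(err)
	}
	if err := root.AddNodeLink("c", child); err != nil {
		panic(err)
	}

	// Visit every node pre-order, skipping CIDs we have already seen.
	err := traverse.Traverse(root, traverse.Options{
		DAG:            ds,
		Order:          traverse.DFSPre,
		SkipDuplicates: true,
		Func: func(s traverse.State) error {
			fmt.Println(s.Depth, s.Node.Cid())
			return nil
		},
	})
	if err != nil {
		panic(err)
	}
}
```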
+// getNode returns the node for link. If it returns an error,
+// stop processing. If it returns a nil node, just skip it.
+//
+// The error handling is a little complicated.
+func (t *traversal) getNode(link *ipld.Link) (ipld.Node, error) {
+	getNode := func(l *ipld.Link) (ipld.Node, error) {
+		next, err := l.GetNode(context.TODO(), t.opts.DAG)
+		if err != nil {
+			return nil, err
+		}
+
+		skip, err := t.shouldSkip(next)
+		if skip {
+			next = nil
+		}
+		return next, err
+	}
+
+	next, err := getNode(link)
+	if err != nil && t.opts.ErrFunc != nil { // attempt recovery.
+		err = t.opts.ErrFunc(err)
+		next = nil // skip regardless
+	}
+	return next, err
+}
+
+// Func is the type of the function called for each dag.Node visited by Traverse.
+// The traversal argument contains the current traversal state.
+// If an error is returned, processing stops.
+type Func func(current State) error
+
+// ErrFunc is provided to handle problems when walking to the Node. Traverse
+// will call ErrFunc with the error encountered. ErrFunc can decide how to
+// handle that error, and return an error back to Traverse with how to proceed:
+//   - nil - skip the Node and its children, but continue processing
+//   - all other errors halt processing immediately.
+//
+// If ErrFunc is nil, Traverse will stop, as if:
+//
+//	opts.ErrFunc = func(err error) error { return err }
+type ErrFunc func(err error) error
+
+// Traverse initiates a DAG traversal with the given options starting at
+// the given root.
+func Traverse(root ipld.Node, o Options) error {
+	t := traversal{
+		opts: o,
+		seen: map[string]struct{}{},
+	}
+
+	state := State{
+		Node:  root,
+		Depth: 0,
+	}
+
+	switch o.Order {
+	default:
+		return dfsPreTraverse(state, &t)
+	case DFSPre:
+		return dfsPreTraverse(state, &t)
+	case DFSPost:
+		return dfsPostTraverse(state, &t)
+	case BFS:
+		return bfsTraverse(state, &t)
+	}
+}
+
+type dfsFunc func(state State, t *traversal) error
+
+func dfsPreTraverse(state State, t *traversal) error {
+	if err := t.callFunc(state); err != nil {
+		return err
+	}
+	return dfsDescend(dfsPreTraverse, state, t)
+}
+
+func dfsPostTraverse(state State, t *traversal) error {
+	if err := dfsDescend(dfsPostTraverse, state, t); err != nil {
+		return err
+	}
+	return t.callFunc(state)
+}
+
+func dfsDescend(df dfsFunc, curr State, t *traversal) error {
+	for _, l := range curr.Node.Links() {
+		node, err := t.getNode(l)
+		if err != nil {
+			return err
+		}
+		if node == nil { // skip
+			continue
+		}
+
+		next := State{
+			Node:  node,
+			Depth: curr.Depth + 1,
+		}
+		if err := df(next, t); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func bfsTraverse(root State, t *traversal) error {
+	if skip, err := t.shouldSkip(root.Node); skip || err != nil {
+		return err
+	}
+
+	var q queue
+	q.enq(root)
+	for q.len() > 0 {
+		curr := q.deq()
+		if curr.Node == nil {
+			return errors.New("failed to dequeue though queue not empty")
+		}
+
+		// call user's func
+		if err := t.callFunc(curr); err != nil {
+			return err
+		}
+
+		for _, l := range curr.Node.Links() {
+			node, err := t.getNode(l)
+			if err != nil {
+				return err
+			}
+			if node == nil { // skip
+				continue
+			}
+
+			q.enq(State{
+				Node:  node,
+				Depth: curr.Depth + 1,
+			})
+		}
+	}
+	return nil
+}
+
+type queue struct {
+	s []State
+}
+
+func (q *queue) enq(n State) {
+	q.s = append(q.s, n)
+}
+
+func (q *queue) deq() State {
+	if len(q.s) < 1 {
+		return State{}
+	}
+	n := q.s[0]
+	q.s = q.s[1:]
+	return n
+}
+
+func (q *queue) len() int {
+	return len(q.s)
+}
diff --git a/ipld/merkledag/traverse/traverse_test.go b/ipld/merkledag/traverse/traverse_test.go
new file mode 100644
index 0000000000..c19f1b0545
--- /dev/null
+++ b/ipld/merkledag/traverse/traverse_test.go
@@ -0,0 +1,418 @@
+package traverse + +import ( + "bytes" + "context" + "fmt" + "testing" + + mdag "github.com/ipfs/boxo/ipld/merkledag" + mdagtest "github.com/ipfs/boxo/ipld/merkledag/test" + + ipld "github.com/ipfs/go-ipld-format" +) + +func TestDFSPreNoSkip(t *testing.T) { + ds := mdagtest.Mock() + opts := Options{Order: DFSPre, DAG: ds} + + testWalkOutputs(t, newFan(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/ab +1 /a/ac +1 /a/ad +`)) + + testWalkOutputs(t, newLinkedList(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) + + testWalkOutputs(t, newBinaryTree(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +2 /a/aa/aab +1 /a/ab +2 /a/ab/aba +2 /a/ab/abb +`)) + + testWalkOutputs(t, newBinaryDAG(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) +} + +func TestDFSPreSkip(t *testing.T) { + ds := mdagtest.Mock() + opts := Options{Order: DFSPre, SkipDuplicates: true, DAG: ds} + + testWalkOutputs(t, newFan(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/ab +1 /a/ac +1 /a/ad +`)) + + testWalkOutputs(t, newLinkedList(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) + + testWalkOutputs(t, newBinaryTree(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +2 /a/aa/aab +1 /a/ab +2 /a/ab/aba +2 /a/ab/abb +`)) + + testWalkOutputs(t, newBinaryDAG(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) +} + +func TestDFSPostNoSkip(t *testing.T) { + ds := mdagtest.Mock() + opts := Options{Order: DFSPost, DAG: ds} + + testWalkOutputs(t, newFan(t, ds), opts, []byte(` +1 /a/aa +1 /a/ab +1 /a/ac +1 /a/ad +0 /a +`)) + + testWalkOutputs(t, newLinkedList(t, ds), opts, []byte(` +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +1 /a/aa +0 /a +`)) + + testWalkOutputs(t, newBinaryTree(t, ds), opts, []byte(` +2 /a/aa/aaa +2 /a/aa/aab +1 /a/aa +2 /a/ab/aba +2 /a/ab/abb +1 /a/ab +0 /a +`)) + + testWalkOutputs(t, newBinaryDAG(t, ds), opts, []byte(` +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +1 /a/aa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +1 /a/aa +0 /a +`)) +} + +func TestDFSPostSkip(t *testing.T) { + ds := mdagtest.Mock() + opts := Options{Order: DFSPost, SkipDuplicates: true, DAG: ds} + + testWalkOutputs(t, newFan(t, ds), opts, []byte(` +1 /a/aa +1 /a/ab +1 /a/ac +1 /a/ad +0 /a +`)) + + testWalkOutputs(t, newLinkedList(t, ds), opts, []byte(` +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +1 /a/aa +0 /a +`)) + + testWalkOutputs(t, 
newBinaryTree(t, ds), opts, []byte(` +2 /a/aa/aaa +2 /a/aa/aab +1 /a/aa +2 /a/ab/aba +2 /a/ab/abb +1 /a/ab +0 /a +`)) + + testWalkOutputs(t, newBinaryDAG(t, ds), opts, []byte(` +4 /a/aa/aaa/aaaa/aaaaa +3 /a/aa/aaa/aaaa +2 /a/aa/aaa +1 /a/aa +0 /a +`)) +} + +func TestBFSNoSkip(t *testing.T) { + ds := mdagtest.Mock() + opts := Options{Order: BFS, DAG: ds} + + testWalkOutputs(t, newFan(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/ab +1 /a/ac +1 /a/ad +`)) + + testWalkOutputs(t, newLinkedList(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) + + testWalkOutputs(t, newBinaryTree(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/ab +2 /a/aa/aaa +2 /a/aa/aab +2 /a/ab/aba +2 /a/ab/abb +`)) + + testWalkOutputs(t, newBinaryDAG(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/aa +2 /a/aa/aaa +2 /a/aa/aaa +2 /a/aa/aaa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) +} + +func TestBFSSkip(t *testing.T) { + ds := mdagtest.Mock() + opts := Options{Order: BFS, SkipDuplicates: true, DAG: ds} + + testWalkOutputs(t, newFan(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/ab +1 /a/ac +1 /a/ad +`)) + + testWalkOutputs(t, newLinkedList(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) + + testWalkOutputs(t, newBinaryTree(t, ds), opts, []byte(` +0 /a +1 /a/aa +1 /a/ab +2 /a/aa/aaa +2 /a/aa/aab +2 /a/ab/aba +2 /a/ab/abb +`)) + + testWalkOutputs(t, newBinaryDAG(t, ds), opts, []byte(` +0 /a +1 /a/aa +2 /a/aa/aaa +3 /a/aa/aaa/aaaa +4 /a/aa/aaa/aaaa/aaaaa +`)) +} + +func testWalkOutputs(t *testing.T, root ipld.Node, opts Options, expect []byte) { + expect = bytes.TrimLeft(expect, "\n") + + buf := new(bytes.Buffer) + walk := func(current State) error { + s := fmt.Sprintf("%d %s\n", current.Depth, current.Node.(*mdag.ProtoNode).Data()) + t.Logf("walk: %s", s) + buf.Write([]byte(s)) + return nil + } + + opts.Func = walk + if err := Traverse(root, opts); err != nil { + t.Error(err) + return + } + + actual := buf.Bytes() + if !bytes.Equal(actual, expect) { + t.Error("error: outputs differ") + t.Logf("expect:\n%s", expect) + t.Logf("actual:\n%s", actual) + } else { + t.Logf("expect matches actual:\n%s", expect) + } +} + +func newFan(t *testing.T, ds ipld.DAGService) ipld.Node { + a := mdag.NodeWithData([]byte("/a")) + addLink(t, ds, a, child(t, ds, a, "aa")) + addLink(t, ds, a, child(t, ds, a, "ab")) + addLink(t, ds, a, child(t, ds, a, "ac")) + addLink(t, ds, a, child(t, ds, a, "ad")) + return a +} + +func newLinkedList(t *testing.T, ds ipld.DAGService) ipld.Node { + a := mdag.NodeWithData([]byte("/a")) + aa := child(t, ds, a, "aa") + aaa := child(t, ds, aa, "aaa") + aaaa := child(t, ds, aaa, "aaaa") + aaaaa := child(t, ds, aaaa, "aaaaa") + addLink(t, ds, aaaa, aaaaa) + addLink(t, ds, aaa, aaaa) + addLink(t, ds, aa, aaa) + addLink(t, ds, a, aa) + return a +} + +func newBinaryTree(t *testing.T, ds ipld.DAGService) ipld.Node { + a := mdag.NodeWithData([]byte("/a")) + aa := child(t, ds, a, "aa") + ab := child(t, ds, a, "ab") + addLink(t, ds, aa, child(t, ds, 
aa, "aaa"))
+	addLink(t, ds, aa, child(t, ds, aa, "aab"))
+	addLink(t, ds, ab, child(t, ds, ab, "aba"))
+	addLink(t, ds, ab, child(t, ds, ab, "abb"))
+	addLink(t, ds, a, aa)
+	addLink(t, ds, a, ab)
+	return a
+}
+
+func newBinaryDAG(t *testing.T, ds ipld.DAGService) ipld.Node {
+	a := mdag.NodeWithData([]byte("/a"))
+	aa := child(t, ds, a, "aa")
+	aaa := child(t, ds, aa, "aaa")
+	aaaa := child(t, ds, aaa, "aaaa")
+	aaaaa := child(t, ds, aaaa, "aaaaa")
+	addLink(t, ds, aaaa, aaaaa)
+	addLink(t, ds, aaaa, aaaaa)
+	addLink(t, ds, aaa, aaaa)
+	addLink(t, ds, aaa, aaaa)
+	addLink(t, ds, aa, aaa)
+	addLink(t, ds, aa, aaa)
+	addLink(t, ds, a, aa)
+	addLink(t, ds, a, aa)
+	return a
+}
+
+func addLink(t *testing.T, ds ipld.DAGService, a, b ipld.Node) {
+	to := string(a.(*mdag.ProtoNode).Data()) + "2" + string(b.(*mdag.ProtoNode).Data())
+	if err := ds.Add(context.Background(), b); err != nil {
+		t.Error(err)
+	}
+	if err := a.(*mdag.ProtoNode).AddNodeLink(to, b.(*mdag.ProtoNode)); err != nil {
+		t.Error(err)
+	}
+}
+
+func child(t *testing.T, ds ipld.DAGService, a ipld.Node, name string) ipld.Node {
+	return mdag.NodeWithData([]byte(string(a.(*mdag.ProtoNode).Data()) + "/" + name))
+}
diff --git a/ipns/README.md b/ipns/README.md
new file mode 100644
index 0000000000..0afa0be00b
--- /dev/null
+++ b/ipns/README.md
@@ -0,0 +1,31 @@
+## Usage
+
+To create a new IPNS record:
+
+```go
+import (
+	"time"
+
+	ipns "github.com/ipfs/boxo/ipns"
+	crypto "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+// Generate a private key to sign the IPNS record with. Most of the time,
+// however, you'll want to retrieve an already-existing key from IPFS using the
+// go-ipfs/core/coreapi CoreAPI.KeyAPI() interface.
+privateKey, publicKey, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+if err != nil {
+	panic(err)
+}
+
+// Create an IPNS record with a zero TTL that expires in one hour and points to
+// the IPFS address /ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5
+ipnsRecord, err := ipns.Create(privateKey, []byte("/ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5"), 0, time.Now().Add(1*time.Hour), 0)
+if err != nil {
+	panic(err)
+}
+```
+
+Once you have the record, you’ll need to use IPFS to *publish* it.
+
+There are several other major operations you can do with this library. Check out the [API docs](https://pkg.go.dev/github.com/ipfs/boxo/ipns) or look at the tests in this repo for examples.
diff --git a/ipns/errors.go b/ipns/errors.go
new file mode 100644
index 0000000000..d78aafffa0
--- /dev/null
+++ b/ipns/errors.go
@@ -0,0 +1,44 @@
+package ipns
+
+import (
+	"errors"
+)
+
+// ErrExpiredRecord should be returned when an ipns record is
+// invalid due to being too old
+var ErrExpiredRecord = errors.New("expired record")
+
+// ErrUnrecognizedValidity is returned when an IpnsRecord has an
+// unknown validity type.
+var ErrUnrecognizedValidity = errors.New("unrecognized validity type")
+
+// ErrInvalidPath should be returned when an ipns record path
+// is not in a valid format
+var ErrInvalidPath = errors.New("record path invalid")
+
+// ErrSignature should be returned when an ipns record fails
+// signature verification
+var ErrSignature = errors.New("record signature verification failed")
+
+// ErrKeyFormat should be returned when an ipns record key is
+// incorrectly formatted (not a peer ID)
+var ErrKeyFormat = errors.New("record key could not be parsed into peer ID")
+
+// ErrPublicKeyNotFound should be returned when the public key
+// corresponding to the ipns record path cannot be retrieved
+// from the peer store
+var ErrPublicKeyNotFound = errors.New("public key not found in peer store")
+
+// ErrPublicKeyMismatch should be returned when the public key embedded in the
+// record doesn't match the expected public key.
+var ErrPublicKeyMismatch = errors.New("public key in record did not match expected pubkey")
+
+// ErrBadRecord should be returned when an ipns record cannot be unmarshalled
+var ErrBadRecord = errors.New("record could not be unmarshalled")
+
+// MaxRecordSize is the 10 KiB (10240 byte) limit defined in
+// https://github.com/ipfs/specs/pull/319
+const MaxRecordSize int = 10 << (10 * 1)
+
+// ErrRecordSize should be returned when an ipns record is
+// invalid due to being too big
+var ErrRecordSize = errors.New("record exceeds allowed size limit")
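All of the above are plain sentinel values, so callers can branch on them with errors.Is after a failed validation. A small sketch; the classify helper is hypothetical and not part of this package, only the sentinel errors come from it:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/ipfs/boxo/ipns"
)

// classify is a hypothetical helper mapping validation failures to
// human-readable advice.
func classify(err error) string {
	switch {
	case errors.Is(err, ipns.ErrExpiredRecord):
		return "record EOL has passed; republish with a fresh expiry"
	case errors.Is(err, ipns.ErrSignature):
		return "signature verification failed; reject the record"
	case errors.Is(err, ipns.ErrRecordSize):
		return "record is larger than MaxRecordSize (10 KiB); reject it"
	default:
		return fmt.Sprintf("other error: %v", err)
	}
}
```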
diff --git a/ipns/examples/embed.go b/ipns/examples/embed.go
new file mode 100644
index 0000000000..1f33d514d5
--- /dev/null
+++ b/ipns/examples/embed.go
@@ -0,0 +1,27 @@
+package examples
+
+import (
+	"time"
+
+	pb "github.com/ipfs/boxo/ipns/pb"
+
+	"github.com/ipfs/boxo/ipns"
+	"github.com/libp2p/go-libp2p/core/crypto"
+)
+
+// CreateEntryWithEmbed shows how you can create an IPNS entry and embed a
+// public key in it. For Ed25519 keys this is not needed, so attempting to
+// embed an Ed25519 key will not actually embed the key.
+func CreateEntryWithEmbed(ipfsPath string, publicKey crypto.PubKey, privateKey crypto.PrivKey) (*pb.IpnsEntry, error) {
+	ipfsPathByte := []byte(ipfsPath)
+	eol := time.Now().Add(time.Hour * 48)
+	entry, err := ipns.Create(privateKey, ipfsPathByte, 1, eol, 0)
+	if err != nil {
+		return nil, err
+	}
+	err = ipns.EmbedPublicKey(publicKey, entry)
+	if err != nil {
+		return nil, err
+	}
+	return entry, nil
+}
diff --git a/ipns/examples/examples_test.go b/ipns/examples/examples_test.go
new file mode 100644
index 0000000000..0fe182d9f3
--- /dev/null
+++ b/ipns/examples/examples_test.go
@@ -0,0 +1,60 @@
+package examples_test
+
+import (
+	"testing"
+
+	"github.com/ipfs/boxo/ipns/examples"
+	"github.com/libp2p/go-libp2p/core/crypto"
+)
+
+var testPath = "/ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5"
+
+func TestKeyGeneration(t *testing.T) {
+	_, err := generateRSAKey()
+	if err != nil {
+		t.Error(err)
+	}
+
+	_, err = generateEDKey()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func TestEmbeddedEntryCreation(t *testing.T) {
+	rk, err := generateRSAKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ek, err := generateEDKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = examples.CreateEntryWithEmbed(testPath, rk.GetPublic(), rk)
+	if err != nil {
+		t.Error(err)
+	}
+
+	_, err = examples.CreateEntryWithEmbed(testPath, ek.GetPublic(), ek)
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func generateRSAKey() (crypto.PrivKey, error) {
+	k, err := examples.GenerateRSAKeyPair(2048)
+	if err != nil {
+		return nil, err
+	}
+	return k, nil
+}
+
+func generateEDKey() (crypto.PrivKey, error) {
+	// Ed25519 uses 256-bit keys and ignores the bit param.
+	k, err := examples.GenerateEDKeyPair()
+	if err != nil {
+		return nil, err
+	}
+	return k, nil
+}
diff --git a/ipns/examples/key.go b/ipns/examples/key.go
new file mode 100644
index 0000000000..94f219b8d2
--- /dev/null
+++ b/ipns/examples/key.go
@@ -0,0 +1,24 @@
+package examples
+
+import (
+	"github.com/libp2p/go-libp2p/core/crypto"
+)
+
+// GenerateRSAKeyPair is used to generate an RSA key pair
+func GenerateRSAKeyPair(bits int) (crypto.PrivKey, error) {
+	priv, _, err := crypto.GenerateKeyPair(crypto.RSA, bits)
+	if err != nil {
+		return nil, err
+	}
+	return priv, nil
+}
+
+// GenerateEDKeyPair is used to generate an Ed25519 key pair
+func GenerateEDKeyPair() (crypto.PrivKey, error) {
+	// Ed25519 ignores the bit param and uses 256-bit keys.
+	priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
+	if err != nil {
+		return nil, err
+	}
+	return priv, nil
+}
diff --git a/ipns/ipns.go b/ipns/ipns.go
new file mode 100644
index 0000000000..87ddadb034
--- /dev/null
+++ b/ipns/ipns.go
@@ -0,0 +1,398 @@
+package ipns
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"time"
+
+	"github.com/multiformats/go-multicodec"
+	"github.com/pkg/errors"
+
+	"github.com/ipld/go-ipld-prime"
+	_ "github.com/ipld/go-ipld-prime/codec/dagcbor" // used to import the DagCbor encoder/decoder
+	ipldcodec "github.com/ipld/go-ipld-prime/multicodec"
+	basicnode "github.com/ipld/go-ipld-prime/node/basic"
+
+	"github.com/gogo/protobuf/proto"
+
+	pb "github.com/ipfs/boxo/ipns/pb"
+
+	u "github.com/ipfs/boxo/util"
+	ic "github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+const (
+	validity     = "Validity"
+	validityType = "ValidityType"
+	value        = "Value"
+	sequence     = "Sequence"
+	ttl          = "TTL"
+)
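The five constants above become the keys of the record's DAG-CBOR data blob, and they are written in RFC 7049 canonical order: shorter keys first, ties broken lexicographically (this mirrors cborMapKeyString_RFC7049 at the bottom of this file). A stdlib-only sketch of that ordering:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	keys := []string{"Value", "Validity", "ValidityType", "Sequence", "TTL"}
	// Same comparison as cborMapKeyString_RFC7049: length first, then
	// lexicographic order for keys of equal length.
	sort.Slice(keys, func(i, j int) bool {
		if len(keys[i]) == len(keys[j]) {
			return keys[i] < keys[j]
		}
		return len(keys[i]) < len(keys[j])
	})
	fmt.Println(keys) // [TTL Value Sequence Validity ValidityType]
}
```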
+// Create creates a new IPNS entry and signs it with the given private key.
+//
+// This function does not embed the public key. If you want to do that, use
+// `EmbedPublicKey`.
+func Create(sk ic.PrivKey, val []byte, seq uint64, eol time.Time, ttl time.Duration) (*pb.IpnsEntry, error) {
+	entry := new(pb.IpnsEntry)
+
+	entry.Value = val
+	typ := pb.IpnsEntry_EOL
+	entry.ValidityType = &typ
+	entry.Sequence = &seq
+	entry.Validity = []byte(u.FormatRFC3339(eol))
+
+	ttlNs := uint64(ttl.Nanoseconds())
+	entry.Ttl = proto.Uint64(ttlNs)
+
+	cborData, err := createCborDataForIpnsEntry(entry)
+	if err != nil {
+		return nil, err
+	}
+	entry.Data = cborData
+
+	// For now we still create V1 signatures. These are deprecated, and not
+	// used during verification anymore (Validate func requires SignatureV2),
+	// but setting it here allows legacy nodes (e.g., go-ipfs < v0.9.0) to
+	// still resolve IPNS published by modern nodes.
+	sig1, err := sk.Sign(ipnsEntryDataForSigV1(entry))
+	if err != nil {
+		return nil, errors.Wrap(err, "could not compute signature data")
+	}
+	entry.SignatureV1 = sig1
+
+	sig2Data, err := ipnsEntryDataForSigV2(entry)
+	if err != nil {
+		return nil, err
+	}
+	sig2, err := sk.Sign(sig2Data)
+	if err != nil {
+		return nil, err
+	}
+	entry.SignatureV2 = sig2
+
+	return entry, nil
+}
+
+func createCborDataForIpnsEntry(e *pb.IpnsEntry) ([]byte, error) {
+	m := make(map[string]ipld.Node)
+	var keys []string
+	m[value] = basicnode.NewBytes(e.GetValue())
+	keys = append(keys, value)
+
+	m[validity] = basicnode.NewBytes(e.GetValidity())
+	keys = append(keys, validity)
+
+	m[validityType] = basicnode.NewInt(int64(e.GetValidityType()))
+	keys = append(keys, validityType)
+
+	m[sequence] = basicnode.NewInt(int64(e.GetSequence()))
+	keys = append(keys, sequence)
+
+	m[ttl] = basicnode.NewInt(int64(e.GetTtl()))
+	keys = append(keys, ttl)
+
+	sort.Sort(cborMapKeyString_RFC7049(keys))
+
+	newNd := basicnode.Prototype__Map{}.NewBuilder()
+	ma, err := newNd.BeginMap(int64(len(keys)))
+	if err != nil {
+		return nil, err
+	}
+
+	for _, k := range keys {
+		if err := ma.AssembleKey().AssignString(k); err != nil {
+			return nil, err
+		}
+		if err := ma.AssembleValue().AssignNode(m[k]); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := ma.Finish(); err != nil {
+		return nil, err
+	}
+
+	nd := newNd.Build()
+
+	enc, err := ipldcodec.LookupEncoder(uint64(multicodec.DagCbor))
+	if err != nil {
+		return nil, err
+	}
+
+	buf := new(bytes.Buffer)
+	if err := enc(nd, buf); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
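Create and Validate (just below) are designed as a pair: whatever Create signs, Validate re-derives and checks. A minimal round-trip sketch, assuming only the exported API in this file:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ipfs/boxo/ipns"
	crypto "github.com/libp2p/go-libp2p/core/crypto"
)

func main() {
	priv, pub, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
	if err != nil {
		panic(err)
	}

	entry, err := ipns.Create(priv, []byte("/ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5"),
		0, time.Now().Add(time.Hour), time.Minute)
	if err != nil {
		panic(err)
	}

	// Validate re-checks the size limit, the V2 signature over
	// "ipns-signature:" + the CBOR data, the CBOR/protobuf field
	// agreement, and the EOL.
	if err := ipns.Validate(pub, entry); err != nil {
		panic(err)
	}
	fmt.Println("record validates")
}
```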
+// Validate validates the given IPNS entry against the given public key.
+func Validate(pk ic.PubKey, entry *pb.IpnsEntry) error {
+	// Make sure max size is respected
+	if entry.Size() > MaxRecordSize {
+		return ErrRecordSize
+	}
+
+	// Check the ipns record signature with the public key
+	if entry.GetSignatureV2() == nil {
+		// always error if no valid signature could be found
+		return ErrSignature
+	}
+
+	sig2Data, err := ipnsEntryDataForSigV2(entry)
+	if err != nil {
+		return fmt.Errorf("could not compute signature data: %w", err)
+	}
+	if ok, err := pk.Verify(sig2Data, entry.GetSignatureV2()); err != nil || !ok {
+		return ErrSignature
+	}
+
+	// TODO: If we switch from pb.IpnsEntry to a more generic IpnsRecord type then perhaps we should only check
+	// this if there is no v1 signature. In the meanwhile this helps avoid some potential rough edges around people
+	// checking the entry fields instead of doing CBOR decoding everywhere.
+	// See https://github.com/ipfs/boxo/ipns/pull/42 for next steps here
+	if err := validateCborDataMatchesPbData(entry); err != nil {
+		return err
+	}
+
+	eol, err := GetEOL(entry)
+	if err != nil {
+		return err
+	}
+	if time.Now().After(eol) {
+		return ErrExpiredRecord
+	}
+	return nil
+}
+
+// TODO: Most of this function could probably be replaced with codegen
+func validateCborDataMatchesPbData(entry *pb.IpnsEntry) error {
+	if len(entry.GetData()) == 0 {
+		return fmt.Errorf("record data is missing")
+	}
+
+	dec, err := ipldcodec.LookupDecoder(uint64(multicodec.DagCbor))
+	if err != nil {
+		return err
+	}
+
+	ndbuilder := basicnode.Prototype__Map{}.NewBuilder()
+	if err := dec(ndbuilder, bytes.NewReader(entry.GetData())); err != nil {
+		return err
+	}
+
+	fullNd := ndbuilder.Build()
+	nd, err := fullNd.LookupByString(value)
+	if err != nil {
+		return err
+	}
+	ndBytes, err := nd.AsBytes()
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(entry.GetValue(), ndBytes) {
+		return fmt.Errorf("field \"%v\" did not match between protobuf and CBOR", value)
+	}
+
+	nd, err = fullNd.LookupByString(validity)
+	if err != nil {
+		return err
+	}
+	ndBytes, err = nd.AsBytes()
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(entry.GetValidity(), ndBytes) {
+		return fmt.Errorf("field \"%v\" did not match between protobuf and CBOR", validity)
+	}
+
+	nd, err = fullNd.LookupByString(validityType)
+	if err != nil {
+		return err
+	}
+	ndInt, err := nd.AsInt()
+	if err != nil {
+		return err
+	}
+	if int64(entry.GetValidityType()) != ndInt {
+		return fmt.Errorf("field \"%v\" did not match between protobuf and CBOR", validityType)
+	}
+
+	nd, err = fullNd.LookupByString(sequence)
+	if err != nil {
+		return err
+	}
+	ndInt, err = nd.AsInt()
+	if err != nil {
+		return err
+	}
+
+	if entry.GetSequence() != uint64(ndInt) {
+		return fmt.Errorf("field \"%v\" did not match between protobuf and CBOR", sequence)
+	}
+
+	nd, err = fullNd.LookupByString(ttl)
+	if err != nil {
+		return err
+	}
+	ndInt, err = nd.AsInt()
+	if err != nil {
+		return err
+	}
+	if entry.GetTtl() != uint64(ndInt) {
+		return fmt.Errorf("field \"%v\" did not match between protobuf and CBOR", ttl)
+	}
+
+	return nil
+}
+
+// GetEOL returns the EOL of this IPNS entry.
+//
+// This function returns ErrUnrecognizedValidity if the validity type of the
+// record isn't EOL. Otherwise, it returns an error if it can't parse the EOL.
+func GetEOL(entry *pb.IpnsEntry) (time.Time, error) {
+	if entry.GetValidityType() != pb.IpnsEntry_EOL {
+		return time.Time{}, ErrUnrecognizedValidity
+	}
+	return u.ParseRFC3339(string(entry.GetValidity()))
+}
+
+// EmbedPublicKey embeds the given public key in the given ipns entry. While not
+// strictly required, some nodes (e.g., DHT servers) may reject IPNS entries
+// that don't embed their public keys as they may not be able to validate them
+// efficiently.
+func EmbedPublicKey(pk ic.PubKey, entry *pb.IpnsEntry) error {
+	// Try extracting the public key from the ID. If we can, *don't* embed
+	// it.
+	id, err := peer.IDFromPublicKey(pk)
+	if err != nil {
+		return err
+	}
+	if _, err := id.ExtractPublicKey(); err != peer.ErrNoPublicKey {
+		// Either a *real* error or nil.
+		return err
+	}
+
+	// We failed to extract the public key from the peer ID, embed it in the
+	// record.
+	pkBytes, err := ic.MarshalPublicKey(pk)
+	if err != nil {
+		return err
+	}
+	entry.PubKey = pkBytes
+	return nil
+}
+
+// ExtractPublicKey extracts a public key matching `pid` from the IPNS record,
+// if possible.
+// +// This function returns (nil, nil) when no public key can be extracted and +// nothing is malformed. +func ExtractPublicKey(pid peer.ID, entry *pb.IpnsEntry) (ic.PubKey, error) { + if entry.PubKey != nil { + pk, err := ic.UnmarshalPublicKey(entry.PubKey) + if err != nil { + return nil, fmt.Errorf("unmarshaling pubkey in record: %s", err) + } + + expPid, err := peer.IDFromPublicKey(pk) + if err != nil { + return nil, fmt.Errorf("could not regenerate peerID from pubkey: %s", err) + } + + if pid != expPid { + return nil, ErrPublicKeyMismatch + } + return pk, nil + } + + return pid.ExtractPublicKey() +} + +// Compare compares two IPNS entries. It returns: +// +// * -1 if a is older than b +// * 0 if a and b cannot be ordered (this doesn't mean that they are equal) +// * +1 if a is newer than b +// +// It returns an error when either a or b are malformed. +// +// NOTE: It *does not* validate the records, the caller is responsible for calling +// `Validate` first. +// +// NOTE: If a and b cannot be ordered by this function, you can determine their +// order by comparing their serialized byte representations (using +// `bytes.Compare`). You must do this if you are implementing a libp2p record +// validator (or you can just use the one provided for you by this package). +func Compare(a, b *pb.IpnsEntry) (int, error) { + aHasV2Sig := a.GetSignatureV2() != nil + bHasV2Sig := b.GetSignatureV2() != nil + + // Having a newer signature version is better than an older signature version + if aHasV2Sig && !bHasV2Sig { + return 1, nil + } else if !aHasV2Sig && bHasV2Sig { + return -1, nil + } + + as := a.GetSequence() + bs := b.GetSequence() + + if as > bs { + return 1, nil + } else if as < bs { + return -1, nil + } + + at, err := u.ParseRFC3339(string(a.GetValidity())) + if err != nil { + return 0, err + } + + bt, err := u.ParseRFC3339(string(b.GetValidity())) + if err != nil { + return 0, err + } + + if at.After(bt) { + return 1, nil + } else if bt.After(at) { + return -1, nil + } + + return 0, nil +} + +func ipnsEntryDataForSigV1(e *pb.IpnsEntry) []byte { + return bytes.Join([][]byte{ + e.Value, + e.Validity, + []byte(fmt.Sprint(e.GetValidityType())), + }, + []byte{}) +} + +func ipnsEntryDataForSigV2(e *pb.IpnsEntry) ([]byte, error) { + dataForSig := []byte("ipns-signature:") + dataForSig = append(dataForSig, e.Data...) 
+ + return dataForSig, nil +} + +type cborMapKeyString_RFC7049 []string + +func (x cborMapKeyString_RFC7049) Len() int { return len(x) } +func (x cborMapKeyString_RFC7049) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x cborMapKeyString_RFC7049) Less(i, j int) bool { + li, lj := len(x[i]), len(x[j]) + if li == lj { + return x[i] < x[j] + } + return li < lj +} + +var _ sort.Interface = (cborMapKeyString_RFC7049)(nil) diff --git a/ipns/ipns_test.go b/ipns/ipns_test.go new file mode 100644 index 0000000000..e6d521ce4f --- /dev/null +++ b/ipns/ipns_test.go @@ -0,0 +1,63 @@ +package ipns + +import ( + "fmt" + "testing" + "time" + + u "github.com/ipfs/boxo/util" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestEmbedPublicKey(t *testing.T) { + + sr := u.NewTimeSeededRand() + priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, sr) + if err != nil { + t.Fatal(err) + } + + pid, err := peer.IDFromPublicKey(pub) + if err != nil { + t.Fatal(err) + } + + e, err := Create(priv, []byte("/a/b"), 0, time.Now().Add(1*time.Hour), 0) + if err != nil { + t.Fatal(err) + } + if err := EmbedPublicKey(pub, e); err != nil { + t.Fatal(err) + } + embeddedPk, err := ci.UnmarshalPublicKey(e.PubKey) + if err != nil { + t.Fatal(err) + } + embeddedPid, err := peer.IDFromPublicKey(embeddedPk) + if err != nil { + t.Fatal(err) + } + if embeddedPid != pid { + t.Fatalf("pid mismatch: %s != %s", pid, embeddedPid) + } +} + +func ExampleCreate() { + // Generate a private key to sign the IPNS record with. Most of the time, + // however, you'll want to retrieve an already-existing key from IPFS using + // go-ipfs/core/coreapi CoreAPI.KeyAPI() interface. + privateKey, _, err := ci.GenerateKeyPair(ci.RSA, 2048) + if err != nil { + panic(err) + } + + // Create an IPNS record that expires in one hour and points to the IPFS address + // /ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5 + ipnsRecord, err := Create(privateKey, []byte("/ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5"), 0, time.Now().Add(1*time.Hour), 0) + if err != nil { + panic(err) + } + + fmt.Println(ipnsRecord) +} diff --git a/ipns/pb/Makefile b/ipns/pb/Makefile new file mode 100644 index 0000000000..eb14b5768a --- /dev/null +++ b/ipns/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/ipns/pb/ipns.pb.go b/ipns/pb/ipns.pb.go new file mode 100644 index 0000000000..1e24888527 --- /dev/null +++ b/ipns/pb/ipns.pb.go @@ -0,0 +1,992 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ipns.proto + +package ipns_pb + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type IpnsEntry_ValidityType int32 + +const ( + // setting an EOL says "this record is valid until..." 
+ IpnsEntry_EOL IpnsEntry_ValidityType = 0 +) + +var IpnsEntry_ValidityType_name = map[int32]string{ + 0: "EOL", +} + +var IpnsEntry_ValidityType_value = map[string]int32{ + "EOL": 0, +} + +func (x IpnsEntry_ValidityType) Enum() *IpnsEntry_ValidityType { + p := new(IpnsEntry_ValidityType) + *p = x + return p +} + +func (x IpnsEntry_ValidityType) String() string { + return proto.EnumName(IpnsEntry_ValidityType_name, int32(x)) +} + +func (x *IpnsEntry_ValidityType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IpnsEntry_ValidityType_value, data, "IpnsEntry_ValidityType") + if err != nil { + return err + } + *x = IpnsEntry_ValidityType(value) + return nil +} + +func (IpnsEntry_ValidityType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4d5b16fb32bfe8ea, []int{0, 0} +} + +type IpnsEntry struct { + Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + SignatureV1 []byte `protobuf:"bytes,2,opt,name=signatureV1" json:"signatureV1,omitempty"` + ValidityType *IpnsEntry_ValidityType `protobuf:"varint,3,opt,name=validityType,enum=ipns.v1.pb.IpnsEntry_ValidityType" json:"validityType,omitempty"` + Validity []byte `protobuf:"bytes,4,opt,name=validity" json:"validity,omitempty"` + Sequence *uint64 `protobuf:"varint,5,opt,name=sequence" json:"sequence,omitempty"` + Ttl *uint64 `protobuf:"varint,6,opt,name=ttl" json:"ttl,omitempty"` + // in order for nodes to properly validate a record upon receipt, they need the public + // key associated with it. For old RSA keys, its easiest if we just send this as part of + // the record itself. For newer ed25519 keys, the public key can be embedded in the + // peerID, making this field unnecessary. + PubKey []byte `protobuf:"bytes,7,opt,name=pubKey" json:"pubKey,omitempty"` + SignatureV2 []byte `protobuf:"bytes,8,opt,name=signatureV2" json:"signatureV2,omitempty"` + Data []byte `protobuf:"bytes,9,opt,name=data" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IpnsEntry) Reset() { *m = IpnsEntry{} } +func (m *IpnsEntry) String() string { return proto.CompactTextString(m) } +func (*IpnsEntry) ProtoMessage() {} +func (*IpnsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_4d5b16fb32bfe8ea, []int{0} +} +func (m *IpnsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IpnsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IpnsEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IpnsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_IpnsEntry.Merge(m, src) +} +func (m *IpnsEntry) XXX_Size() int { + return m.Size() +} +func (m *IpnsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_IpnsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_IpnsEntry proto.InternalMessageInfo + +func (m *IpnsEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *IpnsEntry) GetSignatureV1() []byte { + if m != nil { + return m.SignatureV1 + } + return nil +} + +func (m *IpnsEntry) GetValidityType() IpnsEntry_ValidityType { + if m != nil && m.ValidityType != nil { + return *m.ValidityType + } + return IpnsEntry_EOL +} + +func (m *IpnsEntry) GetValidity() []byte { + if m != nil { + return m.Validity + } + return nil +} + +func (m *IpnsEntry) GetSequence() uint64 { + if m != nil && 
m.Sequence != nil { + return *m.Sequence + } + return 0 +} + +func (m *IpnsEntry) GetTtl() uint64 { + if m != nil && m.Ttl != nil { + return *m.Ttl + } + return 0 +} + +func (m *IpnsEntry) GetPubKey() []byte { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *IpnsEntry) GetSignatureV2() []byte { + if m != nil { + return m.SignatureV2 + } + return nil +} + +func (m *IpnsEntry) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type IpnsSignatureV2Checker struct { + PubKey []byte `protobuf:"bytes,7,opt,name=pubKey" json:"pubKey,omitempty"` + SignatureV2 []byte `protobuf:"bytes,8,opt,name=signatureV2" json:"signatureV2,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IpnsSignatureV2Checker) Reset() { *m = IpnsSignatureV2Checker{} } +func (m *IpnsSignatureV2Checker) String() string { return proto.CompactTextString(m) } +func (*IpnsSignatureV2Checker) ProtoMessage() {} +func (*IpnsSignatureV2Checker) Descriptor() ([]byte, []int) { + return fileDescriptor_4d5b16fb32bfe8ea, []int{1} +} +func (m *IpnsSignatureV2Checker) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IpnsSignatureV2Checker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IpnsSignatureV2Checker.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IpnsSignatureV2Checker) XXX_Merge(src proto.Message) { + xxx_messageInfo_IpnsSignatureV2Checker.Merge(m, src) +} +func (m *IpnsSignatureV2Checker) XXX_Size() int { + return m.Size() +} +func (m *IpnsSignatureV2Checker) XXX_DiscardUnknown() { + xxx_messageInfo_IpnsSignatureV2Checker.DiscardUnknown(m) +} + +var xxx_messageInfo_IpnsSignatureV2Checker proto.InternalMessageInfo + +func (m *IpnsSignatureV2Checker) GetPubKey() []byte { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *IpnsSignatureV2Checker) GetSignatureV2() []byte { + if m != nil { + return m.SignatureV2 + } + return nil +} + +func init() { + proto.RegisterEnum("ipns.v1.pb.IpnsEntry_ValidityType", IpnsEntry_ValidityType_name, IpnsEntry_ValidityType_value) + proto.RegisterType((*IpnsEntry)(nil), "ipns.v1.pb.IpnsEntry") + proto.RegisterType((*IpnsSignatureV2Checker)(nil), "ipns.v1.pb.IpnsSignatureV2Checker") +} + +func init() { proto.RegisterFile("ipns.proto", fileDescriptor_4d5b16fb32bfe8ea) } + +var fileDescriptor_4d5b16fb32bfe8ea = []byte{ + // 272 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xca, 0x2c, 0xc8, 0x2b, + 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x82, 0xb0, 0xcb, 0x0c, 0xf5, 0x0a, 0x92, 0x94, 0xf6, + 0x30, 0x71, 0x71, 0x7a, 0x16, 0xe4, 0x15, 0xbb, 0xe6, 0x95, 0x14, 0x55, 0x0a, 0x89, 0x70, 0xb1, + 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x41, 0x38, 0x42, 0x0a, + 0x5c, 0xdc, 0xc5, 0x99, 0xe9, 0x79, 0x89, 0x25, 0xa5, 0x45, 0xa9, 0x61, 0x86, 0x12, 0x4c, 0x60, + 0x39, 0x64, 0x21, 0x21, 0x37, 0x2e, 0x9e, 0xb2, 0xc4, 0x9c, 0xcc, 0x94, 0xcc, 0x92, 0xca, 0x90, + 0xca, 0x82, 0x54, 0x09, 0x66, 0x05, 0x46, 0x0d, 0x3e, 0x23, 0x25, 0x3d, 0x84, 0x45, 0x7a, 0x70, + 0x4b, 0xf4, 0xc2, 0x90, 0x54, 0x06, 0xa1, 0xe8, 0x13, 0x92, 0xe2, 0xe2, 0x80, 0xf1, 0x25, 0x58, + 0xc0, 0xd6, 0xc0, 0xf9, 0x20, 0xb9, 0xe2, 0xd4, 0xc2, 0xd2, 0xd4, 0xbc, 0xe4, 0x54, 0x09, 0x56, + 0x05, 0x46, 0x0d, 0x96, 0x20, 0x38, 
0x5f, 0x48, 0x80, 0x8b, 0xb9, 0xa4, 0x24, 0x47, 0x82, 0x0d, + 0x2c, 0x0c, 0x62, 0x0a, 0x89, 0x71, 0xb1, 0x15, 0x94, 0x26, 0x79, 0xa7, 0x56, 0x4a, 0xb0, 0x83, + 0xcd, 0x81, 0xf2, 0x50, 0xfd, 0x62, 0x24, 0xc1, 0x81, 0xee, 0x17, 0x23, 0x21, 0x21, 0x2e, 0x96, + 0x94, 0xc4, 0x92, 0x44, 0x09, 0x4e, 0xb0, 0x14, 0x98, 0xad, 0x24, 0xce, 0xc5, 0x83, 0xec, 0x6a, + 0x21, 0x76, 0x2e, 0x66, 0x57, 0x7f, 0x1f, 0x01, 0x06, 0xa5, 0x20, 0x2e, 0x31, 0x90, 0xc7, 0x82, + 0x11, 0xfa, 0x9d, 0x33, 0x52, 0x93, 0xb3, 0x53, 0x8b, 0xc8, 0x77, 0x80, 0x93, 0xe8, 0x89, 0x47, + 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0xc5, 0x0e, 0x0a, 0xc3, 0xf8, + 0x82, 0x24, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x45, 0xdd, 0x1a, 0xc2, 0x01, 0x00, 0x00, +} + +func (m *IpnsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IpnsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IpnsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintIpns(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x4a + } + if m.SignatureV2 != nil { + i -= len(m.SignatureV2) + copy(dAtA[i:], m.SignatureV2) + i = encodeVarintIpns(dAtA, i, uint64(len(m.SignatureV2))) + i-- + dAtA[i] = 0x42 + } + if m.PubKey != nil { + i -= len(m.PubKey) + copy(dAtA[i:], m.PubKey) + i = encodeVarintIpns(dAtA, i, uint64(len(m.PubKey))) + i-- + dAtA[i] = 0x3a + } + if m.Ttl != nil { + i = encodeVarintIpns(dAtA, i, uint64(*m.Ttl)) + i-- + dAtA[i] = 0x30 + } + if m.Sequence != nil { + i = encodeVarintIpns(dAtA, i, uint64(*m.Sequence)) + i-- + dAtA[i] = 0x28 + } + if m.Validity != nil { + i -= len(m.Validity) + copy(dAtA[i:], m.Validity) + i = encodeVarintIpns(dAtA, i, uint64(len(m.Validity))) + i-- + dAtA[i] = 0x22 + } + if m.ValidityType != nil { + i = encodeVarintIpns(dAtA, i, uint64(*m.ValidityType)) + i-- + dAtA[i] = 0x18 + } + if m.SignatureV1 != nil { + i -= len(m.SignatureV1) + copy(dAtA[i:], m.SignatureV1) + i = encodeVarintIpns(dAtA, i, uint64(len(m.SignatureV1))) + i-- + dAtA[i] = 0x12 + } + if m.Value != nil { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintIpns(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IpnsSignatureV2Checker) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IpnsSignatureV2Checker) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IpnsSignatureV2Checker) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SignatureV2 != nil { + i -= len(m.SignatureV2) + copy(dAtA[i:], m.SignatureV2) + i = encodeVarintIpns(dAtA, i, uint64(len(m.SignatureV2))) + i-- + dAtA[i] = 0x42 + } + if m.PubKey != nil { + i -= len(m.PubKey) + copy(dAtA[i:], m.PubKey) + i = encodeVarintIpns(dAtA, i, uint64(len(m.PubKey))) + i-- + dAtA[i] = 0x3a + } + 
return len(dAtA) - i, nil +} + +func encodeVarintIpns(dAtA []byte, offset int, v uint64) int { + offset -= sovIpns(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IpnsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovIpns(uint64(l)) + } + if m.SignatureV1 != nil { + l = len(m.SignatureV1) + n += 1 + l + sovIpns(uint64(l)) + } + if m.ValidityType != nil { + n += 1 + sovIpns(uint64(*m.ValidityType)) + } + if m.Validity != nil { + l = len(m.Validity) + n += 1 + l + sovIpns(uint64(l)) + } + if m.Sequence != nil { + n += 1 + sovIpns(uint64(*m.Sequence)) + } + if m.Ttl != nil { + n += 1 + sovIpns(uint64(*m.Ttl)) + } + if m.PubKey != nil { + l = len(m.PubKey) + n += 1 + l + sovIpns(uint64(l)) + } + if m.SignatureV2 != nil { + l = len(m.SignatureV2) + n += 1 + l + sovIpns(uint64(l)) + } + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovIpns(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IpnsSignatureV2Checker) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKey != nil { + l = len(m.PubKey) + n += 1 + l + sovIpns(uint64(l)) + } + if m.SignatureV2 != nil { + l = len(m.SignatureV2) + n += 1 + l + sovIpns(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovIpns(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozIpns(x uint64) (n int) { + return sovIpns(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IpnsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IpnsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IpnsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureV1", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureV1 = append(m.SignatureV1[:0], dAtA[iNdEx:postIndex]...) + if m.SignatureV1 == nil { + m.SignatureV1 = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidityType", wireType) + } + var v IpnsEntry_ValidityType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= IpnsEntry_ValidityType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ValidityType = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validity", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validity = append(m.Validity[:0], dAtA[iNdEx:postIndex]...) + if m.Validity == nil { + m.Validity = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sequence = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ttl = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PubKey == nil { + m.PubKey = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureV2", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureV2 = append(m.SignatureV2[:0], dAtA[iNdEx:postIndex]...) + if m.SignatureV2 == nil { + m.SignatureV2 = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIpns(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIpns + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthIpns + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IpnsSignatureV2Checker) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IpnsSignatureV2Checker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IpnsSignatureV2Checker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PubKey == nil { + m.PubKey = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureV2", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpns + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpns + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpns + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureV2 = append(m.SignatureV2[:0], dAtA[iNdEx:postIndex]...) + if m.SignatureV2 == nil { + m.SignatureV2 = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIpns(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIpns + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthIpns + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipIpns(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowIpns + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowIpns + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowIpns + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthIpns + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupIpns + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthIpns + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthIpns = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowIpns = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupIpns = fmt.Errorf("proto: unexpected end of group") +) diff --git a/ipns/pb/ipns.proto b/ipns/pb/ipns.proto new file mode 100644 index 0000000000..bd89a34ec4 --- /dev/null +++ b/ipns/pb/ipns.proto @@ -0,0 +1,36 @@ +syntax = "proto2"; + +package ipns.v1.pb; + +option go_package = "ipns_pb"; + +message IpnsEntry { + enum ValidityType { + // setting an EOL says "this record is valid until..." + EOL = 0; + } + optional bytes value = 1; + optional bytes signatureV1 = 2; + + optional ValidityType validityType = 3; + optional bytes validity = 4; + + optional uint64 sequence = 5; + + optional uint64 ttl = 6; + + // in order for nodes to properly validate a record upon receipt, they need the public + // key associated with it. 
For old RSA keys, its easiest if we just send this as part of + // the record itself. For newer ed25519 keys, the public key can be embedded in the + // peerID, making this field unnecessary. + optional bytes pubKey = 7; + + optional bytes signatureV2 = 8; + + optional bytes data = 9; +} + +message IpnsSignatureV2Checker { + optional bytes pubKey = 7; + optional bytes signatureV2 = 8; +} diff --git a/ipns/record.go b/ipns/record.go new file mode 100644 index 0000000000..b479dab90e --- /dev/null +++ b/ipns/record.go @@ -0,0 +1,126 @@ +package ipns + +import ( + "bytes" + "errors" + + pb "github.com/ipfs/boxo/ipns/pb" + + "github.com/gogo/protobuf/proto" + logging "github.com/ipfs/go-log/v2" + record "github.com/libp2p/go-libp2p-record" + ic "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + pstore "github.com/libp2p/go-libp2p/core/peerstore" +) + +var log = logging.Logger("ipns") + +var _ record.Validator = Validator{} + +// RecordKey returns the libp2p record key for a given peer ID. +func RecordKey(pid peer.ID) string { + return "/ipns/" + string(pid) +} + +// Validator is an IPNS record validator that satisfies the libp2p record +// validator interface. +type Validator struct { + // KeyBook, if non-nil, will be used to lookup keys for validating IPNS + // records. + KeyBook pstore.KeyBook +} + +// Validate validates an IPNS record. +func (v Validator) Validate(key string, value []byte) error { + ns, pidString, err := record.SplitKey(key) + if err != nil || ns != "ipns" { + return ErrInvalidPath + } + + // Parse the value into an IpnsEntry + entry := new(pb.IpnsEntry) + err = proto.Unmarshal(value, entry) + if err != nil { + return ErrBadRecord + } + + // Get the public key defined by the ipns path + pid, err := peer.IDFromBytes([]byte(pidString)) + if err != nil { + log.Debugf("failed to parse ipns record key %s into peer ID", pidString) + return ErrKeyFormat + } + + pubk, err := v.getPublicKey(pid, entry) + if err != nil { + return err + } + + return Validate(pubk, entry) +} + +func (v Validator) getPublicKey(pid peer.ID, entry *pb.IpnsEntry) (ic.PubKey, error) { + switch pk, err := ExtractPublicKey(pid, entry); err { + case peer.ErrNoPublicKey: + case nil: + return pk, nil + default: + return nil, err + } + + if v.KeyBook == nil { + log.Debugf("public key with hash %s not found in IPNS record and no peer store provided", pid) + return nil, ErrPublicKeyNotFound + } + + pubk := v.KeyBook.PubKey(pid) + if pubk == nil { + log.Debugf("public key with hash %s not found in peer store", pid) + return nil, ErrPublicKeyNotFound + } + return pubk, nil +} + +// Select selects the best record by checking which has the highest sequence +// number and latest EOL. +// +// This function returns an error if any of the records fail to parse. Validate +// your records first! 
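+//
+// A minimal usage sketch (pid, rec1 and rec2 are hypothetical,
+// already-validated inputs):
+//
+//	v := Validator{}
+//	vals := [][]byte{rec1, rec2}
+//	best, err := v.Select(RecordKey(pid), vals)
+//	// on success, vals[best] is the record to keep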
+func (v Validator) Select(k string, vals [][]byte) (int, error) {
+	var recs []*pb.IpnsEntry
+	for _, v := range vals {
+		e := new(pb.IpnsEntry)
+		if err := proto.Unmarshal(v, e); err != nil {
+			return -1, err
+		}
+		recs = append(recs, e)
+	}
+
+	return selectRecord(recs, vals)
+}
+
+func selectRecord(recs []*pb.IpnsEntry, vals [][]byte) (int, error) {
+	switch len(recs) {
+	case 0:
+		return -1, errors.New("no usable records in given set")
+	case 1:
+		return 0, nil
+	}
+
+	var i int
+	for j := 1; j < len(recs); j++ {
+		cmp, err := Compare(recs[i], recs[j])
+		if err != nil {
+			return -1, err
+		}
+		if cmp == 0 {
+			cmp = bytes.Compare(vals[i], vals[j])
+		}
+		if cmp < 0 {
+			i = j
+		}
+	}
+
+	return i, nil
+}
diff --git a/ipns/select_test.go b/ipns/select_test.go
new file mode 100644
index 0000000000..5b435f62d8
--- /dev/null
+++ b/ipns/select_test.go
@@ -0,0 +1,126 @@
+package ipns
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+
+	pb "github.com/ipfs/boxo/ipns/pb"
+
+	"github.com/gogo/protobuf/proto"
+	u "github.com/ipfs/boxo/util"
+	ci "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+func shuffle(a []*pb.IpnsEntry) {
+	for n := 0; n < 5; n++ {
+		for i := range a {
+			j := rand.Intn(len(a))
+			a[i], a[j] = a[j], a[i]
+		}
+	}
+}
+
+func AssertSelected(r *pb.IpnsEntry, from ...*pb.IpnsEntry) error {
+	shuffle(from)
+	var vals [][]byte
+	for _, r := range from {
+		data, err := proto.Marshal(r)
+		if err != nil {
+			return err
+		}
+		vals = append(vals, data)
+	}
+
+	i, err := selectRecord(from, vals)
+	if err != nil {
+		return err
+	}
+
+	if from[i] != r {
+		return fmt.Errorf("selected incorrect record %d", i)
+	}
+
+	return nil
+}
+
+func TestOrdering(t *testing.T) {
+	// select timestamp so selection is deterministic
+	ts := time.Unix(1000000, 0)
+
+	// generate a key for signing the records
+	r := u.NewSeededRand(15) // generate deterministic keypair
+	priv, _, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, r)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e1, err := Create(priv, []byte("foo"), 1, ts.Add(time.Hour), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e2, err := Create(priv, []byte("bar"), 2, ts.Add(time.Hour), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e3, err := Create(priv, []byte("baz"), 3, ts.Add(time.Hour), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e4, err := Create(priv, []byte("cat"), 3, ts.Add(time.Hour*2), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e5, err := Create(priv, []byte("dog"), 4, ts.Add(time.Hour*3), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e6, err := Create(priv, []byte("fish"), 4, ts.Add(time.Hour*3), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// e1 is the only record, so it must be selected
+	err = AssertSelected(e1, e1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// e2 has the highest sequence number
+	err = AssertSelected(e2, e1, e2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// e3 has the highest sequence number
+	err = AssertSelected(e3, e1, e2, e3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// e4 has the same sequence number as e3 but a later EOL
+	err = AssertSelected(e4, e1, e2, e3, e4)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// e5 has the highest sequence number
+	err = AssertSelected(e5, e1, e2, e3, e4, e5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// e6 should be selected as its signature will win in the comparison
+	err = AssertSelected(e6, e1, e2, e3, e4, e5, e6)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_ = []interface{}{e1, e2, e3, e4, e5, e6}
+}
diff --git a/ipns/validate_test.go b/ipns/validate_test.go
new file mode 100644
index 0000000000..e75d0aee7e
--- /dev/null
+++
b/ipns/validate_test.go @@ -0,0 +1,415 @@ +package ipns + +import ( + "bytes" + "errors" + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + pb "github.com/ipfs/boxo/ipns/pb" + u "github.com/ipfs/boxo/util" + ipldcodec "github.com/ipld/go-ipld-prime/multicodec" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + pstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + "github.com/multiformats/go-multicodec" +) + +func testValidatorCase(t *testing.T, priv crypto.PrivKey, kbook pstore.KeyBook, key string, val []byte, eol time.Time, exp error) { + t.Helper() + + match := func(t *testing.T, err error) { + t.Helper() + if err != exp { + params := fmt.Sprintf("key: %s\neol: %s\n", key, eol) + if exp == nil { + t.Fatalf("Unexpected error %s for params %s", err, params) + } else if err == nil { + t.Fatalf("Expected error %s but there was no error for params %s", exp, params) + } else { + t.Fatalf("Expected error %s but got %s for params %s", exp, err, params) + } + } + } + + testValidatorCaseMatchFunc(t, priv, kbook, key, val, eol, match) +} + +func testValidatorCaseMatchFunc(t *testing.T, priv crypto.PrivKey, kbook pstore.KeyBook, key string, val []byte, eol time.Time, matchf func(*testing.T, error)) { + t.Helper() + validator := Validator{kbook} + + data := val + if data == nil { + p := []byte("/ipfs/QmfM2r8seH2GiRaC4esTjeraXEachRt8ZsSeGaWTPLyMoG") + entry, err := Create(priv, p, 1, eol, 0) + if err != nil { + t.Fatal(err) + } + + data, err = proto.Marshal(entry) + if err != nil { + t.Fatal(err) + } + } + + matchf(t, validator.Validate(key, data)) +} + +func TestValidator(t *testing.T) { + ts := time.Now() + + priv, id, _ := genKeys(t) + priv2, id2, _ := genKeys(t) + kbook, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + if err := kbook.AddPubKey(id, priv.GetPublic()); err != nil { + t.Fatal(err) + } + emptyKbook, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + testValidatorCase(t, priv, kbook, "/ipns/"+string(id), nil, ts.Add(time.Hour), nil) + testValidatorCase(t, priv, kbook, "/ipns/"+string(id), nil, ts.Add(time.Hour*-1), ErrExpiredRecord) + testValidatorCase(t, priv, kbook, "/ipns/"+string(id), []byte("bad data"), ts.Add(time.Hour), ErrBadRecord) + testValidatorCase(t, priv, kbook, "/ipns/"+"bad key", nil, ts.Add(time.Hour), ErrKeyFormat) + testValidatorCase(t, priv, emptyKbook, "/ipns/"+string(id), nil, ts.Add(time.Hour), ErrPublicKeyNotFound) + testValidatorCase(t, priv2, kbook, "/ipns/"+string(id2), nil, ts.Add(time.Hour), ErrPublicKeyNotFound) + testValidatorCase(t, priv2, kbook, "/ipns/"+string(id), nil, ts.Add(time.Hour), ErrSignature) + testValidatorCase(t, priv, kbook, "//"+string(id), nil, ts.Add(time.Hour), ErrInvalidPath) + testValidatorCase(t, priv, kbook, "/wrong/"+string(id), nil, ts.Add(time.Hour), ErrInvalidPath) +} + +func mustMarshal(t *testing.T, entry *pb.IpnsEntry) []byte { + t.Helper() + data, err := proto.Marshal(entry) + if err != nil { + t.Fatal(err) + } + return data +} + +func TestEmbeddedPubKeyValidate(t *testing.T) { + goodeol := time.Now().Add(time.Hour) + kbook, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + pth := []byte("/ipfs/QmfM2r8seH2GiRaC4esTjeraXEachRt8ZsSeGaWTPLyMoG") + + priv, _, ipnsk := genKeys(t) + + entry, err := Create(priv, pth, 1, goodeol, 0) + if err != nil { + t.Fatal(err) + } + + 
testValidatorCase(t, priv, kbook, ipnsk, mustMarshal(t, entry), goodeol, ErrPublicKeyNotFound) + + pubkb, err := crypto.MarshalPublicKey(priv.GetPublic()) + if err != nil { + t.Fatal(err) + } + + entry.PubKey = pubkb + testValidatorCase(t, priv, kbook, ipnsk, mustMarshal(t, entry), goodeol, nil) + + entry.PubKey = []byte("probably not a public key") + testValidatorCaseMatchFunc(t, priv, kbook, ipnsk, mustMarshal(t, entry), goodeol, func(t *testing.T, err error) { + if !strings.Contains(err.Error(), "unmarshaling pubkey in record:") { + t.Fatal("expected pubkey unmarshaling to fail") + } + }) + + opriv, _, _ := genKeys(t) + wrongkeydata, err := crypto.MarshalPublicKey(opriv.GetPublic()) + if err != nil { + t.Fatal(err) + } + + entry.PubKey = wrongkeydata + testValidatorCase(t, priv, kbook, ipnsk, mustMarshal(t, entry), goodeol, ErrPublicKeyMismatch) +} + +func TestPeerIDPubKeyValidate(t *testing.T) { + t.Skip("disabled until libp2p/go-libp2p-crypto#51 is fixed") + + goodeol := time.Now().Add(time.Hour) + kbook, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + pth := []byte("/ipfs/QmfM2r8seH2GiRaC4esTjeraXEachRt8ZsSeGaWTPLyMoG") + + sk, pk, err := crypto.GenerateEd25519Key(rand.New(rand.NewSource(42))) + if err != nil { + t.Fatal(err) + } + + pid, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + + ipnsk := "/ipns/" + string(pid) + + entry, err := Create(sk, pth, 1, goodeol, 0) + if err != nil { + t.Fatal(err) + } + + dataNoKey, err := proto.Marshal(entry) + if err != nil { + t.Fatal(err) + } + + testValidatorCase(t, sk, kbook, ipnsk, dataNoKey, goodeol, nil) +} + +func TestOnlySignatureV2Validate(t *testing.T) { + goodeol := time.Now().Add(time.Hour) + + sk, pk, err := crypto.GenerateEd25519Key(rand.New(rand.NewSource(42))) + if err != nil { + t.Fatal(err) + } + + path1 := []byte("/path/1") + entry, err := Create(sk, path1, 1, goodeol, 0) + if err != nil { + t.Fatal(err) + } + + if err := Validate(pk, entry); err != nil { + t.Fatal(err) + } + + entry.SignatureV2 = nil + if err := Validate(pk, entry); !errors.Is(err, ErrSignature) { + t.Fatal(err) + } +} + +func TestSignatureV1Ignored(t *testing.T) { + goodeol := time.Now().Add(time.Hour) + + sk, pk, err := crypto.GenerateEd25519Key(rand.New(rand.NewSource(42))) + if err != nil { + t.Fatal(err) + } + + pid, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + + ipnsk := "/ipns/" + string(pid) + + path1 := []byte("/path/1") + entry1, err := Create(sk, path1, 1, goodeol, 0) + if err != nil { + t.Fatal(err) + } + + path2 := []byte("/path/2") + entry2, err := Create(sk, path2, 2, goodeol, 0) + if err != nil { + t.Fatal(err) + } + + if err := Validate(pk, entry1); err != nil { + t.Fatal(err) + } + + if err := Validate(pk, entry2); err != nil { + t.Fatal(err) + } + + v := Validator{} + best, err := v.Select(ipnsk, [][]byte{mustMarshal(t, entry1), mustMarshal(t, entry2)}) + if err != nil { + t.Fatal(err) + } + if best != 1 { + t.Fatal("entry2 should be better than entry1") + } + + // Having only the v1 signature should be invalid + entry2.SignatureV2 = nil + if err := Validate(pk, entry2); !errors.Is(err, ErrSignature) { + t.Fatal(err) + } + + // Record with v2 signature should always be preferred + best, err = v.Select(ipnsk, [][]byte{mustMarshal(t, entry1), mustMarshal(t, entry2)}) + if err != nil { + t.Fatal(err) + } + if best != 0 { + t.Fatal("entry1 should be better than entry2") + } + + // Having a missing v1 signature is acceptable as long as there is a valid v2 signature + 
entry1.SignatureV1 = nil + if err := Validate(pk, entry1); err != nil { + t.Fatal(err) + } + + // Having an invalid v1 signature is acceptable as long as there is a valid v2 signature + entry1.SignatureV1 = []byte("garbage") + if err := Validate(pk, entry1); err != nil { + t.Fatal(err) + } +} + +func TestMaxSizeValidate(t *testing.T) { + goodeol := time.Now().Add(time.Hour) + + sk, pk, err := crypto.GenerateEd25519Key(rand.New(rand.NewSource(42))) + if err != nil { + t.Fatal(err) + } + + // Create record over the max size (value+other fields) + value := make([]byte, MaxRecordSize) + entry, err := Create(sk, value, 1, goodeol, 0) + if err != nil { + t.Fatal(err) + } + // Must fail with ErrRecordSize + if err := Validate(pk, entry); !errors.Is(err, ErrRecordSize) { + t.Fatal(err) + } +} + +func TestCborDataCanonicalization(t *testing.T) { + goodeol := time.Now().Add(time.Hour) + + sk, pk, err := crypto.GenerateEd25519Key(rand.New(rand.NewSource(42))) + if err != nil { + t.Fatal(err) + } + + path := append([]byte("/path/1"), 0x00) + seqnum := uint64(1) + entry, err := Create(sk, path, seqnum, goodeol, time.Hour) + if err != nil { + t.Fatal(err) + } + + if err := Validate(pk, entry); err != nil { + t.Fatal(err) + } + + dec, err := ipldcodec.LookupDecoder(uint64(multicodec.DagCbor)) + if err != nil { + t.Fatal(err) + } + + ndbuilder := basicnode.Prototype__Map{}.NewBuilder() + if err := dec(ndbuilder, bytes.NewReader(entry.GetData())); err != nil { + t.Fatal(err) + } + + nd := ndbuilder.Build() + iter := nd.MapIterator() + var fields []string + for !iter.Done() { + k, v, err := iter.Next() + if err != nil { + t.Fatal(err) + } + kstr, err := k.AsString() + if err != nil { + t.Fatal(err) + } + + switch kstr { + case value: + b, err := v.AsBytes() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(path, b) { + t.Fatal("value did not match") + } + case sequence: + s, err := v.AsInt() + if err != nil { + t.Fatal(err) + } + if uint64(s) != seqnum { + t.Fatal("sequence numbers did not match") + } + case validity: + val, err := v.AsBytes() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(val, []byte(u.FormatRFC3339(goodeol))) { + t.Fatal("validity did not match") + } + case validityType: + vt, err := v.AsInt() + if err != nil { + t.Fatal(err) + } + if uint64(vt) != 0 { + t.Fatal("validity types did not match") + } + case ttl: + ttlVal, err := v.AsInt() + if err != nil { + t.Fatal(err) + } + // TODO: test non-zero TTL + if uint64(ttlVal) != uint64(time.Hour.Nanoseconds()) { + t.Fatal("TTLs did not match") + } + } + + fields = append(fields, kstr) + } + + // Check for map sort order (i.e. 
by length then by value) + expectedOrder := []string{"TTL", "Value", "Sequence", "Validity", "ValidityType"} + if len(fields) != len(expectedOrder) { + t.Fatal("wrong number of fields") + } + + for i, f := range fields { + expected := expectedOrder[i] + if f != expected { + t.Fatalf("expected %s, got %s", expected, f) + } + } +} + +func genKeys(t *testing.T) (crypto.PrivKey, peer.ID, string) { + sr := u.NewTimeSeededRand() + priv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, sr) + if err != nil { + t.Fatal(err) + } + + // Create entry with expiry in one hour + pid, err := peer.IDFromPrivateKey(priv) + if err != nil { + t.Fatal(err) + } + ipnsKey := RecordKey(pid) + + return priv, pid, ipnsKey +} diff --git a/keystore/keystore.go b/keystore/keystore.go new file mode 100644 index 0000000000..fc6793a1ee --- /dev/null +++ b/keystore/keystore.go @@ -0,0 +1,188 @@ +package keystore + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "encoding/base32" + + logging "github.com/ipfs/go-log" + ci "github.com/libp2p/go-libp2p/core/crypto" +) + +var log = logging.Logger("keystore") + +var codec = base32.StdEncoding.WithPadding(base32.NoPadding) + +// Keystore provides a key management interface +type Keystore interface { + // Has returns whether or not a key exists in the Keystore + Has(string) (bool, error) + // Put stores a key in the Keystore, if a key with the same name already exists, returns ErrKeyExists + Put(string, ci.PrivKey) error + // Get retrieves a key from the Keystore if it exists, and returns ErrNoSuchKey + // otherwise. + Get(string) (ci.PrivKey, error) + // Delete removes a key from the Keystore + Delete(string) error + // List returns a list of key identifier + List() ([]string, error) +} + +// ErrNoSuchKey is an error message returned when no key of a given name was found. +var ErrNoSuchKey = fmt.Errorf("no key by the given name was found") + +// ErrKeyExists is an error message returned when a key already exists +var ErrKeyExists = fmt.Errorf("key by that name already exists, refusing to overwrite") + +const keyFilenamePrefix = "key_" + +// FSKeystore is a keystore backed by files in a given directory stored on disk. +type FSKeystore struct { + dir string +} + +// NewFSKeystore returns a new filesystem-backed keystore. +func NewFSKeystore(dir string) (*FSKeystore, error) { + err := os.Mkdir(dir, 0700) + switch { + case os.IsExist(err): + case err == nil: + default: + return nil, err + } + return &FSKeystore{dir}, nil +} + +// Has returns whether or not a key exists in the Keystore +func (ks *FSKeystore) Has(name string) (bool, error) { + name, err := encode(name) + if err != nil { + return false, err + } + + kp := filepath.Join(ks.dir, name) + + _, err = os.Stat(kp) + + if os.IsNotExist(err) { + return false, nil + } + return err == nil, err +} + +// Put stores a key in the Keystore, if a key with the same name already exists, returns ErrKeyExists +func (ks *FSKeystore) Put(name string, k ci.PrivKey) error { + name, err := encode(name) + if err != nil { + return err + } + + b, err := ci.MarshalPrivateKey(k) + if err != nil { + return err + } + + kp := filepath.Join(ks.dir, name) + + fi, err := os.OpenFile(kp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0400) + if err != nil { + if os.IsExist(err) { + err = ErrKeyExists + } + return err + } + defer fi.Close() + + _, err = fi.Write(b) + + return err +} + +// Get retrieves a key from the Keystore if it exists, and returns ErrNoSuchKey +// otherwise. 
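+//
+// A minimal usage sketch (the directory and key name are hypothetical):
+//
+//	ks, _ := NewFSKeystore("/path/to/keys")
+//	priv, err := ks.Get("mykey")
+//	if err == ErrNoSuchKey {
+//		// no key was stored under that name
+//	}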
+func (ks *FSKeystore) Get(name string) (ci.PrivKey, error) { + name, err := encode(name) + if err != nil { + return nil, err + } + + kp := filepath.Join(ks.dir, name) + + data, err := os.ReadFile(kp) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrNoSuchKey + } + return nil, err + } + + return ci.UnmarshalPrivateKey(data) +} + +// Delete removes a key from the Keystore +func (ks *FSKeystore) Delete(name string) error { + name, err := encode(name) + if err != nil { + return err + } + + kp := filepath.Join(ks.dir, name) + + return os.Remove(kp) +} + +// List return a list of key identifier +func (ks *FSKeystore) List() ([]string, error) { + dir, err := os.Open(ks.dir) + if err != nil { + return nil, err + } + + dirs, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + list := make([]string, 0, len(dirs)) + + for _, name := range dirs { + decodedName, err := decode(name) + if err == nil { + list = append(list, decodedName) + } else { + log.Errorf("Ignoring keyfile with invalid encoded filename: %s", name) + } + } + + return list, nil +} + +func encode(name string) (string, error) { + if name == "" { + return "", fmt.Errorf("key name must be at least one character") + } + + encodedName := codec.EncodeToString([]byte(name)) + log.Debugf("Encoded key name: %s to: %s", name, encodedName) + + return keyFilenamePrefix + strings.ToLower(encodedName), nil +} + +func decode(name string) (string, error) { + if !strings.HasPrefix(name, keyFilenamePrefix) { + return "", fmt.Errorf("key's filename has unexpected format") + } + + nameWithoutPrefix := strings.ToUpper(name[len(keyFilenamePrefix):]) + decodedName, err := codec.DecodeString(nameWithoutPrefix) + if err != nil { + return "", err + } + + log.Debugf("Decoded key name: %s to: %s", name, decodedName) + + return string(decodedName), nil +} diff --git a/keystore/keystore_test.go b/keystore/keystore_test.go new file mode 100644 index 0000000000..9a44062173 --- /dev/null +++ b/keystore/keystore_test.go @@ -0,0 +1,277 @@ +package keystore + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "sort" + "testing" + + ci "github.com/libp2p/go-libp2p/core/crypto" +) + +type rr struct{} + +func (rr rr) Read(b []byte) (int, error) { + return rand.Read(b) +} + +func privKeyOrFatal(t *testing.T) ci.PrivKey { + priv, _, err := ci.GenerateEd25519Key(rr{}) + if err != nil { + t.Fatal(err) + } + return priv +} + +func TestKeystoreBasics(t *testing.T) { + tdir, err := os.MkdirTemp("", "keystore-test") + if err != nil { + t.Fatal(err) + } + + ks, err := NewFSKeystore(tdir) + if err != nil { + t.Fatal(err) + } + + l, err := ks.List() + if err != nil { + t.Fatal(err) + } + + if len(l) != 0 { + t.Fatal("expected no keys") + } + + k1 := privKeyOrFatal(t) + k2 := privKeyOrFatal(t) + k3 := privKeyOrFatal(t) + k4 := privKeyOrFatal(t) + + err = ks.Put("foo", k1) + if err != nil { + t.Fatal(err) + } + + err = ks.Put("bar", k2) + if err != nil { + t.Fatal(err) + } + + l, err = ks.List() + if err != nil { + t.Fatal(err) + } + + sort.Strings(l) + if l[0] != "bar" || l[1] != "foo" { + t.Fatal("wrong entries listed") + } + + if err := assertDirContents(tdir, []string{"foo", "bar"}); err != nil { + t.Fatal(err) + } + + err = ks.Put("foo", k3) + if err == nil { + t.Fatal("should not be able to overwrite key") + } + + if err := assertDirContents(tdir, []string{"foo", "bar"}); err != nil { + t.Fatal(err) + } + + exist, err := ks.Has("foo") + if !exist { + t.Fatal("should know it has a key named foo") + } + if err != nil { + t.Fatal(err) + } + + 
exist, err = ks.Has("nonexistingkey")
+	if exist {
+		t.Fatal("should know it doesn't have a key named nonexistingkey")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Delete("bar"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertDirContents(tdir, []string{"foo"}); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("beep", k3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("boop", k4); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertDirContents(tdir, []string{"foo", "beep", "boop"}); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertGetKey(ks, "foo", k1); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertGetKey(ks, "beep", k3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertGetKey(ks, "boop", k4); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("..///foo/", k1); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("", k1); err == nil {
+		t.Fatal("shouldn't be able to put a key with no name")
+	}
+
+	if err := ks.Put(".foo", k1); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestInvalidKeyFiles(t *testing.T) {
+	tdir, err := os.MkdirTemp("", "keystore-test")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer os.RemoveAll(tdir)
+
+	ks, err := NewFSKeystore(tdir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	key := privKeyOrFatal(t)
+
+	bytes, err := key.Raw()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	encodedName, err := encode("valid")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = os.WriteFile(filepath.Join(ks.dir, encodedName), bytes, 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = os.WriteFile(filepath.Join(ks.dir, "z.invalid"), bytes, 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	l, err := ks.List()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sort.Strings(l)
+	if len(l) != 1 {
+		t.Fatal("wrong entry count")
+	}
+
+	if l[0] != "valid" {
+		t.Fatal("wrong entries listed")
+	}
+
+	exist, err := ks.Has("valid")
+	if !exist {
+		t.Fatal("should know it has a key named valid")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestNonExistingKey(t *testing.T) {
+	tdir, err := os.MkdirTemp("", "keystore-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ks, err := NewFSKeystore(tdir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k, err := ks.Get("does-it-exist")
+	if err != ErrNoSuchKey {
+		t.Fatalf("expected: %s, got %s", ErrNoSuchKey, err)
+	}
+	if k != nil {
+		t.Fatalf("Get on a nonexistent key should return nil")
+	}
+}
+
+func TestMakeKeystoreNoDir(t *testing.T) {
+	_, err := NewFSKeystore("/this/is/not/a/real/dir")
+	if err == nil {
+		t.Fatal("shouldn't be able to make a keystore in a nonexistent directory")
+	}
+}
+
+func assertGetKey(ks Keystore, name string, exp ci.PrivKey) error {
+	outK, err := ks.Get(name)
+	if err != nil {
+		return err
+	}
+
+	if !outK.Equals(exp) {
+		return fmt.Errorf("key we got out didn't match expectation")
+	}
+
+	return nil
+}
+
+func assertDirContents(dir string, exp []string) error {
+	finfos, err := os.ReadDir(dir)
+	if err != nil {
+		return err
+	}
+
+	if len(finfos) != len(exp) {
+		return fmt.Errorf("expected %d directory entries", len(exp))
+	}
+
+	var names []string
+	for _, fi := range finfos {
+		decodedName, err := decode(fi.Name())
+		if err != nil {
+			return err
+		}
+		names = append(names, decodedName)
+	}
+
+	sort.Strings(names)
+	sort.Strings(exp)
+	if len(names) != len(exp) {
+		return fmt.Errorf("directory had wrong number of entries in it")
+	}
+
+	for i, v := range names {
+		if v != exp[i] {
+			return fmt.Errorf("had wrong entry in directory")
+		}
+	}
+	return nil
+}
diff --git a/keystore/memkeystore.go b/keystore/memkeystore.go
new file mode 100644
index 0000000000..0ea62f4e1a
--- /dev/null
+++ b/keystore/memkeystore.go
@@ -0,0 +1,64 @@
+package keystore
+
+import (
+	"errors"
+
+	ci "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+// MemKeystore is an in-memory keystore implementation that is not persisted to
+// any backing storage.
+type MemKeystore struct {
+	keys map[string]ci.PrivKey
+}
+
+// NewMemKeystore creates a MemKeystore.
+func NewMemKeystore() *MemKeystore {
+	return &MemKeystore{make(map[string]ci.PrivKey)}
+}
+
+// Has returns whether or not a key exists in the Keystore
+func (mk *MemKeystore) Has(name string) (bool, error) {
+	_, ok := mk.keys[name]
+	return ok, nil
+}
+
+// Put stores a key in the Keystore
+func (mk *MemKeystore) Put(name string, k ci.PrivKey) error {
+	if name == "" {
+		return errors.New("key name must be at least one character")
+	}
+
+	_, ok := mk.keys[name]
+	if ok {
+		return ErrKeyExists
+	}
+
+	mk.keys[name] = k
+	return nil
+}
+
+// Get retrieves a key from the Keystore
+func (mk *MemKeystore) Get(name string) (ci.PrivKey, error) {
+	k, ok := mk.keys[name]
+	if !ok {
+		return nil, ErrNoSuchKey
+	}
+
+	return k, nil
+}
+
+// Delete removes a key from the Keystore
+func (mk *MemKeystore) Delete(name string) error {
+	delete(mk.keys, name)
+	return nil
+}
+
+// List returns a list of key identifiers
+func (mk *MemKeystore) List() ([]string, error) {
+	out := make([]string, 0, len(mk.keys))
+	for k := range mk.keys {
+		out = append(out, k)
+	}
+	return out, nil
+}
diff --git a/keystore/memkeystore_test.go b/keystore/memkeystore_test.go
new file mode 100644
index 0000000000..907cbbd0e7
--- /dev/null
+++ b/keystore/memkeystore_test.go
@@ -0,0 +1,99 @@
+package keystore
+
+import (
+	"sort"
+	"testing"
+)
+
+func TestMemKeyStoreBasics(t *testing.T) {
+	ks := NewMemKeystore()
+
+	l, err := ks.List()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(l) != 0 {
+		t.Fatal("expected no keys")
+	}
+
+	k1 := privKeyOrFatal(t)
+	k2 := privKeyOrFatal(t)
+	k3 := privKeyOrFatal(t)
+	k4 := privKeyOrFatal(t)
+
+	err = ks.Put("foo", k1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ks.Put("bar", k2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	l, err = ks.List()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sort.Strings(l)
+	if l[0] != "bar" || l[1] != "foo" {
+		t.Fatal("wrong entries listed")
+	}
+
+	err = ks.Put("foo", k3)
+	if err == nil {
+		t.Fatal("should not be able to overwrite key")
+	}
+
+	exist, err := ks.Has("foo")
+	if !exist {
+		t.Fatal("should know it has a key named foo")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	exist, err = ks.Has("nonexistingkey")
+	if exist {
+		t.Fatal("should know it doesn't have a key named nonexistingkey")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Delete("bar"); err != nil {
+		t.Fatal(err)
+	}
+	if err := ks.Put("beep", k3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("boop", k4); err != nil {
+		t.Fatal(err)
+	}
+	if err := assertGetKey(ks, "foo", k1); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertGetKey(ks, "beep", k3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := assertGetKey(ks, "boop", k4); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("..///foo/", k1); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ks.Put("", k1); err == nil {
+		t.Fatal("shouldn't be able to put a key with no name")
+	}
+
+	if err := ks.Put(".foo", k1); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/logo.svg b/logo.svg
new file mode 100644
index 0000000000..735f3ab805
--- /dev/null
+++ b/logo.svg
@@ -0,0 +1 @@
+ 
\ No newline at end of file
diff --git a/mfs/dir.go b/mfs/dir.go
new file mode 100644
index 0000000000..f107925325
--- /dev/null
+++ b/mfs/dir.go
@@ -0,0 +1,427 @@
+package mfs
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"sync"
+	"time"
+
+	ft "github.com/ipfs/boxo/unixfs"
+	uio "github.com/ipfs/boxo/unixfs/io"
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+var ErrNotYetImplemented = errors.New("not yet implemented")
+var ErrInvalidChild = errors.New("invalid child node")
+var ErrDirExists = errors.New("directory already has entry by that name")
+
+// TODO: There's too much functionality associated with this structure,
+// let's organize it (and if possible extract part of it elsewhere)
+// and document the main features of `Directory` here.
+type Directory struct {
+	inode
+
+	// Internal cache with added entries to the directory, its contents
+	// are synced with the underlying `unixfsDir` node in `sync()`.
+	entriesCache map[string]FSNode
+
+	lock sync.Mutex
+	// TODO: What content is being protected here exactly? The entire directory?
+
+	ctx context.Context
+
+	// UnixFS directory implementation used for creating,
+	// reading and editing directories.
+	unixfsDir uio.Directory
+
+	modTime time.Time
+}
+
+// NewDirectory constructs a new MFS directory.
+//
+// You probably don't want to call this directly. Instead, construct a new root
+// using NewRoot.
+func NewDirectory(ctx context.Context, name string, node ipld.Node, parent parent, dserv ipld.DAGService) (*Directory, error) {
+	db, err := uio.NewDirectoryFromNode(dserv, node)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Directory{
+		inode: inode{
+			name:       name,
+			parent:     parent,
+			dagService: dserv,
+		},
+		ctx:          ctx,
+		unixfsDir:    db,
+		entriesCache: make(map[string]FSNode),
+		modTime:      time.Now(),
+	}, nil
+}
+
+// GetCidBuilder gets the CID builder of the root node
+func (d *Directory) GetCidBuilder() cid.Builder {
+	return d.unixfsDir.GetCidBuilder()
+}
+
+// SetCidBuilder sets the CID builder
+func (d *Directory) SetCidBuilder(b cid.Builder) {
+	d.unixfsDir.SetCidBuilder(b)
+}
+
+// This method implements the `parent` interface. It first does the local
+// update of the child entry in the underlying UnixFS directory and saves
+// the newly created directory node with the updated entry in the DAG
+// service. Then it propagates the update upwards (through this same
+// interface) repeating the whole process in the parent.
+func (d *Directory) updateChildEntry(c child) error {
+	newDirNode, err := d.localUpdate(c)
+	if err != nil {
+		return err
+	}
+
+	// Continue to propagate the update process upwards
+	// (all the way up to the root).
+	return d.parent.updateChildEntry(child{d.name, newDirNode})
+}
+
+// This method implements the part of `updateChildEntry` that needs
+// to be locked: it updates the UnixFS layer and generates the new
+// node reflecting the update. It also stores the new node in the
+// DAG layer.
+func (d *Directory) localUpdate(c child) (*dag.ProtoNode, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	err := d.updateChild(c)
+	if err != nil {
+		return nil, err
+	}
+	// TODO: Clearly define how we are propagating changes to lower layers
+	// like UnixFS.
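+	// The code below regenerates the directory node from the updated
+	// unixfsDir and persists it in the DAG service, so a copy can be
+	// handed back for the parent update.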
+
+	nd, err := d.unixfsDir.GetNode()
+	if err != nil {
+		return nil, err
+	}
+
+	pbnd, ok := nd.(*dag.ProtoNode)
+	if !ok {
+		return nil, dag.ErrNotProtobuf
+	}
+
+	err = d.dagService.Add(d.ctx, nd)
+	if err != nil {
+		return nil, err
+	}
+
+	return pbnd.Copy().(*dag.ProtoNode), nil
+	// TODO: Why do we need a copy?
+}
+
+// Update child entry in the underlying UnixFS directory.
+func (d *Directory) updateChild(c child) error {
+	err := d.unixfsDir.AddChild(d.ctx, c.Name, c.Node)
+	if err != nil {
+		return err
+	}
+
+	d.modTime = time.Now()
+
+	return nil
+}
+
+func (d *Directory) Type() NodeType {
+	return TDir
+}
+
+// childNode returns a FSNode under this directory by the given name if it exists.
+// It does *not* check the cached dirs and files
+func (d *Directory) childNode(name string) (FSNode, error) {
+	nd, err := d.childFromDag(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return d.cacheNode(name, nd)
+}
+
+// cacheNode caches a node into d.entriesCache and returns the wrapped FSNode.
+func (d *Directory) cacheNode(name string, nd ipld.Node) (FSNode, error) {
+	switch nd := nd.(type) {
+	case *dag.ProtoNode:
+		fsn, err := ft.FSNodeFromBytes(nd.Data())
+		if err != nil {
+			return nil, err
+		}
+
+		switch fsn.Type() {
+		case ft.TDirectory, ft.THAMTShard:
+			ndir, err := NewDirectory(d.ctx, name, nd, d, d.dagService)
+			if err != nil {
+				return nil, err
+			}
+
+			d.entriesCache[name] = ndir
+			return ndir, nil
+		case ft.TFile, ft.TRaw, ft.TSymlink:
+			nfi, err := NewFile(name, nd, d, d.dagService)
+			if err != nil {
+				return nil, err
+			}
+			d.entriesCache[name] = nfi
+			return nfi, nil
+		case ft.TMetadata:
+			return nil, ErrNotYetImplemented
+		default:
+			return nil, ErrInvalidChild
+		}
+	case *dag.RawNode:
+		nfi, err := NewFile(name, nd, d, d.dagService)
+		if err != nil {
+			return nil, err
+		}
+		d.entriesCache[name] = nfi
+		return nfi, nil
+	default:
+		return nil, fmt.Errorf("unrecognized node type in cache node")
+	}
+}
+
+// Child returns the child of this directory by the given name
+func (d *Directory) Child(name string) (FSNode, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+	return d.childUnsync(name)
+}
+
+func (d *Directory) Uncache(name string) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+	delete(d.entriesCache, name)
+}
+
+// childFromDag searches through this directory's dag node for a child link
+// with the given name
+func (d *Directory) childFromDag(name string) (ipld.Node, error) {
+	return d.unixfsDir.Find(d.ctx, name)
+}
+
+// childUnsync returns the child under this directory by the given name
+// without locking, useful for operations which already hold a lock
+func (d *Directory) childUnsync(name string) (FSNode, error) {
+	entry, ok := d.entriesCache[name]
+	if ok {
+		return entry, nil
+	}
+
+	return d.childNode(name)
+}
+
+type NodeListing struct {
+	Name string
+	Type int
+	Size int64
+	Hash string
+}
+
+func (d *Directory) ListNames(ctx context.Context) ([]string, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	var out []string
+	err := d.unixfsDir.ForEachLink(ctx, func(l *ipld.Link) error {
+		out = append(out, l.Name)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+func (d *Directory) List(ctx context.Context) ([]NodeListing, error) {
+	var out []NodeListing
+	err := d.ForEachEntry(ctx, func(nl NodeListing) error {
+		out = append(out, nl)
+		return nil
+	})
+	return out, err
+}
+
+func (d *Directory) ForEachEntry(ctx context.Context, f func(NodeListing) error) error {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+	return
d.unixfsDir.ForEachLink(ctx, func(l *ipld.Link) error { + c, err := d.childUnsync(l.Name) + if err != nil { + return err + } + + nd, err := c.GetNode() + if err != nil { + return err + } + + child := NodeListing{ + Name: l.Name, + Type: int(c.Type()), + Hash: nd.Cid().String(), + } + + if c, ok := c.(*File); ok { + size, err := c.Size() + if err != nil { + return err + } + child.Size = size + } + + return f(child) + }) +} + +func (d *Directory) Mkdir(name string) (*Directory, error) { + d.lock.Lock() + defer d.lock.Unlock() + + fsn, err := d.childUnsync(name) + if err == nil { + switch fsn := fsn.(type) { + case *Directory: + return fsn, os.ErrExist + case *File: + return nil, os.ErrExist + default: + return nil, fmt.Errorf("unrecognized type: %#v", fsn) + } + } + + ndir := ft.EmptyDirNode() + ndir.SetCidBuilder(d.GetCidBuilder()) + + err = d.dagService.Add(d.ctx, ndir) + if err != nil { + return nil, err + } + + err = d.unixfsDir.AddChild(d.ctx, name, ndir) + if err != nil { + return nil, err + } + + dirobj, err := NewDirectory(d.ctx, name, ndir, d, d.dagService) + if err != nil { + return nil, err + } + + d.entriesCache[name] = dirobj + return dirobj, nil +} + +func (d *Directory) Unlink(name string) error { + d.lock.Lock() + defer d.lock.Unlock() + + delete(d.entriesCache, name) + + return d.unixfsDir.RemoveChild(d.ctx, name) +} + +func (d *Directory) Flush() error { + nd, err := d.GetNode() + if err != nil { + return err + } + + return d.parent.updateChildEntry(child{d.name, nd}) +} + +// AddChild adds the node 'nd' under this directory giving it the name 'name' +func (d *Directory) AddChild(name string, nd ipld.Node) error { + d.lock.Lock() + defer d.lock.Unlock() + + _, err := d.childUnsync(name) + if err == nil { + return ErrDirExists + } + + err = d.dagService.Add(d.ctx, nd) + if err != nil { + return err + } + + err = d.unixfsDir.AddChild(d.ctx, name, nd) + if err != nil { + return err + } + + d.modTime = time.Now() + return nil +} + +func (d *Directory) sync() error { + for name, entry := range d.entriesCache { + nd, err := entry.GetNode() + if err != nil { + return err + } + + err = d.updateChild(child{name, nd}) + if err != nil { + return err + } + } + + // TODO: Should we clean the cache here? + + return nil +} + +func (d *Directory) Path() string { + cur := d + var out string + for cur != nil { + switch parent := cur.parent.(type) { + case *Directory: + out = path.Join(cur.name, out) + cur = parent + case *Root: + return "/" + out + default: + panic("directory parent neither a directory nor a root") + } + } + return out +} + +func (d *Directory) GetNode() (ipld.Node, error) { + d.lock.Lock() + defer d.lock.Unlock() + + err := d.sync() + if err != nil { + return nil, err + } + + nd, err := d.unixfsDir.GetNode() + if err != nil { + return nil, err + } + + err = d.dagService.Add(d.ctx, nd) + if err != nil { + return nil, err + } + + return nd.Copy(), err +} diff --git a/mfs/fd.go b/mfs/fd.go new file mode 100644 index 0000000000..7f9897b1cb --- /dev/null +++ b/mfs/fd.go @@ -0,0 +1,197 @@ +package mfs + +import ( + "fmt" + "io" + + mod "github.com/ipfs/boxo/unixfs/mod" + + context "context" + + ipld "github.com/ipfs/go-ipld-format" +) + +type state uint8 + +const ( + stateCreated state = iota + stateFlushed + stateDirty + stateClosed +) + +// One `File` can have many `FileDescriptor`s associated to it +// (only one if it's RW, many if they are RO, see `File.desclock`). 
+// A `FileDescriptor` contains the "view" of the file (through an +// instance of a `DagModifier`), that's why it (and not the `File`) +// has the responsibility to `Flush` (which crystallizes that view +// in the `File`'s `Node`). +type FileDescriptor interface { + io.Reader + CtxReadFull(context.Context, []byte) (int, error) + + io.Writer + io.WriterAt + + io.Closer + io.Seeker + + Truncate(int64) error + Size() (int64, error) + Flush() error +} + +type fileDescriptor struct { + inode *File + mod *mod.DagModifier + flags Flags + + state state +} + +func (fi *fileDescriptor) checkWrite() error { + if fi.state == stateClosed { + return ErrClosed + } + if !fi.flags.Write { + return fmt.Errorf("file is read-only") + } + return nil +} + +func (fi *fileDescriptor) checkRead() error { + if fi.state == stateClosed { + return ErrClosed + } + if !fi.flags.Read { + return fmt.Errorf("file is write-only") + } + return nil +} + +// Size returns the size of the file referred to by this descriptor +func (fi *fileDescriptor) Size() (int64, error) { + return fi.mod.Size() +} + +// Truncate truncates the file to size +func (fi *fileDescriptor) Truncate(size int64) error { + if err := fi.checkWrite(); err != nil { + return fmt.Errorf("truncate failed: %s", err) + } + fi.state = stateDirty + return fi.mod.Truncate(size) +} + +// Write writes the given data to the file at its current offset +func (fi *fileDescriptor) Write(b []byte) (int, error) { + if err := fi.checkWrite(); err != nil { + return 0, fmt.Errorf("write failed: %s", err) + } + fi.state = stateDirty + return fi.mod.Write(b) +} + +// Read reads into the given buffer from the current offset +func (fi *fileDescriptor) Read(b []byte) (int, error) { + if err := fi.checkRead(); err != nil { + return 0, fmt.Errorf("read failed: %s", err) + } + return fi.mod.Read(b) +} + +// Read reads into the given buffer from the current offset +func (fi *fileDescriptor) CtxReadFull(ctx context.Context, b []byte) (int, error) { + if err := fi.checkRead(); err != nil { + return 0, fmt.Errorf("read failed: %s", err) + } + return fi.mod.CtxReadFull(ctx, b) +} + +// Close flushes, then propogates the modified dag node up the directory structure +// and signals a republish to occur +func (fi *fileDescriptor) Close() error { + if fi.state == stateClosed { + return ErrClosed + } + if fi.flags.Write { + defer fi.inode.desclock.Unlock() + } else if fi.flags.Read { + defer fi.inode.desclock.RUnlock() + } + err := fi.flushUp(fi.flags.Sync) + fi.state = stateClosed + return err +} + +// Flush generates a new version of the node of the underlying +// UnixFS directory (adding it to the DAG service) and updates +// the entry in the parent directory (setting `fullSync` to +// propagate the update all the way to the root). +func (fi *fileDescriptor) Flush() error { + return fi.flushUp(true) +} + +// flushUp syncs the file and adds it to the dagservice +// it *must* be called with the File's lock taken +// If `fullSync` is set the changes are propagated upwards +// (the `Up` part of `flushUp`). 
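+//
+// Illustrative call chain (editor's sketch of how the public API in this
+// file and file.go reaches flushUp; `file` stands for any open *File):
+//
+//	fd, _ := file.Open(Flags{Write: true, Sync: true})
+//	fd.Write([]byte("data")) // state -> stateDirty
+//	fd.Flush()               // flushUp(true): re-add node, update parents
+//	fd.Close()               // flushUp(fi.flags.Sync), then stateClosed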
+func (fi *fileDescriptor) flushUp(fullSync bool) error { + var nd ipld.Node + switch fi.state { + case stateCreated, stateDirty: + var err error + nd, err = fi.mod.GetNode() + if err != nil { + return err + } + err = fi.inode.dagService.Add(context.TODO(), nd) + if err != nil { + return err + } + + // TODO: Very similar logic to the update process in + // `Directory`, the logic should be unified, both structures + // (`File` and `Directory`) are backed by a IPLD node with + // a UnixFS format that is the actual target of the update + // (regenerating it and adding it to the DAG service). + fi.inode.nodeLock.Lock() + // Always update the file descriptor's inode with the created/modified node. + fi.inode.node = nd + // Save the members to be used for subsequent calls + parent := fi.inode.parent + name := fi.inode.name + fi.inode.nodeLock.Unlock() + + // Bubble up the update's to the parent, only if fullSync is set to true. + if fullSync { + if err := parent.updateChildEntry(child{name, nd}); err != nil { + return err + } + } + + fi.state = stateFlushed + return nil + case stateFlushed: + return nil + default: + panic("invalid state") + } +} + +// Seek implements io.Seeker +func (fi *fileDescriptor) Seek(offset int64, whence int) (int64, error) { + if fi.state == stateClosed { + return 0, fmt.Errorf("seek failed: %s", ErrClosed) + } + return fi.mod.Seek(offset, whence) +} + +// Write At writes the given bytes at the offset 'at' +func (fi *fileDescriptor) WriteAt(b []byte, at int64) (int, error) { + if err := fi.checkWrite(); err != nil { + return 0, fmt.Errorf("write-at failed: %s", err) + } + fi.state = stateDirty + return fi.mod.WriteAt(b, at) +} diff --git a/mfs/file.go b/mfs/file.go new file mode 100644 index 0000000000..24203e6a00 --- /dev/null +++ b/mfs/file.go @@ -0,0 +1,179 @@ +package mfs + +import ( + "context" + "fmt" + "sync" + + ft "github.com/ipfs/boxo/unixfs" + mod "github.com/ipfs/boxo/unixfs/mod" + dag "github.com/ipfs/boxo/ipld/merkledag" + + chunker "github.com/ipfs/boxo/chunker" + ipld "github.com/ipfs/go-ipld-format" +) + +// File represents a file in the MFS, its logic its mainly targeted +// to coordinating (potentially many) `FileDescriptor`s pointing to +// it. +type File struct { + inode + + // Lock to coordinate the `FileDescriptor`s associated to this file. + desclock sync.RWMutex + + // This isn't any node, it's the root node that represents the + // entire DAG of nodes that comprise the file. + // TODO: Rename, there should be an explicit term for these root nodes + // of a particular sub-DAG that abstract an upper layer's entity. + node ipld.Node + + // Lock around the `node` that represents this file, necessary because + // there may be many `FileDescriptor`s operating on this `File`. + nodeLock sync.RWMutex + + RawLeaves bool +} + +// NewFile returns a NewFile object with the given parameters. If the +// Cid version is non-zero RawLeaves will be enabled. 
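+//
+// Minimal usage sketch (editor's illustration; `parentDir` and `dserv`
+// stand for any existing parent directory and DAG service):
+//
+//	nd := dag.NodeWithData(ft.FilePBData(nil, 0)) // empty CIDv0 UnixFS file
+//	fi, err := NewFile("hello.txt", nd, parentDir, dserv)
+//	// fi.RawLeaves is false here; a CIDv1 node would set it to true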
+func NewFile(name string, node ipld.Node, parent parent, dserv ipld.DAGService) (*File, error) { + fi := &File{ + inode: inode{ + name: name, + parent: parent, + dagService: dserv, + }, + node: node, + } + if node.Cid().Prefix().Version > 0 { + fi.RawLeaves = true + } + return fi, nil +} + +func (fi *File) Open(flags Flags) (_ FileDescriptor, _retErr error) { + if flags.Write { + fi.desclock.Lock() + defer func() { + if _retErr != nil { + fi.desclock.Unlock() + } + }() + } else if flags.Read { + fi.desclock.RLock() + defer func() { + if _retErr != nil { + fi.desclock.RUnlock() + } + }() + } else { + return nil, fmt.Errorf("file opened for neither reading nor writing") + } + + fi.nodeLock.RLock() + node := fi.node + fi.nodeLock.RUnlock() + + // TODO: Move this `switch` logic outside (maybe even + // to another package, this seems like a job of UnixFS), + // `NewDagModifier` uses the IPLD node, we're not + // extracting anything just doing a safety check. + switch node := node.(type) { + case *dag.ProtoNode: + fsn, err := ft.FSNodeFromBytes(node.Data()) + if err != nil { + return nil, err + } + + switch fsn.Type() { + default: + return nil, fmt.Errorf("unsupported fsnode type for 'file'") + case ft.TSymlink: + return nil, fmt.Errorf("symlinks not yet supported") + case ft.TFile, ft.TRaw: + // OK case + } + case *dag.RawNode: + // Ok as well. + } + + dmod, err := mod.NewDagModifier(context.TODO(), node, fi.dagService, chunker.DefaultSplitter) + // TODO: Remove the use of the `chunker` package here, add a new `NewDagModifier` in + // `go-unixfs` with the `DefaultSplitter` already included. + if err != nil { + return nil, err + } + dmod.RawLeaves = fi.RawLeaves + + return &fileDescriptor{ + inode: fi, + flags: flags, + mod: dmod, + state: stateCreated, + }, nil +} + +// Size returns the size of this file +// TODO: Should we be providing this API? +// TODO: There's already a `FileDescriptor.Size()` that +// through the `DagModifier`'s `fileSize` function is doing +// pretty much the same thing as here, we should at least call +// that function and wrap the `ErrNotUnixfs` with an MFS text. +func (fi *File) Size() (int64, error) { + fi.nodeLock.RLock() + defer fi.nodeLock.RUnlock() + switch nd := fi.node.(type) { + case *dag.ProtoNode: + fsn, err := ft.FSNodeFromBytes(nd.Data()) + if err != nil { + return 0, err + } + return int64(fsn.FileSize()), nil + case *dag.RawNode: + return int64(len(nd.RawData())), nil + default: + return 0, fmt.Errorf("unrecognized node type in mfs/file.Size()") + } +} + +// GetNode returns the dag node associated with this file +// TODO: Use this method and do not access the `nodeLock` directly anywhere else. +func (fi *File) GetNode() (ipld.Node, error) { + fi.nodeLock.RLock() + defer fi.nodeLock.RUnlock() + return fi.node, nil +} + +// TODO: Tight coupling with the `FileDescriptor`, at the +// very least this should be an independent function that +// takes a `File` argument and automates the open/flush/close +// operations. +// TODO: Why do we need to flush a file that isn't opened? +// (the `OpenWriteOnly` seems to implicitly be targeting a +// closed file, a file we forgot to flush? can we close +// a file without flushing?) +func (fi *File) Flush() error { + // open the file in fullsync mode + fd, err := fi.Open(Flags{Write: true, Sync: true}) + if err != nil { + return err + } + + defer fd.Close() + + return fd.Flush() +} + +func (fi *File) Sync() error { + // just being able to take the writelock means the descriptor is synced + // TODO: Why? 
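+	// Editor's note: Open with Flags{Write: true} holds desclock
+	// exclusively until Close releases it, so acquiring the write lock
+	// here proves that no writable descriptor is currently open and that
+	// any pending change was already flushed when that descriptor closed.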
+ fi.desclock.Lock() + defer fi.desclock.Unlock() // Defer works around "empty critical section (SA2001)" + return nil +} + +// Type returns the type FSNode this is +func (fi *File) Type() NodeType { + return TFile +} diff --git a/mfs/inode.go b/mfs/inode.go new file mode 100644 index 0000000000..50bed0b38e --- /dev/null +++ b/mfs/inode.go @@ -0,0 +1,21 @@ +package mfs + +import ( + ipld "github.com/ipfs/go-ipld-format" +) + +// inode abstracts the common characteristics of the MFS `File` +// and `Directory`. All of its attributes are initialized at +// creation. +type inode struct { + // name of this `inode` in the MFS path (the same value + // is also stored as the name of the DAG link). + name string + + // parent directory of this `inode` (which may be the `Root`). + parent parent + + // dagService used to store modifications made to the contents + // of the file or directory the `inode` belongs to. + dagService ipld.DAGService +} diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go new file mode 100644 index 0000000000..e99e73eaac --- /dev/null +++ b/mfs/mfs_test.go @@ -0,0 +1,1421 @@ +package mfs + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math/rand" + "os" + "sort" + "sync" + "testing" + "time" + + path "github.com/ipfs/boxo/path" + + bserv "github.com/ipfs/boxo/blockservice" + ft "github.com/ipfs/boxo/unixfs" + importer "github.com/ipfs/boxo/unixfs/importer" + uio "github.com/ipfs/boxo/unixfs/io" + dag "github.com/ipfs/boxo/ipld/merkledag" + + bstore "github.com/ipfs/boxo/blockstore" + chunker "github.com/ipfs/boxo/chunker" + offline "github.com/ipfs/boxo/exchange/offline" + u "github.com/ipfs/boxo/util" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + ipld "github.com/ipfs/go-ipld-format" +) + +func emptyDirNode() *dag.ProtoNode { + return dag.NodeWithData(ft.FolderPBData()) +} + +func getDagserv(t *testing.T) ipld.DAGService { + db := dssync.MutexWrap(ds.NewMapDatastore()) + bs := bstore.NewBlockstore(db) + blockserv := bserv.New(bs, offline.Exchange(bs)) + return dag.NewDAGService(blockserv) +} + +func getRandFile(t *testing.T, ds ipld.DAGService, size int64) ipld.Node { + r := io.LimitReader(u.NewTimeSeededRand(), size) + return fileNodeFromReader(t, ds, r) +} + +func fileNodeFromReader(t *testing.T, ds ipld.DAGService, r io.Reader) ipld.Node { + nd, err := importer.BuildDagFromReader(ds, chunker.DefaultSplitter(r)) + if err != nil { + t.Fatal(err) + } + return nd +} + +func mkdirP(t *testing.T, root *Directory, pth string) *Directory { + dirs := path.SplitList(pth) + cur := root + for _, d := range dirs { + n, err := cur.Mkdir(d) + if err != nil && err != os.ErrExist { + t.Fatal(err) + } + if err == os.ErrExist { + fsn, err := cur.Child(d) + if err != nil { + t.Fatal(err) + } + switch fsn := fsn.(type) { + case *Directory: + n = fsn + case *File: + t.Fatal("tried to make a directory where a file already exists") + } + } + + cur = n + } + return cur +} + +func assertDirNotAtPath(root *Directory, pth string) error { + _, err := DirLookup(root, pth) + if err == nil { + return fmt.Errorf("%s exists in %s", pth, root.name) + } + return nil +} + +func assertDirAtPath(root *Directory, pth string, children []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fsn, err := DirLookup(root, pth) + if err != nil { + return err + } + + dir, ok := fsn.(*Directory) + if !ok { + return fmt.Errorf("%s was not a directory", pth) + } + + listing, err := 
dir.List(ctx) + if err != nil { + return err + } + + var names []string + for _, d := range listing { + names = append(names, d.Name) + } + + sort.Strings(children) + sort.Strings(names) + if !compStrArrs(children, names) { + return errors.New("directories children did not match") + } + + return nil +} + +func compStrArrs(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true +} + +func assertFileAtPath(ds ipld.DAGService, root *Directory, expn ipld.Node, pth string) error { + exp, ok := expn.(*dag.ProtoNode) + if !ok { + return dag.ErrNotProtobuf + } + + parts := path.SplitList(pth) + cur := root + for i, d := range parts[:len(parts)-1] { + next, err := cur.Child(d) + if err != nil { + return fmt.Errorf("looking for %s failed: %s", pth, err) + } + + nextDir, ok := next.(*Directory) + if !ok { + return fmt.Errorf("%s points to a non-directory", parts[:i+1]) + } + + cur = nextDir + } + + last := parts[len(parts)-1] + finaln, err := cur.Child(last) + if err != nil { + return err + } + + file, ok := finaln.(*File) + if !ok { + return fmt.Errorf("%s was not a file", pth) + } + + rfd, err := file.Open(Flags{Read: true}) + if err != nil { + return err + } + + out, err := io.ReadAll(rfd) + if err != nil { + return err + } + + expbytes, err := catNode(ds, exp) + if err != nil { + return err + } + + if !bytes.Equal(out, expbytes) { + return fmt.Errorf("incorrect data at path") + } + return nil +} + +func catNode(ds ipld.DAGService, nd *dag.ProtoNode) ([]byte, error) { + r, err := uio.NewDagReader(context.TODO(), nd, ds) + if err != nil { + return nil, err + } + defer r.Close() + + return io.ReadAll(r) +} + +func setupRoot(ctx context.Context, t *testing.T) (ipld.DAGService, *Root) { + ds := getDagserv(t) + + root := emptyDirNode() + rt, err := NewRoot(ctx, ds, root, func(ctx context.Context, c cid.Cid) error { + fmt.Println("PUBLISHED: ", c) + return nil + }) + + if err != nil { + t.Fatal(err) + } + + return ds, rt +} + +func TestBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + rootdir := rt.GetDirectory() + + // test making a basic dir + _, err := rootdir.Mkdir("a") + if err != nil { + t.Fatal(err) + } + + path := "a/b/c/d/e/f/g" + d := mkdirP(t, rootdir, path) + + fi := getRandFile(t, ds, 1000) + + // test inserting that file + err = d.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + err = assertFileAtPath(ds, rootdir, fi, "a/b/c/d/e/f/g/afile") + if err != nil { + t.Fatal(err) + } +} + +func TestMkdir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, rt := setupRoot(ctx, t) + + rootdir := rt.GetDirectory() + + dirsToMake := []string{"a", "B", "foo", "bar", "cats", "fish"} + sort.Strings(dirsToMake) // sort for easy comparing later + + for _, d := range dirsToMake { + _, err := rootdir.Mkdir(d) + if err != nil { + t.Fatal(err) + } + } + + err := assertDirAtPath(rootdir, "/", dirsToMake) + if err != nil { + t.Fatal(err) + } + + for _, d := range dirsToMake { + mkdirP(t, rootdir, "a/"+d) + } + + err = assertDirAtPath(rootdir, "/a", dirsToMake) + if err != nil { + t.Fatal(err) + } + + // mkdir over existing dir should fail + _, err = rootdir.Mkdir("a") + if err == nil { + t.Fatal("should have failed!") + } +} + +func TestDirectoryLoadFromDag(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, 
t) + + rootdir := rt.GetDirectory() + + nd := getRandFile(t, ds, 1000) + err := ds.Add(ctx, nd) + if err != nil { + t.Fatal(err) + } + + fihash := nd.Cid() + + dir := emptyDirNode() + err = ds.Add(ctx, dir) + if err != nil { + t.Fatal(err) + } + + dirhash := dir.Cid() + + top := emptyDirNode() + top.SetLinks([]*ipld.Link{ + { + Name: "a", + Cid: fihash, + }, + { + Name: "b", + Cid: dirhash, + }, + }) + + err = rootdir.AddChild("foo", top) + if err != nil { + t.Fatal(err) + } + + // get this dir + topi, err := rootdir.Child("foo") + if err != nil { + t.Fatal(err) + } + + topd := topi.(*Directory) + + path := topd.Path() + if path != "/foo" { + t.Fatalf("Expected path '/foo', got '%s'", path) + } + + // mkdir over existing but unloaded child file should fail + _, err = topd.Mkdir("a") + if err == nil { + t.Fatal("expected to fail!") + } + + // mkdir over existing but unloaded child dir should fail + _, err = topd.Mkdir("b") + if err == nil { + t.Fatal("expected to fail!") + } + + // adding a child over an existing path fails + err = topd.AddChild("b", nd) + if err == nil { + t.Fatal("expected to fail!") + } + + err = assertFileAtPath(ds, rootdir, nd, "foo/a") + if err != nil { + t.Fatal(err) + } + + err = assertDirAtPath(rootdir, "foo/b", nil) + if err != nil { + t.Fatal(err) + } + + err = rootdir.Unlink("foo") + if err != nil { + t.Fatal(err) + } + + err = assertDirAtPath(rootdir, "", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestMvFile(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dagService, rt := setupRoot(ctx, t) + rootDir := rt.GetDirectory() + + fi := getRandFile(t, dagService, 1000) + + err := rootDir.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + err = Mv(rt, "/afile", "/bfile") + if err != nil { + t.Fatal(err) + } + + err = assertFileAtPath(dagService, rootDir, fi, "bfile") + if err != nil { + t.Fatal(err) + } +} + +func TestMvFileToSubdir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dagService, rt := setupRoot(ctx, t) + rootDir := rt.GetDirectory() + + _ = mkdirP(t, rootDir, "test1") + + fi := getRandFile(t, dagService, 1000) + + err := rootDir.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + err = Mv(rt, "/afile", "/test1") + if err != nil { + t.Fatal(err) + } + + err = assertFileAtPath(dagService, rootDir, fi, "test1/afile") + if err != nil { + t.Fatal(err) + } +} + +func TestMvFileToSubdirWithRename(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dagService, rt := setupRoot(ctx, t) + rootDir := rt.GetDirectory() + + _ = mkdirP(t, rootDir, "test1") + + fi := getRandFile(t, dagService, 1000) + + err := rootDir.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + err = Mv(rt, "/afile", "/test1/bfile") + if err != nil { + t.Fatal(err) + } + + err = assertFileAtPath(dagService, rootDir, fi, "test1/bfile") + if err != nil { + t.Fatal(err) + } +} + +func TestMvDir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dagService, rt := setupRoot(ctx, t) + rootDir := rt.GetDirectory() + + _ = mkdirP(t, rootDir, "test1") + d2 := mkdirP(t, rootDir, "test2") + + fi := getRandFile(t, dagService, 1000) + + err := d2.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + err = Mv(rt, "/test2", "/test1") + if err != nil { + t.Fatal(err) + } + + err = assertDirNotAtPath(rootDir, "test2") + if err != nil { + t.Fatal(err) + } + + err = 
assertFileAtPath(dagService, rootDir, fi, "test1/test2/afile") + if err != nil { + t.Fatal(err) + } +} + +func TestMfsFile(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + rootdir := rt.GetDirectory() + + fisize := 1000 + nd := getRandFile(t, ds, 1000) + + err := rootdir.AddChild("file", nd) + if err != nil { + t.Fatal(err) + } + + fsn, err := rootdir.Child("file") + if err != nil { + t.Fatal(err) + } + + fi := fsn.(*File) + + if fi.Type() != TFile { + t.Fatal("some is seriously wrong here") + } + + wfd, err := fi.Open(Flags{Read: true, Write: true, Sync: true}) + if err != nil { + t.Fatal(err) + } + + // assert size is as expected + size, err := fi.Size() + if err != nil { + t.Fatal(err) + } + if size != int64(fisize) { + t.Fatal("size isnt correct") + } + + // write to beginning of file + b := []byte("THIS IS A TEST") + n, err := wfd.Write(b) + if err != nil { + t.Fatal(err) + } + + if n != len(b) { + t.Fatal("didnt write correct number of bytes") + } + + // make sure size hasnt changed + size, err = wfd.Size() + if err != nil { + t.Fatal(err) + } + if size != int64(fisize) { + t.Fatal("size isnt correct") + } + + // seek back to beginning + ns, err := wfd.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + + if ns != 0 { + t.Fatal("didnt seek to beginning") + } + + // read back bytes we wrote + buf := make([]byte, len(b)) + n, err = wfd.Read(buf) + if err != nil { + t.Fatal(err) + } + + if n != len(buf) { + t.Fatal("didnt read enough") + } + + if !bytes.Equal(buf, b) { + t.Fatal("data read was different than data written") + } + + // truncate file to ten bytes + err = wfd.Truncate(10) + if err != nil { + t.Fatal(err) + } + + size, err = wfd.Size() + if err != nil { + t.Fatal(err) + } + + if size != 10 { + t.Fatal("size was incorrect: ", size) + } + + // 'writeAt' to extend it + data := []byte("this is a test foo foo foo") + nwa, err := wfd.WriteAt(data, 5) + if err != nil { + t.Fatal(err) + } + + if nwa != len(data) { + t.Fatal(err) + } + + // assert size once more + size, err = wfd.Size() + if err != nil { + t.Fatal(err) + } + + if size != int64(5+len(data)) { + t.Fatal("size was incorrect") + } + + // close it out! + err = wfd.Close() + if err != nil { + t.Fatal(err) + } + + // make sure we can get node. 
TODO: verify it later + _, err = fi.GetNode() + if err != nil { + t.Fatal(err) + } +} + +func TestMfsDirListNames(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + rootdir := rt.GetDirectory() + + rand.Seed(time.Now().UTC().UnixNano()) + + total := rand.Intn(10) + 1 + fNames := make([]string, 0, total) + + for i := 0; i < total; i++ { + fn := randomName() + fNames = append(fNames, fn) + nd := getRandFile(t, ds, rand.Int63n(1000)+1) + err := rootdir.AddChild(fn, nd) + if err != nil { + t.Fatal(err) + } + } + + list, err := rootdir.ListNames(ctx) + + if err != nil { + t.Fatal(err) + } + + for _, lName := range list { + found := false + for _, fName := range fNames { + if lName == fName { + found = true + break + } + } + if !found { + t.Fatal(lName + " not found in directory listing") + } + } +} + +func randomWalk(d *Directory, n int) (*Directory, error) { + for i := 0; i < n; i++ { + dirents, err := d.List(context.Background()) + if err != nil { + return nil, err + } + + var childdirs []NodeListing + for _, child := range dirents { + if child.Type == int(TDir) { + childdirs = append(childdirs, child) + } + } + if len(childdirs) == 0 { + return d, nil + } + + next := childdirs[rand.Intn(len(childdirs))].Name + + nextD, err := d.Child(next) + if err != nil { + return nil, err + } + + d = nextD.(*Directory) + } + return d, nil +} + +func randomName() string { + set := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_" + length := rand.Intn(10) + 2 + var out string + for i := 0; i < length; i++ { + j := rand.Intn(len(set)) + out += set[j : j+1] + } + return out +} + +func actorMakeFile(d *Directory) error { + d, err := randomWalk(d, rand.Intn(7)) + if err != nil { + return err + } + + name := randomName() + f, err := NewFile(name, dag.NodeWithData(ft.FilePBData(nil, 0)), d, d.dagService) + if err != nil { + return err + } + + wfd, err := f.Open(Flags{Write: true, Sync: true}) + if err != nil { + return err + } + + rread := rand.New(rand.NewSource(time.Now().UnixNano())) + r := io.LimitReader(rread, int64(77*rand.Intn(123)+1)) + _, err = io.Copy(wfd, r) + if err != nil { + return err + } + + return wfd.Close() +} + +func actorMkdir(d *Directory) error { + d, err := randomWalk(d, rand.Intn(7)) + if err != nil { + return err + } + + _, err = d.Mkdir(randomName()) + + return err +} + +func randomFile(d *Directory) (*File, error) { + d, err := randomWalk(d, rand.Intn(6)) + if err != nil { + return nil, err + } + + ents, err := d.List(context.Background()) + if err != nil { + return nil, err + } + + var files []string + for _, e := range ents { + if e.Type == int(TFile) { + files = append(files, e.Name) + } + } + + if len(files) == 0 { + return nil, nil + } + + fname := files[rand.Intn(len(files))] + fsn, err := d.Child(fname) + if err != nil { + return nil, err + } + + fi, ok := fsn.(*File) + if !ok { + return nil, errors.New("file wasn't a file, race?") + } + + return fi, nil +} + +func actorWriteFile(d *Directory) error { + fi, err := randomFile(d) + if err != nil { + return err + } + if fi == nil { + return nil + } + + size := rand.Intn(1024) + 1 + buf := make([]byte, size) + rand.Read(buf) + + s, err := fi.Size() + if err != nil { + return err + } + + wfd, err := fi.Open(Flags{Write: true, Sync: true}) + if err != nil { + return err + } + + offset := rand.Int63n(s) + + n, err := wfd.WriteAt(buf, offset) + if err != nil { + return err + } + if n != size { + return fmt.Errorf("didnt write enough") + } + + 
return wfd.Close() +} + +func actorReadFile(d *Directory) error { + fi, err := randomFile(d) + if err != nil { + return err + } + if fi == nil { + return nil + } + + _, err = fi.Size() + if err != nil { + return err + } + + rfd, err := fi.Open(Flags{Read: true}) + if err != nil { + return err + } + + _, err = io.ReadAll(rfd) + if err != nil { + return err + } + + return rfd.Close() +} + +func testActor(rt *Root, iterations int, errs chan error) { + d := rt.GetDirectory() + for i := 0; i < iterations; i++ { + switch rand.Intn(5) { + case 0: + if err := actorMkdir(d); err != nil { + errs <- err + return + } + case 1, 2: + if err := actorMakeFile(d); err != nil { + errs <- err + return + } + case 3: + if err := actorWriteFile(d); err != nil { + errs <- err + return + } + case 4: + if err := actorReadFile(d); err != nil { + errs <- err + return + } + } + } + errs <- nil +} + +func TestMfsStress(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, rt := setupRoot(ctx, t) + + numroutines := 10 + + errs := make(chan error) + for i := 0; i < numroutines; i++ { + go testActor(rt, 50, errs) + } + + for i := 0; i < numroutines; i++ { + err := <-errs + if err != nil { + t.Fatal(err) + } + } +} + +func TestMfsHugeDir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, rt := setupRoot(ctx, t) + + for i := 0; i < 10000; i++ { + err := Mkdir(rt, fmt.Sprintf("/dir%d", i), MkdirOpts{Mkparents: false, Flush: false}) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMkdirP(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, rt := setupRoot(ctx, t) + + err := Mkdir(rt, "/a/b/c/d/e/f", MkdirOpts{Mkparents: true, Flush: true}) + if err != nil { + t.Fatal(err) + } +} + +func TestConcurrentWriteAndFlush(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + d := mkdirP(t, rt.GetDirectory(), "foo/bar/baz") + fn := fileNodeFromReader(t, ds, bytes.NewBuffer(nil)) + err := d.AddChild("file", fn) + if err != nil { + t.Fatal(err) + } + + nloops := 500 + + wg := new(sync.WaitGroup) + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < nloops; i++ { + err := writeFile(rt, "/foo/bar/baz/file", func(_ []byte) []byte { return []byte("STUFF") }) + if err != nil { + t.Error("file write failed: ", err) + return + } + } + }() + + for i := 0; i < nloops; i++ { + _, err := rt.GetDirectory().GetNode() + if err != nil { + t.Fatal(err) + } + } + + wg.Wait() +} + +func TestFlushing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, rt := setupRoot(ctx, t) + + dir := rt.GetDirectory() + c := mkdirP(t, dir, "a/b/c") + d := mkdirP(t, dir, "a/b/d") + e := mkdirP(t, dir, "a/b/e") + + data := []byte("this is a test\n") + nd1 := dag.NodeWithData(ft.FilePBData(data, uint64(len(data)))) + + if err := c.AddChild("TEST", nd1); err != nil { + t.Fatal(err) + } + if err := d.AddChild("TEST", nd1); err != nil { + t.Fatal(err) + } + if err := e.AddChild("TEST", nd1); err != nil { + t.Fatal(err) + } + if err := dir.AddChild("FILE", nd1); err != nil { + t.Fatal(err) + } + + nd, err := FlushPath(ctx, rt, "/a/b/c/TEST") + if err != nil { + t.Fatal(err) + } + if nd.Cid().String() != "QmYi7wrRFKVCcTB56A6Pep2j31Q5mHfmmu21RzHXu25RVR" { + t.Fatalf("unexpected node from FlushPath: %s", nd.Cid()) + } + + if _, err := FlushPath(ctx, rt, "/a/b/d/TEST"); err != nil { + t.Fatal(err) + } + + if _, err := 
FlushPath(ctx, rt, "/a/b/e/TEST"); err != nil { + t.Fatal(err) + } + + if _, err := FlushPath(ctx, rt, "/FILE"); err != nil { + t.Fatal(err) + } + + rnd, err := dir.GetNode() + if err != nil { + t.Fatal(err) + } + + pbrnd, ok := rnd.(*dag.ProtoNode) + if !ok { + t.Fatal(dag.ErrNotProtobuf) + } + + fsnode, err := ft.FSNodeFromBytes(pbrnd.Data()) + if err != nil { + t.Fatal(err) + } + + if fsnode.Type() != ft.TDirectory { + t.Fatal("root wasnt a directory") + } + + rnk := rnd.Cid() + exp := "QmWMVyhTuyxUrXX3ynz171jq76yY3PktfY9Bxiph7b9ikr" + if rnk.String() != exp { + t.Fatalf("dag looks wrong, expected %s, but got %s", exp, rnk.String()) + } +} + +func readFile(rt *Root, path string, offset int64, buf []byte) error { + n, err := Lookup(rt, path) + if err != nil { + return err + } + + fi, ok := n.(*File) + if !ok { + return fmt.Errorf("%s was not a file", path) + } + + fd, err := fi.Open(Flags{Read: true}) + if err != nil { + return err + } + + _, err = fd.Seek(offset, io.SeekStart) + if err != nil { + return err + } + + nread, err := fd.Read(buf) + if err != nil { + return err + } + if nread != len(buf) { + return fmt.Errorf("didn't read enough") + } + + return fd.Close() +} + +func TestConcurrentReads(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ds, rt := setupRoot(ctx, t) + + rootdir := rt.GetDirectory() + + path := "a/b/c" + d := mkdirP(t, rootdir, path) + + buf := make([]byte, 2048) + rand.Read(buf) + + fi := fileNodeFromReader(t, ds, bytes.NewReader(buf)) + err := d.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + nloops := 100 + for i := 0; i < 10; i++ { + wg.Add(1) + go func(me int) { + defer wg.Done() + mybuf := make([]byte, len(buf)) + for j := 0; j < nloops; j++ { + offset := rand.Intn(len(buf)) + length := rand.Intn(len(buf) - offset) + + err := readFile(rt, "/a/b/c/afile", int64(offset), mybuf[:length]) + if err != nil { + t.Error("readfile failed: ", err) + return + } + + if !bytes.Equal(mybuf[:length], buf[offset:offset+length]) { + t.Error("incorrect read!") + } + } + }(i) + } + wg.Wait() +} + +func writeFile(rt *Root, path string, transform func([]byte) []byte) error { + n, err := Lookup(rt, path) + if err != nil { + return err + } + + fi, ok := n.(*File) + if !ok { + return fmt.Errorf("expected to receive a file, but didnt get one") + } + + fd, err := fi.Open(Flags{Read: true, Write: true, Sync: true}) + if err != nil { + return err + } + defer fd.Close() + + data, err := io.ReadAll(fd) + if err != nil { + return err + } + data = transform(data) + + _, err = fd.Seek(0, io.SeekStart) + if err != nil { + return err + } + err = fd.Truncate(0) + if err != nil { + return err + } + + nw, err := fd.Write(data) + if err != nil { + return err + } + + if nw != len(data) { + return fmt.Errorf("wrote incorrect amount: %d != 10", nw) + } + + return nil +} + +func TestConcurrentWrites(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ds, rt := setupRoot(ctx, t) + + rootdir := rt.GetDirectory() + + path := "a/b/c" + d := mkdirP(t, rootdir, path) + + fi := fileNodeFromReader(t, ds, bytes.NewReader(make([]byte, 0))) + err := d.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + nloops := 100 + errs := make(chan error, 1000) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + var lastSeen uint64 + for j := 0; j < nloops; j++ { + err := writeFile(rt, "a/b/c/afile", func(buf []byte) []byte { + if len(buf) == 
0 { + if lastSeen > 0 { + errs <- fmt.Errorf("file corrupted, last seen: %d", lastSeen) + return buf + } + buf = make([]byte, 8) + } else if len(buf) != 8 { + errs <- fmt.Errorf("buf not the right size") + return buf + } + + num := binary.LittleEndian.Uint64(buf) + if num < lastSeen { + errs <- fmt.Errorf("count decreased: was %d, is %d", lastSeen, num) + return buf + } else { + t.Logf("count correct: was %d, is %d", lastSeen, num) + } + num++ + binary.LittleEndian.PutUint64(buf, num) + lastSeen = num + return buf + }) + if err != nil { + errs <- fmt.Errorf("writefile failed: %v", err) + return + } + } + }() + } + wg.Wait() + close(errs) + for e := range errs { + t.Fatal(e) + } + buf := make([]byte, 8) + if err := readFile(rt, "a/b/c/afile", 0, buf); err != nil { + t.Fatal(err) + } + actual := binary.LittleEndian.Uint64(buf) + expected := uint64(10 * nloops) + if actual != expected { + t.Fatalf("iteration mismatch: expect %d, got %d", expected, actual) + } +} + +func TestFileDescriptors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ds, rt := setupRoot(ctx, t) + dir := rt.GetDirectory() + + nd := dag.NodeWithData(ft.FilePBData(nil, 0)) + fi, err := NewFile("test", nd, dir, ds) + if err != nil { + t.Fatal(err) + } + + // test read only + rfd1, err := fi.Open(Flags{Read: true}) + if err != nil { + t.Fatal(err) + } + + err = rfd1.Truncate(0) + if err == nil { + t.Fatal("shouldnt be able to truncate readonly fd") + } + + _, err = rfd1.Write([]byte{}) + if err == nil { + t.Fatal("shouldnt be able to write to readonly fd") + } + + _, err = rfd1.Read([]byte{}) + if err != nil { + t.Fatalf("expected to be able to read from file: %s", err) + } + + done := make(chan struct{}) + go func() { + defer close(done) + // can open second readonly file descriptor + rfd2, err := fi.Open(Flags{Read: true}) + if err != nil { + t.Error(err) + return + } + + rfd2.Close() + }() + + select { + case <-time.After(time.Second): + t.Fatal("open second file descriptor failed") + case <-done: + } + + if t.Failed() { + return + } + + // test not being able to open for write until reader are closed + done = make(chan struct{}) + go func() { + defer close(done) + wfd1, err := fi.Open(Flags{Write: true, Sync: true}) + if err != nil { + t.Error(err) + } + + wfd1.Close() + }() + + select { + case <-time.After(time.Millisecond * 200): + case <-done: + if t.Failed() { + return + } + + t.Fatal("shouldnt have been able to open file for writing") + } + + err = rfd1.Close() + if err != nil { + t.Fatal(err) + } + + select { + case <-time.After(time.Second): + t.Fatal("should have been able to open write fd after closing read fd") + case <-done: + } + + wfd, err := fi.Open(Flags{Write: true, Sync: true}) + if err != nil { + t.Fatal(err) + } + + _, err = wfd.Read([]byte{}) + if err == nil { + t.Fatal("shouldnt have been able to read from write only filedescriptor") + } + + _, err = wfd.Write([]byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestTruncateAtSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + dir := rt.GetDirectory() + + nd := dag.NodeWithData(ft.FilePBData(nil, 0)) + fi, err := NewFile("test", nd, dir, ds) + if err != nil { + t.Fatal(err) + } + + fd, err := fi.Open(Flags{Read: true, Write: true, Sync: true}) + if err != nil { + t.Fatal(err) + } + defer fd.Close() + _, err = fd.Write([]byte("test")) + if err != nil { + t.Fatal(err) + } + fd.Truncate(4) +} + +func TestTruncateAndWrite(t 
*testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + dir := rt.GetDirectory() + + nd := dag.NodeWithData(ft.FilePBData(nil, 0)) + fi, err := NewFile("test", nd, dir, ds) + if err != nil { + t.Fatal(err) + } + + fd, err := fi.Open(Flags{Read: true, Write: true, Sync: true}) + if err != nil { + t.Fatal(err) + } + defer fd.Close() + for i := 0; i < 200; i++ { + err = fd.Truncate(0) + if err != nil { + t.Fatal(err) + } + l, err := fd.Write([]byte("test")) + if err != nil { + t.Fatal(err) + } + if l != len("test") { + t.Fatal("incorrect write length") + } + + _, err = fd.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + + data, err := io.ReadAll(fd) + if err != nil { + t.Fatal(err) + } + if string(data) != "test" { + t.Fatalf("read error at read %d, read: %v", i, data) + } + } +} + +func TestFSNodeType(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + // check for IsDir + nd := dag.NodeWithData(ft.FolderPBData()) + di, err := NewDirectory(ctx, "test", nd, rt.GetDirectory(), ds) + if err != nil { + t.Fatal(err) + } + ret := IsDir(di) + if !ret { + t.Fatal("FSNode type should be dir, but not") + } + + // check for IsFile + fnd := dag.NodeWithData(ft.FilePBData(nil, 0)) + fi, err := NewFile("test", fnd, rt.GetDirectory(), ds) + if err != nil { + t.Fatal(err) + } + ret = IsFile(fi) + if !ret { + t.Fatal("FSNode type should be file, but not") + } +} diff --git a/mfs/ops.go b/mfs/ops.go new file mode 100644 index 0000000000..78156dd52d --- /dev/null +++ b/mfs/ops.go @@ -0,0 +1,246 @@ +package mfs + +import ( + "context" + "fmt" + "os" + gopath "path" + "strings" + + path "github.com/ipfs/boxo/path" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +// TODO: Evaluate moving all this operations to as `Root` +// methods, since all of them use it as its first argument +// and there is no clear documentation that explains this +// separation. + +// Mv moves the file or directory at 'src' to 'dst' +// TODO: Document what the strings 'src' and 'dst' represent. 
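+//
+// Editor's sketch of the two destination forms handled below (a trailing
+// slash keeps the source name, an explicit name renames):
+//
+//	_ = Mv(r, "/a/file.txt", "/b/")        // -> /b/file.txt
+//	_ = Mv(r, "/a/file.txt", "/b/new.txt") // -> /b/new.txt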
+func Mv(r *Root, src, dst string) error { + srcDirName, srcFname := gopath.Split(src) + + var dstDirName string + var dstFname string + if dst[len(dst)-1] == '/' { + dstDirName = dst + dstFname = srcFname + } else { + dstDirName, dstFname = gopath.Split(dst) + } + + // get parent directories of both src and dest first + dstDir, err := lookupDir(r, dstDirName) + if err != nil { + return err + } + + srcDir, err := lookupDir(r, srcDirName) + if err != nil { + return err + } + + srcObj, err := srcDir.Child(srcFname) + if err != nil { + return err + } + + nd, err := srcObj.GetNode() + if err != nil { + return err + } + + fsn, err := dstDir.Child(dstFname) + if err == nil { + switch n := fsn.(type) { + case *File: + _ = dstDir.Unlink(dstFname) + case *Directory: + dstDir = n + dstFname = srcFname + default: + return fmt.Errorf("unexpected type at path: %s", dst) + } + } else if err != os.ErrNotExist { + return err + } + + err = dstDir.AddChild(dstFname, nd) + if err != nil { + return err + } + + if srcDir.name == dstDir.name && srcFname == dstFname { + return nil + } + + return srcDir.Unlink(srcFname) +} + +func lookupDir(r *Root, path string) (*Directory, error) { + di, err := Lookup(r, path) + if err != nil { + return nil, err + } + + d, ok := di.(*Directory) + if !ok { + return nil, fmt.Errorf("%s is not a directory", path) + } + + return d, nil +} + +// PutNode inserts 'nd' at 'path' in the given mfs +// TODO: Rename or clearly document that this is not about nodes but actually +// MFS files/directories (that in the underlying representation can be +// considered as just nodes). +// TODO: Document why are we handling IPLD nodes in the first place when we +// are actually referring to files/directories (that is, it can't be any +// node, it has to have a specific format). +// TODO: Can this function add directories or just files? What would be the +// difference between adding a directory with this method and creating it +// with `Mkdir`. 
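+//
+// Editor's sketch (hypothetical reader `r`; importer and chunker are the
+// same packages the tests in mfs_test.go use to build file DAGs):
+//
+//	nd, _ := importer.BuildDagFromReader(dserv, chunker.DefaultSplitter(r))
+//	_ = PutNode(root, "/docs/readme.txt", nd)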
+func PutNode(r *Root, path string, nd ipld.Node) error { + dirp, filename := gopath.Split(path) + if filename == "" { + return fmt.Errorf("cannot create file with empty name") + } + + pdir, err := lookupDir(r, dirp) + if err != nil { + return err + } + + return pdir.AddChild(filename, nd) +} + +// MkdirOpts is used by Mkdir +type MkdirOpts struct { + Mkparents bool + Flush bool + CidBuilder cid.Builder +} + +// Mkdir creates a directory at 'path' under the directory 'd', creating +// intermediary directories as needed if 'mkparents' is set to true +func Mkdir(r *Root, pth string, opts MkdirOpts) error { + if pth == "" { + return fmt.Errorf("no path given to Mkdir") + } + parts := path.SplitList(pth) + if parts[0] == "" { + parts = parts[1:] + } + + // allow 'mkdir /a/b/c/' to create c + if parts[len(parts)-1] == "" { + parts = parts[:len(parts)-1] + } + + if len(parts) == 0 { + // this will only happen on 'mkdir /' + if opts.Mkparents { + return nil + } + return fmt.Errorf("cannot create directory '/': Already exists") + } + + cur := r.GetDirectory() + for i, d := range parts[:len(parts)-1] { + fsn, err := cur.Child(d) + if err == os.ErrNotExist && opts.Mkparents { + mkd, err := cur.Mkdir(d) + if err != nil { + return err + } + if opts.CidBuilder != nil { + mkd.SetCidBuilder(opts.CidBuilder) + } + fsn = mkd + } else if err != nil { + return err + } + + next, ok := fsn.(*Directory) + if !ok { + return fmt.Errorf("%s was not a directory", path.Join(parts[:i])) + } + cur = next + } + + final, err := cur.Mkdir(parts[len(parts)-1]) + if err != nil { + if !opts.Mkparents || err != os.ErrExist || final == nil { + return err + } + } + if opts.CidBuilder != nil { + final.SetCidBuilder(opts.CidBuilder) + } + + if opts.Flush { + err := final.Flush() + if err != nil { + return err + } + } + + return nil +} + +// Lookup extracts the root directory and performs a lookup under it. +// TODO: Now that the root is always a directory, can this function +// be collapsed with `DirLookup`? Or at least be made a method of `Root`? +func Lookup(r *Root, path string) (FSNode, error) { + dir := r.GetDirectory() + + return DirLookup(dir, path) +} + +// DirLookup will look up a file or directory at the given path +// under the directory 'd' +func DirLookup(d *Directory, pth string) (FSNode, error) { + pth = strings.Trim(pth, "/") + parts := path.SplitList(pth) + if len(parts) == 1 && parts[0] == "" { + return d, nil + } + + var cur FSNode + cur = d + for i, p := range parts { + chdir, ok := cur.(*Directory) + if !ok { + return nil, fmt.Errorf("cannot access %s: Not a directory", path.Join(parts[:i+1])) + } + + child, err := chdir.Child(p) + if err != nil { + return nil, err + } + + cur = child + } + return cur, nil +} + +// TODO: Document this function and link its functionality +// with the republisher. 
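+//
+// Editor's sketch: FlushPath persists the node at `pth` and then blocks
+// (via WaitPub on the Root's republisher) until the update is published:
+//
+//	nd, err := FlushPath(ctx, rt, "/a/b/c")
+//	// on success, nd.Cid() refers to the flushed, published state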
+func FlushPath(ctx context.Context, rt *Root, pth string) (ipld.Node, error) { + nd, err := Lookup(rt, pth) + if err != nil { + return nil, err + } + + err = nd.Flush() + if err != nil { + return nil, err + } + + rt.repub.WaitPub(ctx) + return nd.GetNode() +} diff --git a/mfs/options.go b/mfs/options.go new file mode 100644 index 0000000000..6bdcd71006 --- /dev/null +++ b/mfs/options.go @@ -0,0 +1,7 @@ +package mfs + +type Flags struct { + Read bool + Write bool + Sync bool +} diff --git a/mfs/repub.go b/mfs/repub.go new file mode 100644 index 0000000000..463810414c --- /dev/null +++ b/mfs/repub.go @@ -0,0 +1,197 @@ +package mfs + +import ( + "context" + "time" + + cid "github.com/ipfs/go-cid" +) + +// PubFunc is the user-defined function that determines exactly what +// logic entails "publishing" a `Cid` value. +type PubFunc func(context.Context, cid.Cid) error + +// Republisher manages when to publish a given entry. +type Republisher struct { + TimeoutLong time.Duration + TimeoutShort time.Duration + RetryTimeout time.Duration + pubfunc PubFunc + + update chan cid.Cid + immediatePublish chan chan struct{} + + ctx context.Context + cancel func() +} + +// NewRepublisher creates a new Republisher object to republish the given root +// using the given short and long time intervals. +func NewRepublisher(ctx context.Context, pf PubFunc, tshort, tlong time.Duration) *Republisher { + ctx, cancel := context.WithCancel(ctx) + return &Republisher{ + TimeoutShort: tshort, + TimeoutLong: tlong, + RetryTimeout: tlong, + update: make(chan cid.Cid, 1), + pubfunc: pf, + immediatePublish: make(chan chan struct{}), + ctx: ctx, + cancel: cancel, + } +} + +// WaitPub waits for the current value to be published (or returns early +// if it already has). +func (rp *Republisher) WaitPub(ctx context.Context) error { + wait := make(chan struct{}) + select { + case rp.immediatePublish <- wait: + case <-ctx.Done(): + return ctx.Err() + } + select { + case <-wait: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (rp *Republisher) Close() error { + // TODO(steb): Wait for `Run` to stop + err := rp.WaitPub(rp.ctx) + rp.cancel() + return err +} + +// Update the current value. The value will be published after a delay but each +// consecutive call to Update may extend this delay up to TimeoutLong. +func (rp *Republisher) Update(c cid.Cid) { + select { + case <-rp.update: + select { + case rp.update <- c: + default: + // Don't try again. If we hit this case, there's a + // concurrent publish and we can safely let that + // concurrent publish win. + } + case rp.update <- c: + } +} + +// Run contains the core logic of the `Republisher`. It calls the user-defined +// `pubfunc` function whenever the `Cid` value is updated to a *new* value. The +// complexity comes from the fact that `pubfunc` may be slow so we need to batch +// updates. +// +// Algorithm: +// 1. When we receive the first update after publishing, we set a `longer` timer. +// 2. When we receive any update, we reset the `quick` timer. +// 3. If either the `quick` timeout or the `longer` timeout elapses, +// we call `publish` with the latest updated value. +// +// The `longer` timer ensures that we delay publishing by at most +// `TimeoutLong`. The `quick` timer allows us to publish sooner if +// it looks like there are no more updates coming down the pipe. +// +// Note: If a publish fails, we retry repeatedly every TimeoutRetry. 
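+//
+// Editor's sketch of the intended wiring (the intervals mirror the ones
+// NewRoot in root.go passes to NewRepublisher):
+//
+//	rp := NewRepublisher(ctx, pubfunc, 300*time.Millisecond, 3*time.Second)
+//	go rp.Run(lastPublishedCid) // consume updates until ctx is done
+//	rp.Update(newRootCid)       // schedule a (possibly batched) publish
+//	_ = rp.WaitPub(ctx)         // optionally block until it lands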
+func (rp *Republisher) Run(lastPublished cid.Cid) { + quick := time.NewTimer(0) + if !quick.Stop() { + <-quick.C + } + longer := time.NewTimer(0) + if !longer.Stop() { + <-longer.C + } + + var toPublish cid.Cid + for rp.ctx.Err() == nil { + var waiter chan struct{} + + select { + case <-rp.ctx.Done(): + return + case newValue := <-rp.update: + // Skip already published values. + if lastPublished.Equals(newValue) { + // Break to the end of the switch to cleanup any + // timers. + toPublish = cid.Undef + break + } + + // If we aren't already waiting to publish something, + // reset the long timeout. + if !toPublish.Defined() { + longer.Reset(rp.TimeoutLong) + } + + // Always reset the short timeout. + quick.Reset(rp.TimeoutShort) + + // Finally, set the new value to publish. + toPublish = newValue + continue + case waiter = <-rp.immediatePublish: + // Make sure to grab the *latest* value to publish. + select { + case toPublish = <-rp.update: + default: + } + + // Avoid publishing duplicate values + if lastPublished.Equals(toPublish) { + toPublish = cid.Undef + } + case <-quick.C: + case <-longer.C: + } + + // Cleanup, publish, and close waiters. + + // 1. Stop any timers. Don't use the `if !t.Stop() { ... }` + // idiom as these timers may not be running. + + quick.Stop() + select { + case <-quick.C: + default: + } + + longer.Stop() + select { + case <-longer.C: + default: + } + + // 2. If we have a value to publish, publish it now. + if toPublish.Defined() { + for { + err := rp.pubfunc(rp.ctx, toPublish) + if err == nil { + break + } + // Keep retrying until we succeed or we abort. + // TODO(steb): We could try pulling new values + // off `update` but that's not critical (and + // complicates this code a bit). We'll pull off + // a new value on the next loop through. + select { + case <-time.After(rp.RetryTimeout): + case <-rp.ctx.Done(): + return + } + } + lastPublished = toPublish + toPublish = cid.Undef + } + + // 3. Trigger anything waiting in `WaitPub`. 
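+		// (Editor's note: this also fires when the waiter's value had
+		// already been published and step 2 did nothing, which is what
+		// lets WaitPub return promptly in that case.)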
+ if waiter != nil { + close(waiter) + } + } +} diff --git a/mfs/repub_test.go b/mfs/repub_test.go new file mode 100644 index 0000000000..6be5624ab1 --- /dev/null +++ b/mfs/repub_test.go @@ -0,0 +1,75 @@ +package mfs + +import ( + "context" + "testing" + "time" + + cid "github.com/ipfs/go-cid" + ci "github.com/libp2p/go-libp2p-testing/ci" +) + +func TestRepublisher(t *testing.T) { + if ci.IsRunning() { + t.Skip("dont run timing tests in CI") + } + + ctx := context.TODO() + + pub := make(chan struct{}) + + pf := func(ctx context.Context, c cid.Cid) error { + pub <- struct{}{} + return nil + } + + testCid1, _ := cid.Parse("QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH") + testCid2, _ := cid.Parse("QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVX") + + tshort := time.Millisecond * 50 + tlong := time.Second / 2 + + rp := NewRepublisher(ctx, pf, tshort, tlong) + go rp.Run(cid.Undef) + + rp.Update(testCid1) + + // should hit short timeout + select { + case <-time.After(tshort * 2): + t.Fatal("publish didnt happen in time") + case <-pub: + } + + cctx, cancel := context.WithCancel(context.Background()) + + go func() { + for { + rp.Update(testCid2) + time.Sleep(time.Millisecond * 10) + select { + case <-cctx.Done(): + return + default: + } + } + }() + + select { + case <-pub: + t.Fatal("shouldnt have received publish yet!") + case <-time.After((tlong * 9) / 10): + } + select { + case <-pub: + case <-time.After(tlong / 2): + t.Fatal("waited too long for pub!") + } + + cancel() + + err := rp.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/mfs/root.go b/mfs/root.go new file mode 100644 index 0000000000..96706a14f9 --- /dev/null +++ b/mfs/root.go @@ -0,0 +1,218 @@ +// package mfs implements an in memory model of a mutable IPFS filesystem. +// TODO: Develop on this line (and move it to `doc.go`). + +package mfs + +import ( + "context" + "errors" + "fmt" + "time" + + ft "github.com/ipfs/boxo/unixfs" + dag "github.com/ipfs/boxo/ipld/merkledag" + + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log" +) + +// TODO: Remove if not used. +var ErrNotExist = errors.New("no such rootfs") +var ErrClosed = errors.New("file closed") + +var log = logging.Logger("mfs") + +// TODO: Remove if not used. +var ErrIsDirectory = errors.New("error: is a directory") + +// The information that an MFS `Directory` has about its children +// when updating one of its entries: when a child mutates it signals +// its parent directory to update its entry (under `Name`) with the +// new content (in `Node`). +type child struct { + Name string + Node ipld.Node +} + +// This interface represents the basic property of MFS directories of updating +// children entries with modified content. Implemented by both the MFS +// `Directory` and `Root` (which is basically a `Directory` with republishing +// support). +// +// TODO: What is `fullsync`? (unnamed `bool` argument) +// TODO: There are two types of persistence/flush that need to be +// distinguished here, one at the DAG level (when I store the modified +// nodes in the DAG service) and one in the UnixFS/MFS level (when I modify +// the entry/link of the directory that pointed to the modified node). +type parent interface { + // Method called by a child to its parent to signal to update the content + // pointed to in the entry by that child's name. The child sends its own + // information in the `child` structure. 
As modifying a directory entry + // entails modifying its contents the parent will also call *its* parent's + // `updateChildEntry` to update the entry pointing to the new directory, + // this mechanism is in turn repeated until reaching the `Root`. + updateChildEntry(c child) error +} + +type NodeType int + +const ( + TFile NodeType = iota + TDir +) + +// FSNode abstracts the `Directory` and `File` structures, it represents +// any child node in the MFS (i.e., all the nodes besides the `Root`). It +// is the counterpart of the `parent` interface which represents any +// parent node in the MFS (`Root` and `Directory`). +// (Not to be confused with the `unixfs.FSNode`.) +type FSNode interface { + GetNode() (ipld.Node, error) + + Flush() error + Type() NodeType +} + +// IsDir checks whether the FSNode is dir type +func IsDir(fsn FSNode) bool { + return fsn.Type() == TDir +} + +// IsFile checks whether the FSNode is file type +func IsFile(fsn FSNode) bool { + return fsn.Type() == TFile +} + +// Root represents the root of a filesystem tree. +type Root struct { + + // Root directory of the MFS layout. + dir *Directory + + repub *Republisher +} + +// NewRoot creates a new Root and starts up a republisher routine for it. +func NewRoot(parent context.Context, ds ipld.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) { + + var repub *Republisher + if pf != nil { + repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3) + + // No need to take the lock here since we just created + // the `Republisher` and no one has access to it yet. + + go repub.Run(node.Cid()) + } + + root := &Root{ + repub: repub, + } + + fsn, err := ft.FSNodeFromBytes(node.Data()) + if err != nil { + log.Error("IPNS pointer was not unixfs node") + // TODO: IPNS pointer? + return nil, err + } + + switch fsn.Type() { + case ft.TDirectory, ft.THAMTShard: + newDir, err := NewDirectory(parent, node.String(), node, root, ds) + if err != nil { + return nil, err + } + + root.dir = newDir + case ft.TFile, ft.TMetadata, ft.TRaw: + return nil, fmt.Errorf("root can't be a file (unixfs type: %s)", fsn.Type()) + // TODO: This special error reporting case doesn't seem worth it, we either + // have a UnixFS directory or we don't. + default: + return nil, fmt.Errorf("unrecognized unixfs type: %s", fsn.Type()) + } + return root, nil +} + +// GetDirectory returns the root directory. +func (kr *Root) GetDirectory() *Directory { + return kr.dir +} + +// Flush signals that an update has occurred since the last publish, +// and updates the Root republisher. +// TODO: We are definitely abusing the "flush" terminology here. +func (kr *Root) Flush() error { + nd, err := kr.GetDirectory().GetNode() + if err != nil { + return err + } + + if kr.repub != nil { + kr.repub.Update(nd.Cid()) + } + return nil +} + +// FlushMemFree flushes the root directory and then uncaches all of its links. +// This has the effect of clearing out potentially stale references and allows +// them to be garbage collected. +// CAUTION: Take care not to ever call this while holding a reference to any +// child directories. Those directories will be bad references and using them +// may have unintended racy side effects. +// A better implemented mfs system (one that does smarter internal caching and +// refcounting) shouldnt need this method. +// TODO: Review the motivation behind this method once the cache system is +// refactored. 
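+//
+// Editor's sketch of intended use, e.g. after a large batch of writes:
+//
+//	if err := rt.FlushMemFree(ctx); err != nil {
+//		return err
+//	}
+//	// cached child entries are dropped; subsequent lookups reload them
+//	// lazily from the DAG service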
+func (kr *Root) FlushMemFree(ctx context.Context) error { + dir := kr.GetDirectory() + + if err := dir.Flush(); err != nil { + return err + } + + dir.lock.Lock() + defer dir.lock.Unlock() + + for name := range dir.entriesCache { + delete(dir.entriesCache, name) + } + // TODO: Can't we just create new maps? + + return nil +} + +// updateChildEntry implements the `parent` interface, and signals +// to the publisher that there are changes ready to be published. +// This is the only thing that separates a `Root` from a `Directory`. +// TODO: Evaluate merging both. +// TODO: The `sync` argument isn't used here (we've already reached +// the top), document it and maybe make it an anonymous variable (if +// that's possible). +func (kr *Root) updateChildEntry(c child) error { + err := kr.GetDirectory().dagService.Add(context.TODO(), c.Node) + if err != nil { + return err + } + // TODO: Why are we not using the inner directory lock nor + // applying the same procedure as `Directory.updateChildEntry`? + + if kr.repub != nil { + kr.repub.Update(c.Node.Cid()) + } + return nil +} + +func (kr *Root) Close() error { + nd, err := kr.GetDirectory().GetNode() + if err != nil { + return err + } + + if kr.repub != nil { + kr.repub.Update(nd.Cid()) + return kr.repub.Close() + } + + return nil +} diff --git a/namesys/base.go b/namesys/base.go new file mode 100644 index 0000000000..06b24bedce --- /dev/null +++ b/namesys/base.go @@ -0,0 +1,126 @@ +package namesys + +import ( + "context" + "strings" + "time" + + opts "github.com/ipfs/boxo/coreiface/options/namesys" + path "github.com/ipfs/boxo/path" +) + +type onceResult struct { + value path.Path + ttl time.Duration + err error +} + +type resolver interface { + resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult +} + +// resolve is a helper for implementing Resolver.ResolveN using resolveOnce. 
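+//
+// Editor's sketch of a caller (ProcessOpts/ResolveOpts are used the same
+// way by the Resolve implementations elsewhere in this package; passing
+// nil yields the default options):
+//
+//	p, err := resolve(ctx, r, "example.com", opts.ProcessOpts(nil))
+//	// p holds the last path emitted before any error occurred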
+func resolve(ctx context.Context, r resolver, name string, options opts.ResolveOpts) (path.Path, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + err := ErrResolveFailed + var p path.Path + + resCh := resolveAsync(ctx, r, name, options) + + for res := range resCh { + p, err = res.Path, res.Err + if err != nil { + break + } + } + + return p, err +} + +func resolveAsync(ctx context.Context, r resolver, name string, options opts.ResolveOpts) <-chan Result { + ctx, span := StartSpan(ctx, "ResolveAsync") + defer span.End() + + resCh := r.resolveOnceAsync(ctx, name, options) + depth := options.Depth + outCh := make(chan Result, 1) + + go func() { + defer close(outCh) + ctx, span := StartSpan(ctx, "ResolveAsync.Worker") + defer span.End() + + var subCh <-chan Result + var cancelSub context.CancelFunc + defer func() { + if cancelSub != nil { + cancelSub() + } + }() + + for { + select { + case res, ok := <-resCh: + if !ok { + resCh = nil + break + } + + if res.err != nil { + emitResult(ctx, outCh, Result{Err: res.err}) + return + } + log.Debugf("resolved %s to %s", name, res.value.String()) + if !strings.HasPrefix(res.value.String(), ipnsPrefix) { + emitResult(ctx, outCh, Result{Path: res.value}) + break + } + + if depth == 1 { + emitResult(ctx, outCh, Result{Path: res.value, Err: ErrResolveRecursion}) + break + } + + subopts := options + if subopts.Depth > 1 { + subopts.Depth-- + } + + var subCtx context.Context + if cancelSub != nil { + // Cancel previous recursive resolve since it won't be used anyways + cancelSub() + } + subCtx, cancelSub = context.WithCancel(ctx) + _ = cancelSub + + p := strings.TrimPrefix(res.value.String(), ipnsPrefix) + subCh = resolveAsync(subCtx, r, p, subopts) + case res, ok := <-subCh: + if !ok { + subCh = nil + break + } + + // We don't bother returning here in case of context timeout as there is + // no good reason to do that, and we may still be able to emit a result + emitResult(ctx, outCh, res) + case <-ctx.Done(): + return + } + if resCh == nil && subCh == nil { + return + } + } + }() + return outCh +} + +func emitResult(ctx context.Context, outCh chan<- Result, r Result) { + select { + case outCh <- r: + case <-ctx.Done(): + } +} diff --git a/namesys/cache.go b/namesys/cache.go new file mode 100644 index 0000000000..8b7f50794c --- /dev/null +++ b/namesys/cache.go @@ -0,0 +1,62 @@ +package namesys + +import ( + "time" + + path "github.com/ipfs/boxo/path" +) + +func (ns *mpns) cacheGet(name string) (path.Path, bool) { + // existence of optional mapping defined via IPFS_NS_MAP is checked first + if ns.staticMap != nil { + val, ok := ns.staticMap[name] + if ok { + return val, true + } + } + + if ns.cache == nil { + return "", false + } + + ientry, ok := ns.cache.Get(name) + if !ok { + return "", false + } + + entry, ok := ientry.(cacheEntry) + if !ok { + // should never happen, purely for sanity + log.Panicf("unexpected type %T in cache for %q.", ientry, name) + } + + if time.Now().Before(entry.eol) { + return entry.val, true + } + + ns.cache.Remove(name) + + return "", false +} + +func (ns *mpns) cacheSet(name string, val path.Path, ttl time.Duration) { + if ns.cache == nil || ttl <= 0 { + return + } + ns.cache.Add(name, cacheEntry{ + val: val, + eol: time.Now().Add(ttl), + }) +} + +func (ns *mpns) cacheInvalidate(name string) { + if ns.cache == nil { + return + } + ns.cache.Remove(name) +} + +type cacheEntry struct { + val path.Path + eol time.Time +} diff --git a/namesys/dns.go b/namesys/dns.go new file mode 100644 index 
0000000000..6f846fcdac --- /dev/null +++ b/namesys/dns.go @@ -0,0 +1,195 @@ +package namesys + +import ( + "context" + "errors" + "fmt" + "net" + gpath "path" + "strings" + + opts "github.com/ipfs/boxo/coreiface/options/namesys" + path "github.com/ipfs/boxo/path" + dns "github.com/miekg/dns" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// LookupTXTFunc is a function that looks up TXT record values. +type LookupTXTFunc func(ctx context.Context, name string) (txt []string, err error) + +// DNSResolver implements a Resolver on DNS domains. +type DNSResolver struct { + lookupTXT LookupTXTFunc + // TODO: maybe some sort of caching? + // cache would need a timeout +} + +// NewDNSResolver constructs a name resolver using DNS TXT records. +func NewDNSResolver(lookup LookupTXTFunc) *DNSResolver { + return &DNSResolver{lookupTXT: lookup} +} + +// Resolve implements Resolver. +func (r *DNSResolver) Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (path.Path, error) { + ctx, span := StartSpan(ctx, "DNSResolver.Resolve") + defer span.End() + + return resolve(ctx, r, name, opts.ProcessOpts(options)) +} + +// ResolveAsync implements Resolver. +func (r *DNSResolver) ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result { + ctx, span := StartSpan(ctx, "DNSResolver.ResolveAsync") + defer span.End() + + return resolveAsync(ctx, r, name, opts.ProcessOpts(options)) +} + +type lookupRes struct { + path path.Path + error error +} + +// resolveOnceAsync implements resolver. +// TXT records for a given domain name should contain a b58-encoded +// multihash. +func (r *DNSResolver) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { + ctx, span := StartSpan(ctx, "DNSResolver.ResolveOnceAsync") + defer span.End() + + var fqdn string + out := make(chan onceResult, 1) + segments := strings.SplitN(name, "/", 2) + domain := segments[0] + + if _, ok := dns.IsDomainName(domain); !ok { + out <- onceResult{err: fmt.Errorf("not a valid domain name: %s", domain)} + close(out) + return out + } + log.Debugf("DNSResolver resolving %s", domain) + + if strings.HasSuffix(domain, ".") { + fqdn = domain + } else { + fqdn = domain + "." + } + + rootChan := make(chan lookupRes, 1) + go workDomain(ctx, r, fqdn, rootChan) + + subChan := make(chan lookupRes, 1) + go workDomain(ctx, r, "_dnslink."+fqdn, subChan) + + appendPath := func(p path.Path) (path.Path, error) { + if len(segments) > 1 { + return path.FromSegments("", strings.TrimRight(p.String(), "/"), segments[1]) + } + return p, nil + } + + go func() { + defer close(out) + ctx, span := StartSpan(ctx, "DNSResolver.ResolveOnceAsync.Worker") + defer span.End() + + var rootResErr, subResErr error + for { + select { + case subRes, ok := <-subChan: + if !ok { + subChan = nil + break + } + if subRes.error == nil { + p, err := appendPath(subRes.path) + emitOnceResult(ctx, out, onceResult{value: p, err: err}) + // Return without waiting for rootRes, since this result + // (for "_dnslink."+fqdn) takes precedence. + return + } + subResErr = subRes.error + case rootRes, ok := <-rootChan: + if !ok { + rootChan = nil + break + } + if rootRes.error == nil { + p, err := appendPath(rootRes.path) + emitOnceResult(ctx, out, onceResult{value: p, err: err}) + // Do not return here. Wait for subRes so that it is + // output last if good, thereby giving subRes precedence.
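+ // (Per the DNSLink convention, a TXT record on the "_dnslink." + // subdomain is preferred over one on the domain itself, which is + // why subRes wins when both lookups succeed.)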
+ } else { + rootResErr = rootRes.error + } + case <-ctx.Done(): + return + } + if subChan == nil && rootChan == nil { + // If here, then both lookups are done + // + // If both lookups failed due to no TXT records with a + // dnslink, then output a more specific error message + if rootResErr == ErrResolveFailed && subResErr == ErrResolveFailed { + // Wrap error so that it can be tested if it is a ErrResolveFailed + err := fmt.Errorf("%w: _dnslink subdomain at %q is missing a TXT record (https://docs.ipfs.tech/concepts/dnslink/)", ErrResolveFailed, gpath.Base(name)) + emitOnceResult(ctx, out, onceResult{err: err}) + } + return + } + } + }() + + return out +} + +func workDomain(ctx context.Context, r *DNSResolver, name string, res chan lookupRes) { + ctx, span := StartSpan(ctx, "DNSResolver.WorkDomain", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + + defer close(res) + + txt, err := r.lookupTXT(ctx, name) + if err != nil { + if dnsErr, ok := err.(*net.DNSError); ok { + // If no TXT records found, return same error as when no text + // records contain dnslink. Otherwise, return the actual error. + if dnsErr.IsNotFound { + err = ErrResolveFailed + } + } + // Could not look up any text records for name + res <- lookupRes{"", err} + return + } + + for _, t := range txt { + p, err := parseEntry(t) + if err == nil { + res <- lookupRes{p, nil} + return + } + } + + // There were no TXT records with a dnslink + res <- lookupRes{"", ErrResolveFailed} +} + +func parseEntry(txt string) (path.Path, error) { + p, err := path.ParseCidToPath(txt) // bare IPFS multihashes + if err == nil { + return p, nil + } + + return tryParseDNSLink(txt) +} + +func tryParseDNSLink(txt string) (path.Path, error) { + parts := strings.SplitN(txt, "=", 2) + if len(parts) == 2 && parts[0] == "dnslink" { + return path.ParsePath(parts[1]) + } + + return "", errors.New("not a valid dnslink entry") +} diff --git a/namesys/dns_test.go b/namesys/dns_test.go new file mode 100644 index 0000000000..a31a53582b --- /dev/null +++ b/namesys/dns_test.go @@ -0,0 +1,184 @@ +package namesys + +import ( + "context" + "fmt" + "testing" + + opts "github.com/ipfs/boxo/coreiface/options/namesys" +) + +type mockDNS struct { + entries map[string][]string +} + +func (m *mockDNS) lookupTXT(ctx context.Context, name string) (txt []string, err error) { + txt, ok := m.entries[name] + if !ok { + return nil, fmt.Errorf("no TXT entry for %s", name) + } + return txt, nil +} + +func TestDnsEntryParsing(t *testing.T) { + goodEntries := []string{ + "QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo", + "dnslink=/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/bar", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo/bar/baz", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo/bar/baz/", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + } + + badEntries := []string{ + "QmYhE8xgFCjGcz6PHgnvJz5NOTCORRECT", + "quux=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=", + "dnslink=/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo", + "dnslink=ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/bar", + } + + for _, e := range goodEntries { + _, err := parseEntry(e) + if err != nil { + t.Log("expected entry to parse correctly!") + t.Log(e) + t.Fatal(err) + } + } + + for _, e 
:= range badEntries { + _, err := parseEntry(e) + if err == nil { + t.Log("expected entry parse to fail!") + t.Fatal(err) + } + } +} + +func newMockDNS() *mockDNS { + return &mockDNS{ + entries: map[string][]string{ + "multihash.example.com.": { + "dnslink=QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "ipfs.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.dipfs.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "dns1.example.com.": { + "dnslink=/ipns/ipfs.example.com", + }, + "dns2.example.com.": { + "dnslink=/ipns/dns1.example.com", + }, + "multi.example.com.": { + "some stuff", + "dnslink=/ipns/dns1.example.com", + "masked dnslink=/ipns/example.invalid", + }, + "equals.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/=equals", + }, + "loop1.example.com.": { + "dnslink=/ipns/loop2.example.com", + }, + "loop2.example.com.": { + "dnslink=/ipns/loop1.example.com", + }, + "_dnslink.dloop1.example.com.": { + "dnslink=/ipns/loop2.example.com", + }, + "_dnslink.dloop2.example.com.": { + "dnslink=/ipns/loop1.example.com", + }, + "bad.example.com.": { + "dnslink=", + }, + "withsegment.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment", + }, + "withrecsegment.example.com.": { + "dnslink=/ipns/withsegment.example.com/subsub", + }, + "withtrailing.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/", + }, + "withtrailingrec.example.com.": { + "dnslink=/ipns/withtrailing.example.com/segment/", + }, + "double.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.double.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "double.conflict.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.conflict.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjE", + }, + "fqdn.example.com.": { + "dnslink=/ipfs/QmYvMB9yrsSf7RKBghkfwmHJkzJhW2ZgVwq3LxBXXPasFr", + }, + "en.wikipedia-on-ipfs.org.": { + "dnslink=/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", + }, + "custom.non-icann.tldextravaganza.": { + "dnslink=/ipfs/bafybeieto6mcuvqlechv4iadoqvnffondeiwxc2bcfcewhvpsd2odvbmvm", + }, + "singlednslabelshouldbeok.": { + "dnslink=/ipfs/bafybeih4a6ylafdki6ailjrdvmr7o4fbbeceeeuty4v3qyyouiz5koqlpi", + }, + "www.wealdtech.eth.": { + "dnslink=/ipns/ipfs.example.com", + }, + }, + } +} + +func TestDNSResolution(t *testing.T) { + mock := newMockDNS() + r := &DNSResolver{lookupTXT: mock.lookupTXT} + testResolution(t, r, "multihash.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "ipfs.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "dipfs.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "dns1.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "dns1.example.com", 1, "/ipns/ipfs.example.com", ErrResolveRecursion) + testResolution(t, r, "dns2.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "dns2.example.com", 1, "/ipns/dns1.example.com", ErrResolveRecursion) + testResolution(t, r, "dns2.example.com", 
2, "/ipns/ipfs.example.com", ErrResolveRecursion) + testResolution(t, r, "multi.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "multi.example.com", 1, "/ipns/dns1.example.com", ErrResolveRecursion) + testResolution(t, r, "multi.example.com", 2, "/ipns/ipfs.example.com", ErrResolveRecursion) + testResolution(t, r, "equals.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/=equals", nil) + testResolution(t, r, "loop1.example.com", 1, "/ipns/loop2.example.com", ErrResolveRecursion) + testResolution(t, r, "loop1.example.com", 2, "/ipns/loop1.example.com", ErrResolveRecursion) + testResolution(t, r, "loop1.example.com", 3, "/ipns/loop2.example.com", ErrResolveRecursion) + testResolution(t, r, "loop1.example.com", opts.DefaultDepthLimit, "/ipns/loop1.example.com", ErrResolveRecursion) + testResolution(t, r, "dloop1.example.com", 1, "/ipns/loop2.example.com", ErrResolveRecursion) + testResolution(t, r, "dloop1.example.com", 2, "/ipns/loop1.example.com", ErrResolveRecursion) + testResolution(t, r, "dloop1.example.com", 3, "/ipns/loop2.example.com", ErrResolveRecursion) + testResolution(t, r, "dloop1.example.com", opts.DefaultDepthLimit, "/ipns/loop1.example.com", ErrResolveRecursion) + testResolution(t, r, "bad.example.com", opts.DefaultDepthLimit, "", ErrResolveFailed) + testResolution(t, r, "withsegment.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment", nil) + testResolution(t, r, "withrecsegment.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub", nil) + testResolution(t, r, "withsegment.example.com/test1", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/test1", nil) + testResolution(t, r, "withrecsegment.example.com/test2", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub/test2", nil) + testResolution(t, r, "withrecsegment.example.com/test3/", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub/test3/", nil) + testResolution(t, r, "withtrailingrec.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/", nil) + testResolution(t, r, "double.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "conflict.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjE", nil) + testResolution(t, r, "fqdn.example.com.", opts.DefaultDepthLimit, "/ipfs/QmYvMB9yrsSf7RKBghkfwmHJkzJhW2ZgVwq3LxBXXPasFr", nil) + testResolution(t, r, "en.wikipedia-on-ipfs.org", 2, "/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", nil) + testResolution(t, r, "custom.non-icann.tldextravaganza.", 2, "/ipfs/bafybeieto6mcuvqlechv4iadoqvnffondeiwxc2bcfcewhvpsd2odvbmvm", nil) + testResolution(t, r, "singlednslabelshouldbeok", 2, "/ipfs/bafybeih4a6ylafdki6ailjrdvmr7o4fbbeceeeuty4v3qyyouiz5koqlpi", nil) + testResolution(t, r, "www.wealdtech.eth", 1, "/ipns/ipfs.example.com", ErrResolveRecursion) + testResolution(t, r, "www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) + testResolution(t, r, "www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) +} diff --git a/namesys/interface.go b/namesys/interface.go new file mode 100644 index 
0000000000..655be179db --- /dev/null +++ b/namesys/interface.go @@ -0,0 +1,100 @@ +/* +Package namesys implements resolvers and publishers for the IPFS +naming system (IPNS). + +The core of IPFS is an immutable, content-addressable Merkle graph. +That works well for many use cases, but doesn't allow you to answer +questions like "what is Alice's current homepage?". The mutable name +system allows Alice to publish information like: + + The current homepage for alice.example.com is + /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj + +or: + + The current homepage for node + QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + is + /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj + +The mutable name system also allows users to resolve those references +to find the immutable IPFS object currently referenced by a given +mutable name. + +For command-line bindings to this functionality, see: + + ipfs name + ipfs dns + ipfs resolve +*/ +package namesys + +import ( + "errors" + + "context" + + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/path" + ci "github.com/libp2p/go-libp2p/core/crypto" +) + +// ErrResolveFailed signals an error when attempting to resolve. +var ErrResolveFailed = errors.New("could not resolve name") + +// ErrResolveRecursion signals a recursion-depth limit. +var ErrResolveRecursion = errors.New( + "could not resolve name (recursion limit exceeded)") + +// ErrPublishFailed signals an error when attempting to publish. +var ErrPublishFailed = errors.New("could not publish name") + +// NameSystem represents a cohesive name publishing and resolving system. +// +// Publishing a name is the process of establishing a mapping, a key-value +// pair, according to naming rules and databases. +// +// Resolving a name is the process of looking up the value associated with the +// key (name). +type NameSystem interface { + Resolver + Publisher +} + +// Result is the return type for Resolver.ResolveAsync. +type Result struct { + Path path.Path + Err error +} + +// Resolver is an object capable of resolving names. +type Resolver interface { + + // Resolve performs a recursive lookup, returning the dereferenced + // path. For example, if ipfs.io has a DNS TXT record pointing to + // /ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + // and there is a DHT IPNS entry for + // QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + // -> /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj + // then + // Resolve(ctx, "/ipns/ipfs.io") + // will resolve both names, returning + // /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj + // + // There is a default depth-limit to avoid infinite recursion. Most + // users will be fine with this default limit, but if you need to + // adjust the limit you can specify it as an option. + Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (value path.Path, err error) + + // ResolveAsync performs recursive name lookup, like Resolve, but it returns + // entries as they are discovered in the DHT. Each returned result is guaranteed + // to be "better" (which usually means newer) than the previous one. + ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result +} + +// Publisher is an object capable of publishing particular names. +type Publisher interface { + // Publish establishes a name-value mapping. + // TODO make this not PrivKey specific. 
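+ // The private key both signs the record and determines the name under + // which it is published, since the IPNS name is derived from the + // corresponding public key.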
+ Publish(ctx context.Context, name ci.PrivKey, value path.Path, options ...opts.PublishOption) error +} diff --git a/namesys/ipns_resolver_validation_test.go b/namesys/ipns_resolver_validation_test.go new file mode 100644 index 0000000000..9799e5ba37 --- /dev/null +++ b/namesys/ipns_resolver_validation_test.go @@ -0,0 +1,209 @@ +package namesys + +import ( + "context" + "testing" + "time" + + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" + ipns_pb "github.com/ipfs/boxo/ipns/pb" + "github.com/ipfs/boxo/path" + mockrouting "github.com/ipfs/boxo/routing/mock" + "github.com/ipfs/boxo/routing/offline" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + record "github.com/libp2p/go-libp2p-record" + testutil "github.com/libp2p/go-libp2p-testing/net" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + pstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/libp2p/go-libp2p/core/test" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" +) + +func TestResolverValidation(t *testing.T) { + t.Run("RSA", + func(t *testing.T) { + testResolverValidation(t, ci.RSA) + }) + t.Run("Ed25519", + func(t *testing.T) { + testResolverValidation(t, ci.Ed25519) + }) + t.Run("ECDSA", + func(t *testing.T) { + testResolverValidation(t, ci.ECDSA) + }) + t.Run("Secp256k1", + func(t *testing.T) { + testResolverValidation(t, ci.Secp256k1) + }) +} + +func testResolverValidation(t *testing.T, keyType int) { + ctx := context.Background() + rid := testutil.RandIdentityOrFatal(t) + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + peerstore, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + vstore := newMockValueStore(rid, dstore, peerstore) + resolver := NewIpnsResolver(vstore) + + nvVstore := offline.NewOfflineRouter(dstore, mockrouting.MockValidator{}) + + // Create entry with expiry in one hour + priv, id, _, ipnsDHTPath := genKeys(t, keyType) + ts := time.Now() + p := []byte("/ipfs/QmfM2r8seH2GiRaC4esTjeraXEachRt8ZsSeGaWTPLyMoG") + entry, err := createIPNSRecordWithEmbeddedPublicKey(priv, p, 1, ts.Add(time.Hour), 0) + if err != nil { + t.Fatal(err) + } + + // Publish entry + err = PublishEntry(ctx, vstore, ipnsDHTPath, entry) + if err != nil { + t.Fatal(err) + } + + // Resolve entry + resp, err := resolve(ctx, resolver, id.Pretty(), opts.DefaultResolveOpts()) + if err != nil { + t.Fatal(err) + } + if resp != path.Path(p) { + t.Fatalf("Mismatch between published path %s and resolved path %s", p, resp) + } + // Create expired entry + expiredEntry, err := createIPNSRecordWithEmbeddedPublicKey(priv, p, 1, ts.Add(-1*time.Hour), 0) + if err != nil { + t.Fatal(err) + } + + // Publish entry + err = PublishEntry(ctx, nvVstore, ipnsDHTPath, expiredEntry) + if err != nil { + t.Fatal(err) + } + + // Record should fail validation because entry is expired + _, err = resolve(ctx, resolver, id.Pretty(), opts.DefaultResolveOpts()) + if err == nil { + t.Fatal("ValidateIpnsRecord should have returned error") + } + + // Create IPNS record path with a different private key + priv2, id2, _, ipnsDHTPath2 := genKeys(t, keyType) + + // Publish entry + err = PublishEntry(ctx, nvVstore, ipnsDHTPath2, entry) + if err != nil { + t.Fatal(err) + } + + // Record should fail validation because public key defined by + // ipns path doesn't match record signature + _, err = resolve(ctx, resolver, id2.Pretty(), opts.DefaultResolveOpts()) + if err == nil { + 
t.Fatal("ValidateIpnsRecord should have failed signature verification") + } + + // Try embedding the incorrect private key inside the entry + if err := ipns.EmbedPublicKey(priv2.GetPublic(), entry); err != nil { + t.Fatal(err) + } + + // Publish entry + err = PublishEntry(ctx, nvVstore, ipnsDHTPath2, entry) + if err != nil { + t.Fatal(err) + } + + // Record should fail validation because public key defined by + // ipns path doesn't match record signature + _, err = resolve(ctx, resolver, id2.Pretty(), opts.DefaultResolveOpts()) + if err == nil { + t.Fatal("ValidateIpnsRecord should have failed signature verification") + } +} + +func genKeys(t *testing.T, keyType int) (ci.PrivKey, peer.ID, string, string) { + bits := 0 + if keyType == ci.RSA { + bits = 2048 + } + + sk, pk, err := test.RandTestKeyPair(keyType, bits) + if err != nil { + t.Fatal(err) + } + id, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + return sk, id, PkKeyForID(id), ipns.RecordKey(id) +} + +func createIPNSRecordWithEmbeddedPublicKey(sk ci.PrivKey, val []byte, seq uint64, eol time.Time, ttl time.Duration) (*ipns_pb.IpnsEntry, error) { + entry, err := ipns.Create(sk, val, seq, eol, ttl) + if err != nil { + return nil, err + } + if err := ipns.EmbedPublicKey(sk.GetPublic(), entry); err != nil { + return nil, err + } + + return entry, nil +} + +type mockValueStore struct { + r routing.ValueStore + kbook pstore.KeyBook +} + +func newMockValueStore(id testutil.Identity, dstore ds.Datastore, kbook pstore.KeyBook) *mockValueStore { + return &mockValueStore{ + r: offline.NewOfflineRouter(dstore, record.NamespacedValidator{ + "ipns": ipns.Validator{KeyBook: kbook}, + "pk": record.PublicKeyValidator{}, + }), + kbook: kbook, + } +} + +func (m *mockValueStore) GetValue(ctx context.Context, k string, opts ...routing.Option) ([]byte, error) { + return m.r.GetValue(ctx, k, opts...) +} + +func (m *mockValueStore) SearchValue(ctx context.Context, k string, opts ...routing.Option) (<-chan []byte, error) { + return m.r.SearchValue(ctx, k, opts...) +} + +func (m *mockValueStore) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) { + pk := m.kbook.PubKey(p) + if pk != nil { + return pk, nil + } + + pkkey := routing.KeyForPublicKey(p) + val, err := m.GetValue(ctx, pkkey) + if err != nil { + return nil, err + } + + pk, err = ci.UnmarshalPublicKey(val) + if err != nil { + return nil, err + } + + return pk, m.kbook.AddPubKey(p, pk) +} + +func (m *mockValueStore) PutValue(ctx context.Context, k string, d []byte, opts ...routing.Option) error { + return m.r.PutValue(ctx, k, d, opts...) +} diff --git a/namesys/namesys.go b/namesys/namesys.go new file mode 100644 index 0000000000..e341b63d72 --- /dev/null +++ b/namesys/namesys.go @@ -0,0 +1,338 @@ +// Package namesys defines Resolver and Publisher interfaces for IPNS paths, +// that is, IPFS paths in the form of /ipns/. A "resolved" +// IPNS path becomes an /ipfs/ path. +// +// Traditionally, these paths would be in the form of /ipns/peer_id, which +// references an IPNS record in a distributed ValueStore (usually the IPFS +// DHT). +// +// Additionally, the /ipns/ namespace can also be used with domain names that +// use DNSLink (/ipns/, https://docs.ipfs.io/concepts/dnslink/) +// +// The package provides implementations for all three resolvers. 
+package namesys + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + lru "github.com/hashicorp/golang-lru" + iface "github.com/ipfs/boxo/coreiface" + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/path" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/miekg/dns" + madns "github.com/multiformats/go-multiaddr-dns" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// mpns (a multi-protocol NameSystem) implements generic IPFS naming. +// +// Uses several Resolvers: +// (a) IPFS routing naming: SFS-like PKI names. +// (b) dns domains: resolves using links in DNS TXT records +// +// It can only publish to: (a) IPFS routing naming. +type mpns struct { + ds ds.Datastore + + dnsResolver, ipnsResolver resolver + ipnsPublisher Publisher + + staticMap map[string]path.Path + cache *lru.Cache +} + +type Option func(*mpns) error + +// WithCache is an option that instructs the name system to use a (LRU) cache of the given size. +func WithCache(size int) Option { + return func(ns *mpns) error { + if size <= 0 { + return fmt.Errorf("invalid cache size %d; must be > 0", size) + } + + cache, err := lru.New(size) + if err != nil { + return err + } + + ns.cache = cache + return nil + } +} + +// WithDNSResolver is an option that supplies a custom DNS resolver to use instead of the system +// default. +func WithDNSResolver(rslv madns.BasicResolver) Option { + return func(ns *mpns) error { + ns.dnsResolver = NewDNSResolver(rslv.LookupTXT) + return nil + } +} + +// WithDatastore is an option that supplies a datastore to use instead of an in-memory map datastore. The datastore is used to store published IPNS records and make them available for querying. +func WithDatastore(ds ds.Datastore) Option { + return func(ns *mpns) error { + ns.ds = ds + return nil + } +} + +func loadStaticMap(list string) (map[string]path.Path, error) { + staticMap := make(map[string]path.Path) + for _, pair := range strings.Split(list, ",") { + mapping := strings.SplitN(pair, ":", 2) + key := mapping[0] + value := path.FromString(mapping[1]) + + ipnsKey, err := peer.Decode(key) + if err == nil { + key = iface.FormatKeyID(ipnsKey) + } + + staticMap[key] = value + } + return staticMap, nil +} + +// NewNameSystem will construct the IPFS naming system based on Routing +func NewNameSystem(r routing.ValueStore, opts ...Option) (NameSystem, error) { + var staticMap map[string]path.Path + + // Prewarm namesys cache with static records for deterministic tests and debugging. + // Useful for testing things like DNSLink without real DNS lookup. 
+ // Example: + // IPFS_NS_MAP="dnslink-test.example.com:/ipfs/bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am" + if list := os.Getenv("IPFS_NS_MAP"); list != "" { + var err error + staticMap, err = loadStaticMap(list) + + if err != nil { + return nil, err + } + } + + ns := &mpns{ + staticMap: staticMap, + } + + for _, opt := range opts { + err := opt(ns) + if err != nil { + return nil, err + } + } + + if ns.ds == nil { + ns.ds = dssync.MutexWrap(ds.NewMapDatastore()) + } + + if ns.dnsResolver == nil { + ns.dnsResolver = NewDNSResolver(madns.DefaultResolver.LookupTXT) + } + + ns.ipnsResolver = NewIpnsResolver(r) + ns.ipnsPublisher = NewIpnsPublisher(r, ns.ds) + + return ns, nil +} + +// DefaultResolverCacheTTL defines max ttl of a record placed in namesys cache. +const DefaultResolverCacheTTL = time.Minute + +// Resolve implements Resolver. +func (ns *mpns) Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (path.Path, error) { + ctx, span := StartSpan(ctx, "MPNS.Resolve", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + + if strings.HasPrefix(name, "/ipfs/") { + return path.ParsePath(name) + } + + if !strings.HasPrefix(name, "/") { + return path.ParsePath("/ipfs/" + name) + } + + return resolve(ctx, ns, name, opts.ProcessOpts(options)) +} + +func (ns *mpns) ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result { + ctx, span := StartSpan(ctx, "MPNS.ResolveAsync", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + + if strings.HasPrefix(name, "/ipfs/") { + p, err := path.ParsePath(name) + res := make(chan Result, 1) + res <- Result{p, err} + close(res) + return res + } + + if !strings.HasPrefix(name, "/") { + p, err := path.ParsePath("/ipfs/" + name) + res := make(chan Result, 1) + res <- Result{p, err} + close(res) + return res + } + + return resolveAsync(ctx, ns, name, opts.ProcessOpts(options)) +} + +// resolveOnce implements resolver. +func (ns *mpns) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { + ctx, span := StartSpan(ctx, "MPNS.ResolveOnceAsync") + defer span.End() + + out := make(chan onceResult, 1) + + if !strings.HasPrefix(name, ipnsPrefix) { + name = ipnsPrefix + name + } + segments := strings.SplitN(name, "/", 4) + if len(segments) < 3 || segments[0] != "" { + log.Debugf("invalid name syntax for %s", name) + out <- onceResult{err: ErrResolveFailed} + close(out) + return out + } + + key := segments[2] + + // Resolver selection: + // 1. if it is a PeerID/CID/multihash resolve through "ipns". + // 2. 
if it is a domain name, resolve through "dns" + + var res resolver + ipnsKey, err := peer.Decode(key) + + // CIDs in IPNS are expected to have libp2p-key multicodec + // We ease the transition by returning a more meaningful error with a valid CID + if err != nil { + ipnsCid, cidErr := cid.Decode(key) + if cidErr == nil && ipnsCid.Version() == 1 && ipnsCid.Type() != cid.Libp2pKey { + fixedCid := cid.NewCidV1(cid.Libp2pKey, ipnsCid.Hash()).String() + codecErr := fmt.Errorf("peer ID represented as CIDv1 require libp2p-key multicodec: retry with /ipns/%s", fixedCid) + log.Debugf("RoutingResolver: could not convert public key hash %q to peer ID: %s\n", key, codecErr) + out <- onceResult{err: codecErr} + close(out) + return out + } + } + + cacheKey := key + if err == nil { + cacheKey = iface.FormatKeyID(ipnsKey) + } + + if p, ok := ns.cacheGet(cacheKey); ok { + var err error + if len(segments) > 3 { + p, err = path.FromSegments("", strings.TrimRight(p.String(), "/"), segments[3]) + } + span.SetAttributes(attribute.Bool("CacheHit", true)) + span.RecordError(err) + + out <- onceResult{value: p, err: err} + close(out) + return out + } + span.SetAttributes(attribute.Bool("CacheHit", false)) + + if err == nil { + res = ns.ipnsResolver + } else if _, ok := dns.IsDomainName(key); ok { + res = ns.dnsResolver + } else { + out <- onceResult{err: fmt.Errorf("invalid IPNS root: %q", key)} + close(out) + return out + } + + resCh := res.resolveOnceAsync(ctx, key, options) + var best onceResult + go func() { + defer close(out) + for { + select { + case res, ok := <-resCh: + if !ok { + if best != (onceResult{}) { + ns.cacheSet(cacheKey, best.value, best.ttl) + } + return + } + if res.err == nil { + best = res + } + p := res.value + err := res.err + ttl := res.ttl + + // Attach rest of the path + if len(segments) > 3 { + p, err = path.FromSegments("", strings.TrimRight(p.String(), "/"), segments[3]) + } + + emitOnceResult(ctx, out, onceResult{value: p, ttl: ttl, err: err}) + case <-ctx.Done(): + return + } + } + }() + + return out +} + +func emitOnceResult(ctx context.Context, outCh chan<- onceResult, r onceResult) { + select { + case outCh <- r: + case <-ctx.Done(): + } +} + +// Publish implements Publisher +func (ns *mpns) Publish(ctx context.Context, name ci.PrivKey, value path.Path, options ...opts.PublishOption) error { + ctx, span := StartSpan(ctx, "MPNS.Publish") + defer span.End() + + // This is a bit hacky. We do this because the EOL is based on the current + // time, but also needed in the end of the function. Therefore, we parse + // the options immediately and add an option PublishWithEOL with the EOL + // calculated in this moment. + publishOpts := opts.ProcessPublishOptions(options) + options = append(options, opts.PublishWithEOL(publishOpts.EOL)) + + id, err := peer.IDFromPrivateKey(name) + if err != nil { + span.RecordError(err) + return err + } + span.SetAttributes(attribute.String("ID", id.String())) + if err := ns.ipnsPublisher.Publish(ctx, name, value, options...); err != nil { + // Invalidate the cache. Publishing may _partially_ succeed but + // still return an error. 
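+ // A record may have reached some peers before the failure, so any + // previously cached value for this name can no longer be trusted.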
+ ns.cacheInvalidate(string(id)) + span.RecordError(err) + return err + } + ttl := DefaultResolverCacheTTL + if publishOpts.TTL >= 0 { + ttl = publishOpts.TTL + } + if ttEOL := time.Until(publishOpts.EOL); ttEOL < ttl { + ttl = ttEOL + } + ns.cacheSet(string(id), value, ttl) + return nil +} diff --git a/namesys/namesys_test.go b/namesys/namesys_test.go new file mode 100644 index 0000000000..a3f9df01aa --- /dev/null +++ b/namesys/namesys_test.go @@ -0,0 +1,210 @@ +package namesys + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" + offroute "github.com/ipfs/boxo/routing/offline" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + record "github.com/libp2p/go-libp2p-record" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" +) + +type mockResolver struct { + entries map[string]string +} + +func testResolution(t *testing.T, resolver Resolver, name string, depth uint, expected string, expError error) { + t.Helper() + p, err := resolver.Resolve(context.Background(), name, opts.Depth(depth)) + if !errors.Is(err, expError) { + t.Fatal(fmt.Errorf( + "expected %s with a depth of %d to have a '%s' error, but got '%s'", + name, depth, expError, err)) + } + if p.String() != expected { + t.Fatal(fmt.Errorf( + "%s with depth %d resolved to %s != %s", + name, depth, p.String(), expected)) + } +} + +func (r *mockResolver) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { + p, err := path.ParsePath(r.entries[name]) + out := make(chan onceResult, 1) + out <- onceResult{value: p, err: err} + close(out) + return out +} + +func mockResolverOne() *mockResolver { + return &mockResolver{ + entries: map[string]string{ + "QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy": "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", + "QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n": "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", + "QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD": "/ipns/ipfs.io", + "QmQ4QZh8nrsczdUEwTyfBope4THUhqxqc1fx6qYhhzZQei": "/ipfs/QmP3ouCnU8NNLsW6261pAx2pNLV2E4dQoisB1sgda12Act", + "12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", // ed25519+identity multihash + "bafzbeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", // cidv1 in base32 with libp2p-key multicodec + }, + } +} + +func mockResolverTwo() *mockResolver { + return &mockResolver{ + entries: map[string]string{ + "ipfs.io": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", + }, + } +} + +func TestNamesysResolution(t *testing.T) { + r := &mpns{ + ipnsResolver: mockResolverOne(), + dnsResolver: mockResolverTwo(), + } + + testResolution(t, r, "Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) + testResolution(t, r, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) + testResolution(t, r, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) + testResolution(t, r, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 1, 
"/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", ErrResolveRecursion) + testResolution(t, r, "/ipns/ipfs.io", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) + testResolution(t, r, "/ipns/ipfs.io", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) + testResolution(t, r, "/ipns/ipfs.io", 2, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", ErrResolveRecursion) + testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) + testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 1, "/ipns/ipfs.io", ErrResolveRecursion) + testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 2, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) + testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 3, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", ErrResolveRecursion) + testResolution(t, r, "/ipns/12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) + testResolution(t, r, "/ipns/bafzbeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) +} + +func TestNamesysResolutionWithCache(t *testing.T) { + nsMap := "dnslink-test.example.com:/ipfs/bafyaaeykceeaeeqlgiydemzngazc2mrtbimaw,12D3KooWQbpsnyzdBcxw6GUMbijV8WgXE4L8EtfnbcQWLfyxBKho:/ipfs/bafyaagakcyeaeeqqgiydemzngazc2mrtfvuxa3ttbimba,k51qzi5uqu5dkwkqm42v9j9kqcam2jiuvloi16g72i4i4amoo2m8u3ol3mqu6s:/ipfs/bafyaahikdmeaeeqvgiydemzngazc2mrtfvuxa3ttfvsgk5lybimbk" + + staticMap, err := loadStaticMap(nsMap) + if err != nil { + t.Fatal(err) + } + + r := &mpns{ + ipnsResolver: mockResolverOne(), + dnsResolver: mockResolverTwo(), + staticMap: staticMap, + } + + testResolution(t, r, "/ipns/dnslink-test.example.com", opts.DefaultDepthLimit, "/ipfs/bafyaaeykceeaeeqlgiydemzngazc2mrtbimaw", nil) + + testResolution(t, r, "/ipns/bafzaajaiaejcbw5i6oyqsktsn36r2vxgl2jzosyao46rybqztxt4rx4tfa3hpogg", opts.DefaultDepthLimit, "/ipfs/bafyaagakcyeaeeqqgiydemzngazc2mrtfvuxa3ttbimba", nil) + testResolution(t, r, "/ipns/k51qzi5uqu5dlnojhwrggtpty9c0cp5hvnkdozowth4eqb726jvoros8k9niyu", opts.DefaultDepthLimit, "/ipfs/bafyaagakcyeaeeqqgiydemzngazc2mrtfvuxa3ttbimba", nil) + testResolution(t, r, "/ipns/12D3KooWQbpsnyzdBcxw6GUMbijV8WgXE4L8EtfnbcQWLfyxBKho", opts.DefaultDepthLimit, "/ipfs/bafyaagakcyeaeeqqgiydemzngazc2mrtfvuxa3ttbimba", nil) + + testResolution(t, r, "/ipns/bafzaajaiaejcbpltl72da5f3y7ojrtsa7hsfn5bbnkjbkwyesziqqtdry6vjilku", opts.DefaultDepthLimit, "/ipfs/bafyaahikdmeaeeqvgiydemzngazc2mrtfvuxa3ttfvsgk5lybimbk", nil) + testResolution(t, r, "/ipns/k51qzi5uqu5dkwkqm42v9j9kqcam2jiuvloi16g72i4i4amoo2m8u3ol3mqu6s", opts.DefaultDepthLimit, "/ipfs/bafyaahikdmeaeeqvgiydemzngazc2mrtfvuxa3ttfvsgk5lybimbk", nil) + testResolution(t, r, "/ipns/12D3KooWNZuG8phqhoNK9KWcUhwfzA3biDKNCUNVWEaJgigr6Acj", opts.DefaultDepthLimit, "/ipfs/bafyaahikdmeaeeqvgiydemzngazc2mrtfvuxa3ttfvsgk5lybimbk", nil) + +} + +func TestPublishWithCache0(t *testing.T) { + dst := dssync.MutexWrap(ds.NewMapDatastore()) + priv, _, err := ci.GenerateKeyPair(ci.RSA, 2048) + if err != nil { + t.Fatal(err) + } + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + pid, err := peer.IDFromPrivateKey(priv) + if err != nil { + t.Fatal(err) + } + err = ps.AddPrivKey(pid, priv) + if 
err != nil { + t.Fatal(err) + } + + routing := offroute.NewOfflineRouter(dst, record.NamespacedValidator{ + "ipns": ipns.Validator{KeyBook: ps}, + "pk": record.PublicKeyValidator{}, + }) + + nsys, err := NewNameSystem(routing, WithDatastore(dst)) + if err != nil { + t.Fatal(err) + } + + // CID is arbitrary. + p, err := path.ParsePath("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") + if err != nil { + t.Fatal(err) + } + err = nsys.Publish(context.Background(), priv, p) + if err != nil { + t.Fatal(err) + } +} + +func TestPublishWithTTL(t *testing.T) { + dst := dssync.MutexWrap(ds.NewMapDatastore()) + priv, _, err := ci.GenerateKeyPair(ci.RSA, 2048) + if err != nil { + t.Fatal(err) + } + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + pid, err := peer.IDFromPrivateKey(priv) + if err != nil { + t.Fatal(err) + } + err = ps.AddPrivKey(pid, priv) + if err != nil { + t.Fatal(err) + } + + routing := offroute.NewOfflineRouter(dst, record.NamespacedValidator{ + "ipns": ipns.Validator{KeyBook: ps}, + "pk": record.PublicKeyValidator{}, + }) + + nsys, err := NewNameSystem(routing, WithDatastore(dst), WithCache(128)) + if err != nil { + t.Fatal(err) + } + + // CID is arbitrary. + p, err := path.ParsePath("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") + if err != nil { + t.Fatal(err) + } + + ttl := 1 * time.Second + eol := time.Now().Add(2 * time.Second) + + err = nsys.Publish(context.Background(), priv, p, opts.PublishWithEOL(eol), opts.PublishWithTTL(ttl)) + if err != nil { + t.Fatal(err) + } + ientry, ok := nsys.(*mpns).cache.Get(string(pid)) + if !ok { + t.Fatal("cache get failed") + } + entry, ok := ientry.(cacheEntry) + if !ok { + t.Fatal("bad cache item returned") + } + if entry.eol.Sub(eol) > 10*time.Millisecond { + t.Fatalf("bad cache ttl: expected %s, got %s", eol, entry.eol) + } +} diff --git a/namesys/publisher.go b/namesys/publisher.go new file mode 100644 index 0000000000..24a0b8e4d1 --- /dev/null +++ b/namesys/publisher.go @@ -0,0 +1,285 @@ +package namesys + +import ( + "context" + "strings" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" + pb "github.com/ipfs/boxo/ipns/pb" + "github.com/ipfs/boxo/path" + ds "github.com/ipfs/go-datastore" + dsquery "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/whyrusleeping/base32" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +const ipnsPrefix = "/ipns/" + +// IpnsPublisher is capable of publishing and resolving names to the IPFS +// routing system. +type IpnsPublisher struct { + routing routing.ValueStore + ds ds.Datastore + + // Used to ensure we assign IPNS records *sequential* sequence numbers. + mu sync.Mutex +} + +// NewIpnsPublisher constructs a publisher for the IPFS Routing name system. +func NewIpnsPublisher(route routing.ValueStore, ds ds.Datastore) *IpnsPublisher { + if ds == nil { + panic("nil datastore") + } + return &IpnsPublisher{routing: route, ds: ds} +} + +// Publish implements Publisher. 
Accepts a keypair and a value, +// and publishes it out to the routing system +func (p *IpnsPublisher) Publish(ctx context.Context, k crypto.PrivKey, value path.Path, options ...opts.PublishOption) error { + log.Debugf("Publish %s", value) + + ctx, span := StartSpan(ctx, "IpnsPublisher.Publish", trace.WithAttributes(attribute.String("Value", value.String()))) + defer span.End() + + record, err := p.updateRecord(ctx, k, value, options...) + if err != nil { + return err + } + + return PutRecordToRouting(ctx, p.routing, k.GetPublic(), record) +} + +// IpnsDsKey returns a datastore key given an IPNS identifier (peer +// ID). Defines the storage key for IPNS records in the local datastore. +func IpnsDsKey(id peer.ID) ds.Key { + return ds.NewKey("/ipns/" + base32.RawStdEncoding.EncodeToString([]byte(id))) +} + +// ListPublished returns the latest IPNS records published by this node and +// their expiration times. +// +// This method will not search the routing system for records published by other +// nodes. +func (p *IpnsPublisher) ListPublished(ctx context.Context) (map[peer.ID]*pb.IpnsEntry, error) { + query, err := p.ds.Query(ctx, dsquery.Query{ + Prefix: ipnsPrefix, + }) + if err != nil { + return nil, err + } + defer query.Close() + + records := make(map[peer.ID]*pb.IpnsEntry) + for { + select { + case result, ok := <-query.Next(): + if !ok { + return records, nil + } + if result.Error != nil { + return nil, result.Error + } + e := new(pb.IpnsEntry) + if err := proto.Unmarshal(result.Value, e); err != nil { + // Might as well return what we can. + log.Error("found an invalid IPNS entry:", err) + continue + } + if !strings.HasPrefix(result.Key, ipnsPrefix) { + log.Errorf("datastore query for keys with prefix %s returned a key: %s", ipnsPrefix, result.Key) + continue + } + k := result.Key[len(ipnsPrefix):] + pid, err := base32.RawStdEncoding.DecodeString(k) + if err != nil { + log.Errorf("ipns ds key invalid: %s", result.Key) + continue + } + records[peer.ID(pid)] = e + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// GetPublished returns the record this node has published corresponding to the +// given peer ID. +// +// If `checkRouting` is true and we have no existing record, this method will +// check the routing system for any existing records. +func (p *IpnsPublisher) GetPublished(ctx context.Context, id peer.ID, checkRouting bool) (*pb.IpnsEntry, error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + + value, err := p.ds.Get(ctx, IpnsDsKey(id)) + switch err { + case nil: + case ds.ErrNotFound: + if !checkRouting { + return nil, nil + } + ipnskey := ipns.RecordKey(id) + value, err = p.routing.GetValue(ctx, ipnskey) + if err != nil { + // Not found or other network issue. Can't really do + // anything about this case. 
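+ // Returning (nil, nil) here means "no record found"; callers such as + // updateRecord treat that as a fresh name starting at sequence number 0.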
+ if err != routing.ErrNotFound { + log.Debugf("error when determining the last published IPNS record for %s: %s", id, err) + } + + return nil, nil + } + default: + return nil, err + } + e := new(pb.IpnsEntry) + if err := proto.Unmarshal(value, e); err != nil { + return nil, err + } + return e, nil +} + +func (p *IpnsPublisher) updateRecord(ctx context.Context, k crypto.PrivKey, value path.Path, options ...opts.PublishOption) (*pb.IpnsEntry, error) { + id, err := peer.IDFromPrivateKey(k) + if err != nil { + return nil, err + } + + p.mu.Lock() + defer p.mu.Unlock() + + // get previous records sequence number + rec, err := p.GetPublished(ctx, id, true) + if err != nil { + return nil, err + } + + seqno := rec.GetSequence() // returns 0 if rec is nil + if rec != nil && value != path.Path(rec.GetValue()) { + // Don't bother incrementing the sequence number unless the + // value changes. + seqno++ + } + + opts := opts.ProcessPublishOptions(options) + + // Create record + entry, err := ipns.Create(k, []byte(value), seqno, opts.EOL, opts.TTL) + if err != nil { + return nil, err + } + + data, err := proto.Marshal(entry) + if err != nil { + return nil, err + } + + // Put the new record. + key := IpnsDsKey(id) + if err := p.ds.Put(ctx, key, data); err != nil { + return nil, err + } + if err := p.ds.Sync(ctx, key); err != nil { + return nil, err + } + return entry, nil +} + +// PutRecordToRouting publishes the given entry using the provided ValueStore, +// keyed on the ID associated with the provided public key. The public key is +// also made available to the routing system so that entries can be verified. +func PutRecordToRouting(ctx context.Context, r routing.ValueStore, k crypto.PubKey, entry *pb.IpnsEntry) error { + ctx, span := StartSpan(ctx, "PutRecordToRouting") + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errs := make(chan error, 2) // At most two errors (IPNS, and public key) + + if err := ipns.EmbedPublicKey(k, entry); err != nil { + return err + } + + id, err := peer.IDFromPublicKey(k) + if err != nil { + return err + } + + go func() { + errs <- PublishEntry(ctx, r, ipns.RecordKey(id), entry) + }() + + // Publish the public key if a public key cannot be extracted from the ID + // TODO: once v0.4.16 is widespread enough, we can stop doing this + // and at that point we can even deprecate the /pk/ namespace in the dht + // + // NOTE: This check actually checks if the public key has been embedded + // in the IPNS entry. This check is sufficient because we embed the + // public key in the IPNS entry if it can't be extracted from the ID. + if entry.PubKey != nil { + go func() { + errs <- PublishPublicKey(ctx, r, PkKeyForID(id), k) + }() + + if err := waitOnErrChan(ctx, errs); err != nil { + return err + } + } + + return waitOnErrChan(ctx, errs) +} + +func waitOnErrChan(ctx context.Context, errs chan error) error { + select { + case err := <-errs: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// PublishPublicKey stores the given public key in the ValueStore with the +// given key. 
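+// The key is expected to be a routing key in the /pk/ namespace, such as one +// produced by PkKeyForID.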
+func PublishPublicKey(ctx context.Context, r routing.ValueStore, k string, pubk crypto.PubKey) error { + ctx, span := StartSpan(ctx, "PublishPublicKey", trace.WithAttributes(attribute.String("Key", k))) + defer span.End() + + log.Debugf("Storing pubkey at: %s", k) + pkbytes, err := crypto.MarshalPublicKey(pubk) + if err != nil { + return err + } + + // Store associated public key + return r.PutValue(ctx, k, pkbytes) +} + +// PublishEntry stores the given IpnsEntry in the ValueStore with the given +// ipnskey. +func PublishEntry(ctx context.Context, r routing.ValueStore, ipnskey string, rec *pb.IpnsEntry) error { + ctx, span := StartSpan(ctx, "PublishEntry", trace.WithAttributes(attribute.String("IPNSKey", ipnskey))) + defer span.End() + + data, err := proto.Marshal(rec) + if err != nil { + return err + } + + log.Debugf("Storing ipns entry at: %x", ipnskey) + // Store ipns entry at "/ipns/"+h(pubkey) + return r.PutValue(ctx, ipnskey, data) +} + +// PkKeyForID returns the public key routing key for the given peer ID. +func PkKeyForID(id peer.ID) string { + return "/pk/" + string(id) +} diff --git a/namesys/publisher_test.go b/namesys/publisher_test.go new file mode 100644 index 0000000000..b40593c760 --- /dev/null +++ b/namesys/publisher_test.go @@ -0,0 +1,156 @@ +package namesys + +import ( + "context" + "crypto/rand" + "testing" + "time" + + "github.com/ipfs/boxo/path" + + dshelp "github.com/ipfs/boxo/datastore/dshelp" + "github.com/ipfs/boxo/ipns" + mockrouting "github.com/ipfs/boxo/routing/mock" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + testutil "github.com/libp2p/go-libp2p-testing/net" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" +) + +type identity struct { + testutil.PeerNetParams +} + +func (p *identity) ID() peer.ID { + return p.PeerNetParams.ID +} + +func (p *identity) Address() ma.Multiaddr { + return p.Addr +} + +func (p *identity) PrivateKey() ci.PrivKey { + return p.PrivKey +} + +func (p *identity) PublicKey() ci.PubKey { + return p.PubKey +} + +func testNamekeyPublisher(t *testing.T, keyType int, expectedErr error, expectedExistence bool) { + // Context + ctx := context.Background() + + // Private key + privKey, pubKey, err := ci.GenerateKeyPairWithReader(keyType, 2048, rand.Reader) + if err != nil { + t.Fatal(err) + } + + // ID + id, err := peer.IDFromPublicKey(pubKey) + if err != nil { + t.Fatal(err) + } + + // Value + value := []byte("ipfs/TESTING") + + // Seqnum + seqnum := uint64(0) + + // Eol + eol := time.Now().Add(24 * time.Hour) + + // Routing value store + p := testutil.PeerNetParams{ + ID: id, + PrivKey: privKey, + PubKey: pubKey, + Addr: testutil.ZeroLocalTCPAddress, + } + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + serv := mockrouting.NewServer() + r := serv.ClientWithDatastore(context.Background(), &identity{p}, dstore) + + entry, err := ipns.Create(privKey, value, seqnum, eol, 0) + if err != nil { + t.Fatal(err) + } + + err = PutRecordToRouting(ctx, r, pubKey, entry) + if err != nil { + t.Fatal(err) + } + + // Check for namekey existence in value store + namekey := PkKeyForID(id) + _, err = r.GetValue(ctx, namekey) + if err != expectedErr { + t.Fatal(err) + } + + // Also check datastore for completeness + key := dshelp.NewKeyFromBinary([]byte(namekey)) + exists, err := dstore.Has(ctx, key) + if err != nil { + t.Fatal(err) + } + + if exists != expectedExistence { + t.Fatal("Unexpected key existence in datastore") + } +} + 
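+// RSA public keys are too large to inline in a peer ID, so publishing a name +// for an RSA key also stores the key itself under /pk/; Ed25519 public keys +// are embedded in the peer ID, so no /pk/ record is expected (hence the +// ds.ErrNotFound and non-existence assertions).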
+func TestRSAPublisher(t *testing.T) { + testNamekeyPublisher(t, ci.RSA, nil, true) +} + +func TestEd22519Publisher(t *testing.T) { + testNamekeyPublisher(t, ci.Ed25519, ds.ErrNotFound, false) +} + +func TestAsyncDS(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rt := mockrouting.NewServer().Client(testutil.RandIdentityOrFatal(t)) + ds := &checkSyncDS{ + Datastore: ds.NewMapDatastore(), + syncKeys: make(map[ds.Key]struct{}), + } + publisher := NewIpnsPublisher(rt, ds) + + ipnsFakeID := testutil.RandIdentityOrFatal(t) + ipnsVal, err := path.ParsePath("/ipns/foo.bar") + if err != nil { + t.Fatal(err) + } + + if err := publisher.Publish(ctx, ipnsFakeID.PrivateKey(), ipnsVal); err != nil { + t.Fatal(err) + } + + ipnsKey := IpnsDsKey(ipnsFakeID.ID()) + + for k := range ds.syncKeys { + if k.IsAncestorOf(ipnsKey) || k.Equal(ipnsKey) { + return + } + } + + t.Fatal("ipns key not synced") +} + +type checkSyncDS struct { + ds.Datastore + syncKeys map[ds.Key]struct{} +} + +func (d *checkSyncDS) Sync(ctx context.Context, prefix ds.Key) error { + d.syncKeys[prefix] = struct{}{} + return d.Datastore.Sync(ctx, prefix) +} diff --git a/namesys/republisher/repub.go b/namesys/republisher/repub.go new file mode 100644 index 0000000000..03426843b9 --- /dev/null +++ b/namesys/republisher/repub.go @@ -0,0 +1,186 @@ +// Package republisher provides a utility to automatically re-publish IPNS +// records related to the keys in a Keystore. +package republisher + +import ( + "context" + "errors" + "time" + + keystore "github.com/ipfs/boxo/keystore" + "github.com/ipfs/boxo/namesys" + "github.com/ipfs/boxo/path" + "go.opentelemetry.io/otel/attribute" + + "github.com/gogo/protobuf/proto" + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" + pb "github.com/ipfs/boxo/ipns/pb" + ds "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log" + "github.com/jbenet/goprocess" + gpctx "github.com/jbenet/goprocess/context" + ic "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" +) + +var errNoEntry = errors.New("no previous entry") + +var log = logging.Logger("ipns-repub") + +// DefaultRebroadcastInterval is the default interval at which we rebroadcast IPNS records +var DefaultRebroadcastInterval = time.Hour * 4 + +// InitialRebroadcastDelay is the delay before first broadcasting IPNS records on start +var InitialRebroadcastDelay = time.Minute * 1 + +// FailureRetryInterval is the interval at which we retry IPNS records broadcasts (when they fail) +var FailureRetryInterval = time.Minute * 5 + +// DefaultRecordLifetime is the default lifetime for IPNS records +const DefaultRecordLifetime = time.Hour * 24 + +// Republisher facilitates the regular publishing of all the IPNS records +// associated to keys in a Keystore. +type Republisher struct { + ns namesys.Publisher + ds ds.Datastore + self ic.PrivKey + ks keystore.Keystore + + Interval time.Duration + + // how long records that are republished should be valid for + RecordLifetime time.Duration +} + +// NewRepublisher creates a new Republisher +func NewRepublisher(ns namesys.Publisher, ds ds.Datastore, self ic.PrivKey, ks keystore.Keystore) *Republisher { + return &Republisher{ + ns: ns, + ds: ds, + self: self, + ks: ks, + Interval: DefaultRebroadcastInterval, + RecordLifetime: DefaultRecordLifetime, + } +} + +// Run starts the republisher facility. It can be stopped by stopping the +// provided proc. 
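+// +// Typical wiring (a sketch; ns, dstore, key, and ks are hypothetical values +// assumed to exist): +// +// rp := NewRepublisher(ns, dstore, key, ks) +// proc := goprocess.Go(rp.Run) +// defer proc.Close()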
+func (rp *Republisher) Run(proc goprocess.Process) { + timer := time.NewTimer(InitialRebroadcastDelay) + defer timer.Stop() + if rp.Interval < InitialRebroadcastDelay { + timer.Reset(rp.Interval) + } + + for { + select { + case <-timer.C: + timer.Reset(rp.Interval) + err := rp.republishEntries(proc) + if err != nil { + log.Info("republisher failed to republish: ", err) + if FailureRetryInterval < rp.Interval { + timer.Reset(FailureRetryInterval) + } + } + case <-proc.Closing(): + return + } + } +} + +func (rp *Republisher) republishEntries(p goprocess.Process) error { + ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p)) + defer cancel() + ctx, span := namesys.StartSpan(ctx, "Republisher.RepublishEntries") + defer span.End() + + // TODO: Use rp.ipns.ListPublished(). We can't currently *do* that + // because: + // 1. There's no way to get keys from the keystore by ID. + // 2. We don't actually have access to the IPNS publisher. + err := rp.republishEntry(ctx, rp.self) + if err != nil { + return err + } + + if rp.ks != nil { + keyNames, err := rp.ks.List() + if err != nil { + return err + } + for _, name := range keyNames { + priv, err := rp.ks.Get(name) + if err != nil { + return err + } + err = rp.republishEntry(ctx, priv) + if err != nil { + return err + } + + } + } + + return nil +} + +func (rp *Republisher) republishEntry(ctx context.Context, priv ic.PrivKey) error { + ctx, span := namesys.StartSpan(ctx, "Republisher.RepublishEntry") + defer span.End() + id, err := peer.IDFromPrivateKey(priv) + if err != nil { + span.RecordError(err) + return err + } + + log.Debugf("republishing ipns entry for %s", id) + + // Look for it locally only + e, err := rp.getLastIPNSEntry(ctx, id) + if err != nil { + if err == errNoEntry { + span.SetAttributes(attribute.Bool("NoEntry", true)) + return nil + } + span.RecordError(err) + return err + } + + p := path.Path(e.GetValue()) + prevEol, err := ipns.GetEOL(e) + if err != nil { + span.RecordError(err) + return err + } + + // update record with same sequence number + eol := time.Now().Add(rp.RecordLifetime) + if prevEol.After(eol) { + eol = prevEol + } + err = rp.ns.Publish(ctx, priv, p, opts.PublishWithEOL(eol)) + span.RecordError(err) + return err +} + +func (rp *Republisher) getLastIPNSEntry(ctx context.Context, id peer.ID) (*pb.IpnsEntry, error) { + // Look for it locally only + val, err := rp.ds.Get(ctx, namesys.IpnsDsKey(id)) + switch err { + case nil: + case ds.ErrNotFound: + return nil, errNoEntry + default: + return nil, err + } + + e := new(pb.IpnsEntry) + if err := proto.Unmarshal(val, e); err != nil { + return nil, err + } + return e, nil +} diff --git a/namesys/republisher/repub_test.go b/namesys/republisher/repub_test.go new file mode 100644 index 0000000000..e07b3a22cf --- /dev/null +++ b/namesys/republisher/repub_test.go @@ -0,0 +1,266 @@ +package republisher_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/jbenet/goprocess" + "github.com/libp2p/go-libp2p" + dht "github.com/libp2p/go-libp2p-kad-dht" + ic "github.com/libp2p/go-libp2p/core/crypto" + host "github.com/libp2p/go-libp2p/core/host" + peer "github.com/libp2p/go-libp2p/core/peer" + routing "github.com/libp2p/go-libp2p/core/routing" + + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" + ipns_pb "github.com/ipfs/boxo/ipns/pb" + "github.com/ipfs/boxo/path" + + keystore 
"github.com/ipfs/boxo/keystore" + "github.com/ipfs/boxo/namesys" + . "github.com/ipfs/boxo/namesys/republisher" +) + +type mockNode struct { + h host.Host + id string + privKey ic.PrivKey + store ds.Batching + dht *dht.IpfsDHT + keystore keystore.Keystore +} + +func getMockNode(t *testing.T, ctx context.Context) *mockNode { + t.Helper() + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + var idht *dht.IpfsDHT + h, err := libp2p.New( + libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), + libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { + rt, err := dht.New(ctx, h, dht.Mode(dht.ModeServer)) + idht = rt + return rt, err + }), + ) + if err != nil { + t.Fatal(err) + } + + return &mockNode{ + h: h, + id: h.ID().Pretty(), + privKey: h.Peerstore().PrivKey(h.ID()), + store: dstore, + dht: idht, + keystore: keystore.NewMemKeystore(), + } +} + +func TestRepublish(t *testing.T) { + // set cache life to zero for testing low-period repubs + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var nsystems []namesys.NameSystem + var nodes []*mockNode + for i := 0; i < 10; i++ { + n := getMockNode(t, ctx) + ns, err := namesys.NewNameSystem(n.dht, namesys.WithDatastore(n.store)) + if err != nil { + t.Fatal(err) + } + + nsystems = append(nsystems, ns) + nodes = append(nodes, n) + } + + pinfo := host.InfoFromHost(nodes[0].h) + + for _, n := range nodes[1:] { + if err := n.h.Connect(ctx, *pinfo); err != nil { + t.Fatal(err) + } + } + + // have one node publish a record that is valid for 1 second + publisher := nodes[3] + + p := path.FromString("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid + rp := namesys.NewIpnsPublisher(publisher.dht, publisher.store) + name := "/ipns/" + publisher.id + + // Retry in case the record expires before we can fetch it. This can + // happen when running the test on a slow machine. + var expiration time.Time + timeout := time.Second + for { + expiration = time.Now().Add(time.Second) + err := rp.Publish(ctx, publisher.privKey, p, opts.PublishWithEOL(expiration)) + if err != nil { + t.Fatal(err) + } + + err = verifyResolution(nsystems, name, p) + if err == nil { + break + } + + if time.Now().After(expiration) { + timeout *= 2 + continue + } + t.Fatal(err) + } + + // Now wait a second, the records will be invalid and we should fail to resolve + time.Sleep(timeout) + if err := verifyResolutionFails(nsystems, name); err != nil { + t.Fatal(err) + } + + // The republishers that are contained within the nodes have their timeout set + // to 12 hours. Instead of trying to tweak those, we're just going to pretend + // they don't exist and make our own. 
+	repub := NewRepublisher(rp, publisher.store, publisher.privKey, publisher.keystore)
+	repub.Interval = time.Second
+	repub.RecordLifetime = time.Second * 5
+
+	proc := goprocess.Go(repub.Run)
+	defer proc.Close()
+
+	// now wait a couple seconds for it to fire
+	time.Sleep(time.Second * 2)
+
+	// we should be able to resolve them now
+	if err := verifyResolution(nsystems, name, p); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLongEOLRepublish(t *testing.T) {
+	// set cache life to zero for testing low-period repubs
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var nsystems []namesys.NameSystem
+	var nodes []*mockNode
+	for i := 0; i < 10; i++ {
+		n := getMockNode(t, ctx)
+		ns, err := namesys.NewNameSystem(n.dht, namesys.WithDatastore(n.store))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		nsystems = append(nsystems, ns)
+		nodes = append(nodes, n)
+	}
+
+	pinfo := host.InfoFromHost(nodes[0].h)
+
+	for _, n := range nodes[1:] {
+		if err := n.h.Connect(ctx, *pinfo); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// have one node publish a record that is valid for one hour
+	publisher := nodes[3]
+	p := path.FromString("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid
+	rp := namesys.NewIpnsPublisher(publisher.dht, publisher.store)
+	name := "/ipns/" + publisher.id
+
+	expiration := time.Now().Add(time.Hour)
+	err := rp.Publish(ctx, publisher.privKey, p, opts.PublishWithEOL(expiration))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = verifyResolution(nsystems, name, p)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The republishers that are contained within the nodes have their timeout set
+	// to 12 hours. Instead of trying to tweak those, we're just going to pretend
+	// they don't exist and make our own.
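+	// RecordLifetime below (1s) is far shorter than the record's one-hour
+	// EOL; republishEntry keeps whichever EOL is later, which is what the
+	// final check in this test asserts.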
+ repub := NewRepublisher(rp, publisher.store, publisher.privKey, publisher.keystore) + repub.Interval = time.Millisecond * 500 + repub.RecordLifetime = time.Second + + proc := goprocess.Go(repub.Run) + defer proc.Close() + + // now wait a couple seconds for it to fire a few times + time.Sleep(time.Second * 2) + + err = verifyResolution(nsystems, name, p) + if err != nil { + t.Fatal(err) + } + + entry, err := getLastIPNSEntry(ctx, publisher.store, publisher.h.ID()) + if err != nil { + t.Fatal(err) + } + + finalEol, err := ipns.GetEOL(entry) + if err != nil { + t.Fatal(err) + } + + if !finalEol.Equal(expiration) { + t.Fatal("expiration time modified") + } +} + +func getLastIPNSEntry(ctx context.Context, dstore ds.Datastore, id peer.ID) (*ipns_pb.IpnsEntry, error) { + // Look for it locally only + val, err := dstore.Get(ctx, namesys.IpnsDsKey(id)) + if err != nil { + return nil, err + } + + e := new(ipns_pb.IpnsEntry) + if err := proto.Unmarshal(val, e); err != nil { + return nil, err + } + return e, nil +} + +func verifyResolution(nsystems []namesys.NameSystem, key string, exp path.Path) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, n := range nsystems { + val, err := n.Resolve(ctx, key) + if err != nil { + return err + } + + if val != exp { + return errors.New("resolved wrong record") + } + } + return nil +} + +func verifyResolutionFails(nsystems []namesys.NameSystem, key string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, n := range nsystems { + _, err := n.Resolve(ctx, key) + if err == nil { + return errors.New("expected resolution to fail") + } + } + return nil +} diff --git a/namesys/resolve/resolve.go b/namesys/resolve/resolve.go new file mode 100644 index 0000000000..b2acf06028 --- /dev/null +++ b/namesys/resolve/resolve.go @@ -0,0 +1,56 @@ +package resolve + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/ipfs/boxo/path" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/ipfs/boxo/namesys" +) + +// ErrNoNamesys is an explicit error for when an IPFS node doesn't +// (yet) have a name system +var ErrNoNamesys = errors.New( + "core/resolve: no Namesys on IpfsNode - can't resolve ipns entry") + +// ResolveIPNS resolves /ipns paths +func ResolveIPNS(ctx context.Context, nsys namesys.NameSystem, p path.Path) (path.Path, error) { + ctx, span := namesys.StartSpan(ctx, "ResolveIPNS", trace.WithAttributes(attribute.String("Path", p.String()))) + defer span.End() + if strings.HasPrefix(p.String(), "/ipns/") { + // TODO(cryptix): we should be able to query the local cache for the path + if nsys == nil { + return "", ErrNoNamesys + } + + seg := p.Segments() + + if len(seg) < 2 || seg[1] == "" { // just "/" without further segments + err := fmt.Errorf("invalid path %q: ipns path missing IPNS ID", p) + return "", err + } + + extensions := seg[2:] + resolvable, err := path.FromSegments("/", seg[0], seg[1]) + if err != nil { + return "", err + } + + respath, err := nsys.Resolve(ctx, resolvable.String()) + if err != nil { + return "", err + } + + segments := append(respath.Segments(), extensions...) + p, err = path.FromSegments("/", segments...) 
+		if err != nil {
+			return "", err
+		}
+	}
+	return p, nil
+}
diff --git a/namesys/resolve_test.go b/namesys/resolve_test.go
new file mode 100644
index 0000000000..d2da312152
--- /dev/null
+++ b/namesys/resolve_test.go
@@ -0,0 +1,122 @@
+package namesys
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	ipns "github.com/ipfs/boxo/ipns"
+	path "github.com/ipfs/boxo/path"
+	mockrouting "github.com/ipfs/boxo/routing/mock"
+	ds "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+	tnet "github.com/libp2p/go-libp2p-testing/net"
+)
+
+func TestRoutingResolve(t *testing.T) {
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	serv := mockrouting.NewServer()
+	id := tnet.RandIdentityOrFatal(t)
+	d := serv.ClientWithDatastore(context.Background(), id, dstore)
+
+	resolver := NewIpnsResolver(d)
+	publisher := NewIpnsPublisher(d, dstore)
+
+	identity := tnet.RandIdentityOrFatal(t)
+
+	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
+	err := publisher.Publish(context.Background(), identity.PrivateKey(), h)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := resolver.Resolve(context.Background(), identity.ID().Pretty())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res != h {
+		t.Fatal("Got back incorrect value.")
+	}
+}
+
+func TestPreexistingExpiredRecord(t *testing.T) {
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	d := mockrouting.NewServer().ClientWithDatastore(context.Background(), tnet.RandIdentityOrFatal(t), dstore)
+
+	resolver := NewIpnsResolver(d)
+	publisher := NewIpnsPublisher(d, dstore)
+
+	identity := tnet.RandIdentityOrFatal(t)
+
+	// Make an expired record and put it in the datastore
+	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
+	eol := time.Now().Add(time.Hour * -1)
+
+	entry, err := ipns.Create(identity.PrivateKey(), []byte(h), 0, eol, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = PutRecordToRouting(context.Background(), d, identity.PublicKey(), entry)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Now, with an expired record in the system already, try to publish a new one
+	err = publisher.Publish(context.Background(), identity.PrivateKey(), h)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = verifyCanResolve(resolver, identity.ID().Pretty(), h)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPreexistingRecord(t *testing.T) {
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	d := mockrouting.NewServer().ClientWithDatastore(context.Background(), tnet.RandIdentityOrFatal(t), dstore)
+
+	resolver := NewIpnsResolver(d)
+	publisher := NewIpnsPublisher(d, dstore)
+
+	identity := tnet.RandIdentityOrFatal(t)
+
+	// Make a good record and put it in the datastore
+	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
+	eol := time.Now().Add(time.Hour)
+	entry, err := ipns.Create(identity.PrivateKey(), []byte(h), 0, eol, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = PutRecordToRouting(context.Background(), d, identity.PublicKey(), entry)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Now, with a valid record in the system already, try to publish a new one
+	err = publisher.Publish(context.Background(), identity.PrivateKey(), h)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = verifyCanResolve(resolver, identity.ID().Pretty(), h)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func verifyCanResolve(r Resolver, name string, exp path.Path) error {
+	res, err := r.Resolve(context.Background(), name)
+	if err != nil {
+		return err
+	}
+
+	if res != exp {
+		return errors.New("got
back wrong record") + } + + return nil +} diff --git a/namesys/routing.go b/namesys/routing.go new file mode 100644 index 0000000000..c51d6f72b8 --- /dev/null +++ b/namesys/routing.go @@ -0,0 +1,160 @@ +package namesys + +import ( + "context" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" + pb "github.com/ipfs/boxo/ipns/pb" + "github.com/ipfs/boxo/path" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + mh "github.com/multiformats/go-multihash" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var log = logging.Logger("namesys") + +// IpnsResolver implements NSResolver for the main IPFS SFS-like naming +type IpnsResolver struct { + routing routing.ValueStore +} + +// NewIpnsResolver constructs a name resolver using the IPFS Routing system +// to implement SFS-like naming on top. +func NewIpnsResolver(route routing.ValueStore) *IpnsResolver { + if route == nil { + panic("attempt to create resolver with nil routing system") + } + return &IpnsResolver{ + routing: route, + } +} + +// Resolve implements Resolver. +func (r *IpnsResolver) Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (path.Path, error) { + ctx, span := StartSpan(ctx, "IpnsResolver.Resolve", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + return resolve(ctx, r, name, opts.ProcessOpts(options)) +} + +// ResolveAsync implements Resolver. +func (r *IpnsResolver) ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result { + ctx, span := StartSpan(ctx, "IpnsResolver.ResolveAsync", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + return resolveAsync(ctx, r, name, opts.ProcessOpts(options)) +} + +// resolveOnce implements resolver. Uses the IPFS routing system to +// resolve SFS-like names. +func (r *IpnsResolver) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { + ctx, span := StartSpan(ctx, "IpnsResolver.ResolveOnceAsync", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + + out := make(chan onceResult, 1) + log.Debugf("RoutingResolver resolving %s", name) + cancel := func() {} + + if options.DhtTimeout != 0 { + // Resolution must complete within the timeout + ctx, cancel = context.WithTimeout(ctx, options.DhtTimeout) + } + + name = strings.TrimPrefix(name, "/ipns/") + + pid, err := peer.Decode(name) + if err != nil { + log.Debugf("RoutingResolver: could not convert public key hash %s to peer ID: %s\n", name, err) + out <- onceResult{err: err} + close(out) + cancel() + return out + } + + // Use the routing system to get the name. 
+	// Note that the DHT will call the ipns validator when retrieving
+	// the value, which in turn verifies the ipns record signature
+	ipnsKey := ipns.RecordKey(pid)
+
+	vals, err := r.routing.SearchValue(ctx, ipnsKey, dht.Quorum(int(options.DhtRecordCount)))
+	if err != nil {
+		log.Debugf("RoutingResolver: dht get for name %s failed: %s", name, err)
+		out <- onceResult{err: err}
+		close(out)
+		cancel()
+		return out
+	}
+
+	go func() {
+		defer cancel()
+		defer close(out)
+		ctx, span := StartSpan(ctx, "IpnsResolver.ResolveOnceAsync.Worker")
+		defer span.End()
+
+		for {
+			select {
+			case val, ok := <-vals:
+				if !ok {
+					return
+				}
+
+				entry := new(pb.IpnsEntry)
+				err = proto.Unmarshal(val, entry)
+				if err != nil {
+					log.Debugf("RoutingResolver: could not unmarshal value for name %s: %s", name, err)
+					emitOnceResult(ctx, out, onceResult{err: err})
+					return
+				}
+
+				var p path.Path
+				// check for old style record:
+				if valh, err := mh.Cast(entry.GetValue()); err == nil {
+					// It's an old-style multihash record
+					log.Debugf("encountered CIDv0 ipns entry: %s", valh)
+					p = path.FromCid(cid.NewCidV0(valh))
+				} else {
+					// Not a multihash, probably a new style record
+					p, err = path.ParsePath(string(entry.GetValue()))
+					if err != nil {
+						emitOnceResult(ctx, out, onceResult{err: err})
+						return
+					}
+				}
+
+				ttl := DefaultResolverCacheTTL
+				if entry.Ttl != nil {
+					ttl = time.Duration(*entry.Ttl)
+				}
+				switch eol, err := ipns.GetEOL(entry); err {
+				case ipns.ErrUnrecognizedValidity:
+					// No EOL.
+				case nil:
+					ttEol := time.Until(eol)
+					if ttEol < 0 {
+						// It *was* valid when we first resolved it.
+						ttl = 0
+					} else if ttEol < ttl {
+						ttl = ttEol
+					}
+				default:
+					log.Errorf("encountered error when parsing EOL: %s", err)
+					emitOnceResult(ctx, out, onceResult{err: err})
+					return
+				}
+
+				emitOnceResult(ctx, out, onceResult{value: p, ttl: ttl})
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	return out
+}
diff --git a/namesys/tracing.go b/namesys/tracing.go
new file mode 100644
index 0000000000..4ef84294ae
--- /dev/null
+++ b/namesys/tracing.go
@@ -0,0 +1,13 @@
+package namesys
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	return otel.Tracer("go-namesys").Start(ctx, fmt.Sprintf("Namesys.%s", name), opts...)
+}
diff --git a/path/error.go b/path/error.go
new file mode 100644
index 0000000000..dafc446b5a
--- /dev/null
+++ b/path/error.go
@@ -0,0 +1,27 @@
+package path
+
+import (
+	"fmt"
+)
+
+type ErrInvalidPath struct {
+	error error
+	path  string
+}
+
+func (e ErrInvalidPath) Error() string {
+	return fmt.Sprintf("invalid path %q: %s", e.path, e.error)
+}
+
+func (e ErrInvalidPath) Unwrap() error {
+	return e.error
+}
+
+func (e ErrInvalidPath) Is(err error) bool {
+	switch err.(type) {
+	case ErrInvalidPath:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/path/error_test.go b/path/error_test.go
new file mode 100644
index 0000000000..07aab64081
--- /dev/null
+++ b/path/error_test.go
@@ -0,0 +1,16 @@
+package path
+
+import (
+	"errors"
+	"testing"
+)
+
+func TestErrorIs(t *testing.T) {
+	if !errors.Is(ErrInvalidPath{path: "foo", error: errors.New("bar")}, ErrInvalidPath{}) {
+		t.Fatal("ErrInvalidPath value should match ErrInvalidPath")
+	}
+
+	if !errors.Is(&ErrInvalidPath{path: "foo", error: errors.New("bar")}, ErrInvalidPath{}) {
+		t.Fatal("pointer to ErrInvalidPath should match ErrInvalidPath")
+	}
+}
diff --git a/path/internal/tracing.go b/path/internal/tracing.go
new file mode 100644
index 0000000000..f9eda2f92c
--- /dev/null
+++ b/path/internal/tracing.go
@@ -0,0 +1,13 @@
+package internal
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	return otel.Tracer("go-path").Start(ctx, fmt.Sprintf("Path.%s", name), opts...)
+}
diff --git a/path/path.go b/path/path.go
new file mode 100644
index 0000000000..6d53ade047
--- /dev/null
+++ b/path/path.go
@@ -0,0 +1,190 @@
+// Package path contains utilities to work with ipfs paths.
+package path
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	cid "github.com/ipfs/go-cid"
+)
+
+// A Path represents an ipfs content path:
+//   - /<cid>/path/to/file
+//   - /ipfs/<cid>
+//   - /ipns/<cid>/path/to/folder
+//   - etc
+type Path string
+
+// ^^^
+// TODO: debate making this a private struct wrapped in a public interface
+// would allow us to control creation, and cache segments.
+
+// FromString safely converts a string type to a Path type.
+func FromString(s string) Path {
+	return Path(s)
+}
+
+// FromCid safely converts a cid.Cid type to a Path type.
+func FromCid(c cid.Cid) Path {
+	return Path("/ipfs/" + c.String())
+}
+
+// Segments returns the different elements of a path
+// (elements are delimited by a /).
+func (p Path) Segments() []string {
+	cleaned := path.Clean(string(p))
+	segments := strings.Split(cleaned, "/")
+
+	// Ignore leading slash
+	if len(segments[0]) == 0 {
+		segments = segments[1:]
+	}
+
+	return segments
+}
+
+// String converts a path to string.
+func (p Path) String() string {
+	return string(p)
+}
+
+// IsJustAKey returns true if the path is of the form <key>, /ipfs/<key>, or
+// /ipld/<key>
+func (p Path) IsJustAKey() bool {
+	parts := p.Segments()
+	return len(parts) == 2 && (parts[0] == "ipfs" || parts[0] == "ipld")
+}
+
+// PopLastSegment returns a new Path without its final segment, and the final
+// segment, separately. If there is no more to pop (the path is just a key),
+// the original path is returned.
+func (p Path) PopLastSegment() (Path, string, error) {
+	if p.IsJustAKey() {
+		return p, "", nil
+	}
+
+	segs := p.Segments()
+	newPath, err := ParsePath("/" + strings.Join(segs[:len(segs)-1], "/"))
+	if err != nil {
+		return "", "", err
+	}
+
+	return newPath, segs[len(segs)-1], nil
+}
+
+// FromSegments returns a path given its different segments.
+func FromSegments(prefix string, seg ...string) (Path, error) {
+	return ParsePath(prefix + strings.Join(seg, "/"))
+}
+
+// ParsePath returns a well-formed ipfs Path.
+// The returned path will always be prefixed with /ipfs/ or /ipns/.
+// The prefix will be added if not present in the given string.
+// This function will return an error when the given string is
+// not a valid ipfs path.
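+//
+// Illustrative examples (the CID is the one used in this package's tests):
+//
+//	ParsePath("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")       // -> "/ipfs/QmdfTb...zR1n"
+//	ParsePath("/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") // unchanged; the CID is validated
+//	ParsePath("/ipns/example")                                        // unchanged; IPNS names are not CID-validated
+//	ParsePath("/foo/bar")                                             // ErrInvalidPath: unknown namespace "foo"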
+func ParsePath(txt string) (Path, error) {
+	parts := strings.Split(txt, "/")
+	if len(parts) == 1 {
+		kp, err := ParseCidToPath(txt)
+		if err == nil {
+			return kp, nil
+		}
+	}
+
+	// if the path doesn't begin with a '/',
+	// we expect this to start with a hash, and be an 'ipfs' path
+	if parts[0] != "" {
+		if _, err := decodeCid(parts[0]); err != nil {
+			return "", &ErrInvalidPath{error: err, path: txt}
+		}
+		// The case when the path starts with hash without a protocol prefix
+		return Path("/ipfs/" + txt), nil
+	}
+
+	if len(parts) < 3 {
+		return "", &ErrInvalidPath{error: fmt.Errorf("invalid ipfs path"), path: txt}
+	}
+
+	// TODO: make this smarter
+	switch parts[1] {
+	case "ipfs", "ipld":
+		if parts[2] == "" {
+			return "", &ErrInvalidPath{error: fmt.Errorf("not enough path components"), path: txt}
+		}
+		// Validate Cid.
+		_, err := decodeCid(parts[2])
+		if err != nil {
+			return "", &ErrInvalidPath{error: fmt.Errorf("invalid CID: %w", err), path: txt}
+		}
+	case "ipns":
+		if parts[2] == "" {
+			return "", &ErrInvalidPath{error: fmt.Errorf("not enough path components"), path: txt}
+		}
+	default:
+		return "", &ErrInvalidPath{error: fmt.Errorf("unknown namespace %q", parts[1]), path: txt}
+	}
+
+	return Path(txt), nil
+}
+
+// ParseCidToPath takes a CID in string form and returns a valid ipfs Path.
+func ParseCidToPath(txt string) (Path, error) {
+	if txt == "" {
+		return "", &ErrInvalidPath{error: fmt.Errorf("empty"), path: txt}
+	}
+
+	c, err := decodeCid(txt)
+	if err != nil {
+		return "", &ErrInvalidPath{error: err, path: txt}
+	}
+
+	return FromCid(c), nil
+}
+
+// IsValid checks if a path is a valid ipfs Path.
+func (p *Path) IsValid() error {
+	_, err := ParsePath(p.String())
+	return err
+}
+
+// Join joins string slices using /
+func Join(pths []string) string {
+	return strings.Join(pths, "/")
+}
+
+// SplitList splits strings using /
+func SplitList(pth string) []string {
+	return strings.Split(pth, "/")
+}
+
+// SplitAbsPath cleans up and splits fpath. It extracts the first component
+// (which must be a Multihash) and returns it separately.
+func SplitAbsPath(fpath Path) (cid.Cid, []string, error) {
+	parts := fpath.Segments()
+	if parts[0] == "ipfs" || parts[0] == "ipld" {
+		parts = parts[1:]
+	}
+
+	// if nothing, bail.
+ if len(parts) == 0 { + return cid.Cid{}, nil, &ErrInvalidPath{error: fmt.Errorf("empty"), path: string(fpath)} + } + + c, err := decodeCid(parts[0]) + // first element in the path is a cid + if err != nil { + return cid.Cid{}, nil, &ErrInvalidPath{error: fmt.Errorf("invalid CID: %w", err), path: string(fpath)} + } + + return c, parts[1:], nil +} + +func decodeCid(cstr string) (cid.Cid, error) { + c, err := cid.Decode(cstr) + if err != nil && len(cstr) == 46 && cstr[:2] == "qm" { // https://github.com/ipfs/go-ipfs/issues/7792 + return cid.Cid{}, fmt.Errorf("%v (possible lowercased CIDv0; consider converting to a case-agnostic CIDv1, such as base32)", err) + } + return c, err +} diff --git a/path/path_test.go b/path/path_test.go new file mode 100644 index 0000000000..2b26a56786 --- /dev/null +++ b/path/path_test.go @@ -0,0 +1,128 @@ +package path + +import ( + "strings" + "testing" +) + +func TestPathParsing(t *testing.T) { + cases := map[string]bool{ + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": true, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, + "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": true, + "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, + "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, + "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + "/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, + "/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": false, + "/ipfs/foo": false, + "/ipfs/": false, + "ipfs/": false, + "ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, + "/ipld/foo": false, + "/ipld/": false, + "ipld/": false, + "ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, + } + + for p, expected := range cases { + _, err := ParsePath(p) + valid := err == nil + if valid != expected { + t.Fatalf("expected %s to have valid == %t", p, expected) + } + } +} + +func TestNoComponents(t *testing.T) { + for _, s := range []string{ + "/ipfs/", + "/ipns/", + "/ipld/", + } { + _, err := ParsePath(s) + if err == nil || !strings.Contains(err.Error(), "not enough path components") || !strings.Contains(err.Error(), s) { + t.Error("wrong error") + } + } +} + +func TestInvalidPaths(t *testing.T) { + for _, s := range []string{ + "/ipfs", + "/testfs", + "/", + } { + _, err := ParsePath(s) + if err == nil || !strings.Contains(err.Error(), "invalid ipfs path") || !strings.Contains(err.Error(), s) { + t.Error("wrong error") + } + } +} + +func TestIsJustAKey(t *testing.T) { + cases := map[string]bool{ + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": false, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b": false, + "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, + "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b": false, + "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, + } + + for p, expected := range cases { + path, err := ParsePath(p) + if err != nil { + t.Fatalf("ParsePath failed to parse \"%s\", but should have succeeded", p) + } + result := path.IsJustAKey() + if result != expected { + t.Fatalf("expected IsJustAKey(%s) to 
return %v, not %v", p, expected, result) + } + } +} + +func TestPopLastSegment(t *testing.T) { + cases := map[string][]string{ + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ""}, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ""}, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "a"}, + "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", "b"}, + "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y/z": {"/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y", "z"}, + "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y/z": {"/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y", "z"}, + } + + for p, expected := range cases { + path, err := ParsePath(p) + if err != nil { + t.Fatalf("ParsePath failed to parse \"%s\", but should have succeeded", p) + } + head, tail, err := path.PopLastSegment() + if err != nil { + t.Fatalf("PopLastSegment failed, but should have succeeded: %s", err) + } + headStr := head.String() + if headStr != expected[0] { + t.Fatalf("expected head of PopLastSegment(%s) to return %v, not %v", p, expected[0], headStr) + } + if tail != expected[1] { + t.Fatalf("expected tail of PopLastSegment(%s) to return %v, not %v", p, expected[1], tail) + } + } +} + +func TestV0ErrorDueToLowercase(t *testing.T) { + badb58 := "/ipfs/qmbwqxbekc3p8tqskc98xmwnzrzdtrlmimpl8wbutgsmnr" + _, err := ParsePath(badb58) + if err == nil { + t.Fatal("should have failed to decode") + } + if !strings.HasSuffix(err.Error(), "(possible lowercased CIDv0; consider converting to a case-agnostic CIDv1, such as base32)") { + t.Fatal("should have meaningful info about case-insensitive fix") + } +} diff --git a/path/resolver/resolver.go b/path/resolver/resolver.go new file mode 100644 index 0000000000..8192a9eb0b --- /dev/null +++ b/path/resolver/resolver.go @@ -0,0 +1,322 @@ +// Package resolver implements utilities for resolving paths within ipfs. +package resolver + +import ( + "context" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/ipfs/boxo/fetcher" + fetcherhelpers "github.com/ipfs/boxo/fetcher/helpers" + path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path/internal" + cid "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/schema" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" +) + +var log = logging.Logger("pathresolv") + +// ErrNoComponents is used when Paths after a protocol +// do not contain at least one component +var ErrNoComponents = errors.New( + "path must contain at least one component") + +// ErrNoLink is returned when a link is not found in a path +type ErrNoLink struct { + Name string + Node cid.Cid +} + +// Error implements the Error interface for ErrNoLink with a useful +// human readable message. +func (e ErrNoLink) Error() string { + return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.String()) +} + +// Resolver provides path resolution to IPFS. 
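+// It is implemented below by basicResolver, which walks a content path by
+// running ipld-prime selector traversals over blocks loaded through a
+// fetcher.Factory.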
+type Resolver interface {
+	// ResolveToLastNode walks the given path and returns the cid of the
+	// last block referenced by the path, and the path segments to
+	// traverse from the final block boundary to the final node within the
+	// block.
+	ResolveToLastNode(ctx context.Context, fpath path.Path) (cid.Cid, []string, error)
+	// ResolvePath fetches the node for given path. It returns the last
+	// item returned by ResolvePathComponents and the last link traversed
+	// which can be used to recover the block.
+	ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, ipld.Link, error)
+	// ResolvePathComponents fetches the nodes for each segment of the given path.
+	// It uses the first path component as a hash (key) of the first node, then
+	// resolves all other components walking the links via a selector traversal
+	ResolvePathComponents(ctx context.Context, fpath path.Path) ([]ipld.Node, error)
+}
+
+// basicResolver implements the Resolver interface.
+// It references a FetcherFactory, which it uses to resolve nodes.
+// TODO: now that this is more modular, try to unify this code with the
+// resolvers in namesys.
+type basicResolver struct {
+	FetcherFactory fetcher.Factory
+}
+
+// NewBasicResolver constructs a new basic resolver.
+func NewBasicResolver(fetcherFactory fetcher.Factory) Resolver {
+	return &basicResolver{
+		FetcherFactory: fetcherFactory,
+	}
+}
+
+// ResolveToLastNode walks the given path and returns the cid of the last
+// block referenced by the path, and the path segments to traverse from the
+// final block boundary to the final node within the block.
+func (r *basicResolver) ResolveToLastNode(ctx context.Context, fpath path.Path) (cid.Cid, []string, error) {
+	ctx, span := internal.StartSpan(ctx, "basicResolver.ResolveToLastNode", trace.WithAttributes(attribute.Stringer("Path", fpath)))
+	defer span.End()
+
+	c, p, err := path.SplitAbsPath(fpath)
+	if err != nil {
+		return cid.Cid{}, nil, err
+	}
+
+	if len(p) == 0 {
+		return c, nil, nil
+	}
+
+	// create a selector to traverse and match all path segments
+	pathSelector := pathAllSelector(p[:len(p)-1])
+
+	// create a new cancellable session
+	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	defer cancel()
+
+	// resolve node before last path segment
+	nodes, lastCid, depth, err := r.resolveNodes(ctx, c, pathSelector)
+	if err != nil {
+		return cid.Cid{}, nil, err
+	}
+
+	if len(nodes) < 1 {
+		return cid.Cid{}, nil, fmt.Errorf("path %v did not resolve to a node", fpath)
+	} else if len(nodes) < len(p) {
+		return cid.Undef, nil, ErrNoLink{Name: p[len(nodes)-1], Node: lastCid}
+	}
+
+	parent := nodes[len(nodes)-1]
+	lastSegment := p[len(p)-1]
+
+	// find final path segment within node
+	nd, err := parent.LookupBySegment(ipld.ParsePathSegment(lastSegment))
+	switch err.(type) {
+	case nil:
+	case schema.ErrNoSuchField:
+		return cid.Undef, nil, ErrNoLink{Name: lastSegment, Node: lastCid}
+	default:
+		return cid.Cid{}, nil, err
+	}
+
+	// if the last node is not a link, return its cid along with the
+	// remainder of the path
+	if nd.Kind() != ipld.Kind_Link {
+		// return the cid and the remainder of the path
+		return lastCid, p[len(p)-depth-1:], nil
+	}
+
+	lnk, err := nd.AsLink()
+	if err != nil {
+		return cid.Cid{}, nil, err
+	}
+
+	clnk, ok := lnk.(cidlink.Link)
+	if !ok {
+		return cid.Cid{}, nil, fmt.Errorf("path %v resolves to a link that is not a cid link: %v", fpath, lnk)
+	}
+
+	return clnk.Cid, []string{}, nil
+}
+
+// ResolvePath fetches the node for given path.
It returns the last item +// returned by ResolvePathComponents and the last link traversed which can be used to recover the block. +// +// Note: if/when the context is cancelled or expires then if a multi-block ADL node is returned then it may not be +// possible to load certain values. +func (r *basicResolver) ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, ipld.Link, error) { + ctx, span := internal.StartSpan(ctx, "basicResolver.ResolvePath", trace.WithAttributes(attribute.Stringer("Path", fpath))) + defer span.End() + + // validate path + if err := fpath.IsValid(); err != nil { + return nil, nil, err + } + + c, p, err := path.SplitAbsPath(fpath) + if err != nil { + return nil, nil, err + } + + // create a selector to traverse all path segments but only match the last + pathSelector := pathLeafSelector(p) + + nodes, c, _, err := r.resolveNodes(ctx, c, pathSelector) + if err != nil { + return nil, nil, err + } + if len(nodes) < 1 { + return nil, nil, fmt.Errorf("path %v did not resolve to a node", fpath) + } + return nodes[len(nodes)-1], cidlink.Link{Cid: c}, nil +} + +// ResolveSingle simply resolves one hop of a path through a graph with no +// extra context (does not opaquely resolve through sharded nodes) +// Deprecated: fetch node as ipld-prime or convert it and then use a selector to traverse through it. +func ResolveSingle(ctx context.Context, ds format.NodeGetter, nd format.Node, names []string) (*format.Link, []string, error) { + _, span := internal.StartSpan(ctx, "ResolveSingle", trace.WithAttributes(attribute.Stringer("CID", nd.Cid()))) + defer span.End() + return nd.ResolveLink(names) +} + +// ResolvePathComponents fetches the nodes for each segment of the given path. +// It uses the first path component as a hash (key) of the first node, then +// resolves all other components walking the links via a selector traversal +// +// Note: if/when the context is cancelled or expires then if a multi-block ADL node is returned then it may not be +// possible to load certain values. +func (r *basicResolver) ResolvePathComponents(ctx context.Context, fpath path.Path) ([]ipld.Node, error) { + ctx, span := internal.StartSpan(ctx, "basicResolver.ResolvePathComponents", trace.WithAttributes(attribute.Stringer("Path", fpath))) + defer span.End() + + //lint:ignore SA1019 TODO: replace EventBegin + evt := log.EventBegin(ctx, "resolvePathComponents", logging.LoggableMap{"fpath": fpath}) + defer evt.Done() + + // validate path + if err := fpath.IsValid(); err != nil { + evt.Append(logging.LoggableMap{"error": err.Error()}) + return nil, err + } + + c, p, err := path.SplitAbsPath(fpath) + if err != nil { + evt.Append(logging.LoggableMap{"error": err.Error()}) + return nil, err + } + + // create a selector to traverse and match all path segments + pathSelector := pathAllSelector(p) + + nodes, _, _, err := r.resolveNodes(ctx, c, pathSelector) + if err != nil { + evt.Append(logging.LoggableMap{"error": err.Error()}) + } + + return nodes, err +} + +// ResolveLinks iteratively resolves names by walking the link hierarchy. +// Every node is fetched from the Fetcher, resolving the next name. +// Returns the list of nodes forming the path, starting with ndd. This list is +// guaranteed never to be empty. +// +// ResolveLinks(nd, []string{"foo", "bar", "baz"}) +// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links +// +// Note: if/when the context is cancelled or expires then if a multi-block ADL node is returned then it may not be +// possible to load certain values. 
+func (r *basicResolver) ResolveLinks(ctx context.Context, ndd ipld.Node, names []string) ([]ipld.Node, error) { + ctx, span := internal.StartSpan(ctx, "basicResolver.ResolveLinks") + defer span.End() + + //lint:ignore SA1019 TODO: replace EventBegin + evt := log.EventBegin(ctx, "resolveLinks", logging.LoggableMap{"names": names}) + defer evt.Done() + + // create a selector to traverse and match all path segments + pathSelector := pathAllSelector(names) + + session := r.FetcherFactory.NewSession(ctx) + + // traverse selector + nodes := []ipld.Node{ndd} + err := session.NodeMatching(ctx, ndd, pathSelector, func(res fetcher.FetchResult) error { + nodes = append(nodes, res.Node) + return nil + }) + if err != nil { + evt.Append(logging.LoggableMap{"error": err.Error()}) + return nil, err + } + + return nodes, err +} + +// Finds nodes matching the selector starting with a cid. Returns the matched nodes, the cid of the block containing +// the last node, and the depth of the last node within its block (root is depth 0). +func (r *basicResolver) resolveNodes(ctx context.Context, c cid.Cid, sel ipld.Node) ([]ipld.Node, cid.Cid, int, error) { + ctx, span := internal.StartSpan(ctx, "basicResolver.resolveNodes", trace.WithAttributes(attribute.Stringer("CID", c))) + defer span.End() + session := r.FetcherFactory.NewSession(ctx) + + // traverse selector + lastLink := cid.Undef + depth := 0 + nodes := []ipld.Node{} + err := fetcherhelpers.BlockMatching(ctx, session, cidlink.Link{Cid: c}, sel, func(res fetcher.FetchResult) error { + if res.LastBlockLink == nil { + res.LastBlockLink = cidlink.Link{Cid: c} + } + cidLnk, ok := res.LastBlockLink.(cidlink.Link) + if !ok { + return fmt.Errorf("link is not a cidlink: %v", cidLnk) + } + + // if we hit a block boundary + if !lastLink.Equals(cidLnk.Cid) { + depth = 0 + lastLink = cidLnk.Cid + } else { + depth++ + } + + nodes = append(nodes, res.Node) + return nil + }) + if err != nil { + return nil, cid.Undef, 0, err + } + + return nodes, lastLink, depth, nil +} + +func pathLeafSelector(path []string) ipld.Node { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + return pathSelector(path, ssb, func(p string, s builder.SelectorSpec) builder.SelectorSpec { + return ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { efsb.Insert(p, s) }) + }) +} + +func pathAllSelector(path []string) ipld.Node { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + return pathSelector(path, ssb, func(p string, s builder.SelectorSpec) builder.SelectorSpec { + return ssb.ExploreUnion( + ssb.Matcher(), + ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { efsb.Insert(p, s) }), + ) + }) +} + +func pathSelector(path []string, ssb builder.SelectorSpecBuilder, reduce func(string, builder.SelectorSpec) builder.SelectorSpec) ipld.Node { + spec := ssb.Matcher() + for i := len(path) - 1; i >= 0; i-- { + spec = reduce(path[i], spec) + } + return spec.Node() +} diff --git a/path/resolver/resolver_test.go b/path/resolver/resolver_test.go new file mode 100644 index 0000000000..580108fe38 --- /dev/null +++ b/path/resolver/resolver_test.go @@ -0,0 +1,293 @@ +package resolver_test + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/ipfs/go-cid" + blocks "github.com/ipfs/boxo/blocks" + bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice" + dagpb "github.com/ipld/go-codec-dagpb" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode 
"github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/schema" + "github.com/multiformats/go-multihash" + + path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path/resolver" + merkledag "github.com/ipfs/boxo/ipld/merkledag" + dagmock "github.com/ipfs/boxo/ipld/merkledag/test" + "github.com/ipfs/go-unixfsnode" + dagcbor "github.com/ipld/go-ipld-prime/codec/dagcbor" + dagjson "github.com/ipld/go-ipld-prime/codec/dagjson" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func randNode() *merkledag.ProtoNode { + node := new(merkledag.ProtoNode) + node.SetData(make([]byte, 32)) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + r.Read(node.Data()) + return node +} + +func TestRecurivePathResolution(t *testing.T) { + ctx := context.Background() + bsrv := dagmock.Bserv() + + a := randNode() + b := randNode() + c := randNode() + + err := b.AddNodeLink("grandchild", c) + if err != nil { + t.Fatal(err) + } + + err = a.AddNodeLink("child", b) + if err != nil { + t.Fatal(err) + } + + for _, n := range []*merkledag.ProtoNode{a, b, c} { + err = bsrv.AddBlock(ctx, n) + if err != nil { + t.Fatal(err) + } + } + + aKey := a.Cid() + + segments := []string{aKey.String(), "child", "grandchild"} + p, err := path.FromSegments("/ipfs/", segments...) + if err != nil { + t.Fatal(err) + } + + fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) + fetcherFactory.NodeReifier = unixfsnode.Reify + fetcherFactory.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { + if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { + return tlnkNd.LinkTargetNodePrototype(), nil + } + return basicnode.Prototype.Any, nil + }) + resolver := resolver.NewBasicResolver(fetcherFactory) + + node, lnk, err := resolver.ResolvePath(ctx, p) + if err != nil { + t.Fatal(err) + } + + uNode, ok := node.(unixfsnode.PathedPBNode) + require.True(t, ok) + fd := uNode.FieldData() + byts, err := fd.Must().AsBytes() + require.NoError(t, err) + + assert.Equal(t, cidlink.Link{Cid: c.Cid()}, lnk) + + assert.Equal(t, c.Data(), byts) + cKey := c.Cid() + + rCid, rest, err := resolver.ResolveToLastNode(ctx, p) + if err != nil { + t.Fatal(err) + } + + if len(rest) != 0 { + t.Error("expected rest to be empty") + } + + if rCid.String() != cKey.String() { + t.Fatal(fmt.Errorf( + "ResolveToLastNode failed for %s: %s != %s", + p.String(), rCid.String(), cKey.String())) + } + + p2, err := path.FromSegments("/ipfs/", aKey.String()) + if err != nil { + t.Fatal(err) + } + + rCid, rest, err = resolver.ResolveToLastNode(ctx, p2) + if err != nil { + t.Fatal(err) + } + + if len(rest) != 0 { + t.Error("expected rest to be empty") + } + + if rCid.String() != aKey.String() { + t.Fatal(fmt.Errorf( + "ResolveToLastNode failed for %s: %s != %s", + p.String(), rCid.String(), cKey.String())) + } +} +func TestResolveToLastNode_ErrNoLink(t *testing.T) { + ctx := context.Background() + bsrv := dagmock.Bserv() + + a := randNode() + b := randNode() + c := randNode() + + err := b.AddNodeLink("grandchild", c) + if err != nil { + t.Fatal(err) + } + + err = a.AddNodeLink("child", b) + if err != nil { + t.Fatal(err) + } + + for _, n := range []*merkledag.ProtoNode{a, b, c} { + err = bsrv.AddBlock(ctx, n) + if err != nil { + t.Fatal(err) + } + } + + aKey := a.Cid() + + fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) + fetcherFactory.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { + 
if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { + return tlnkNd.LinkTargetNodePrototype(), nil + } + return basicnode.Prototype.Any, nil + }) + fetcherFactory.NodeReifier = unixfsnode.Reify + r := resolver.NewBasicResolver(fetcherFactory) + + // test missing link intermediate segment + segments := []string{aKey.String(), "cheese", "time"} + p, err := path.FromSegments("/ipfs/", segments...) + require.NoError(t, err) + + _, _, err = r.ResolveToLastNode(ctx, p) + require.EqualError(t, err, resolver.ErrNoLink{Name: "cheese", Node: aKey}.Error()) + + // test missing link at end + bKey := b.Cid() + segments = []string{aKey.String(), "child", "apples"} + p, err = path.FromSegments("/ipfs/", segments...) + require.NoError(t, err) + + _, _, err = r.ResolveToLastNode(ctx, p) + require.EqualError(t, err, resolver.ErrNoLink{Name: "apples", Node: bKey}.Error()) +} + +func TestResolveToLastNode_NoUnnecessaryFetching(t *testing.T) { + ctx := context.Background() + bsrv := dagmock.Bserv() + + a := randNode() + b := randNode() + + err := a.AddNodeLink("child", b) + require.NoError(t, err) + + err = bsrv.AddBlock(ctx, a) + require.NoError(t, err) + + aKey := a.Cid() + + segments := []string{aKey.String(), "child"} + p, err := path.FromSegments("/ipfs/", segments...) + require.NoError(t, err) + + fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) + fetcherFactory.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { + if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { + return tlnkNd.LinkTargetNodePrototype(), nil + } + return basicnode.Prototype.Any, nil + }) + fetcherFactory.NodeReifier = unixfsnode.Reify + resolver := resolver.NewBasicResolver(fetcherFactory) + + resolvedCID, remainingPath, err := resolver.ResolveToLastNode(ctx, p) + require.NoError(t, err) + + require.Equal(t, len(remainingPath), 0, "cannot have remaining path") + require.Equal(t, b.Cid(), resolvedCID) +} + +func TestPathRemainder(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + bsrv := dagmock.Bserv() + + nb := basicnode.Prototype.Any.NewBuilder() + err := dagjson.Decode(nb, strings.NewReader(`{"foo": {"bar": "baz"}}`)) + require.NoError(t, err) + out := new(bytes.Buffer) + err = dagcbor.Encode(nb.Build(), out) + require.NoError(t, err) + lnk, err := cid.Prefix{ + Version: 1, + Codec: cid.DagCBOR, + MhType: multihash.SHA2_256, + MhLength: 32, + }.Sum(out.Bytes()) + require.NoError(t, err) + blk, err := blocks.NewBlockWithCid(out.Bytes(), lnk) + require.NoError(t, err) + bsrv.AddBlock(ctx, blk) + fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) + resolver := resolver.NewBasicResolver(fetcherFactory) + + rp1, remainder, err := resolver.ResolveToLastNode(ctx, path.FromString(lnk.String()+"/foo/bar")) + require.NoError(t, err) + + assert.Equal(t, lnk, rp1) + require.Equal(t, "foo/bar", path.Join(remainder)) +} + +func TestResolveToLastNode_MixedSegmentTypes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + bsrv := dagmock.Bserv() + a := randNode() + err := bsrv.AddBlock(ctx, a) + if err != nil { + t.Fatal(err) + } + + nb := basicnode.Prototype.Any.NewBuilder() + json := `{"foo":{"bar":[0,{"boom":["baz",1,2,{"/":"CID"},"blop"]}]}}` + json = strings.ReplaceAll(json, "CID", a.Cid().String()) + err = dagjson.Decode(nb, strings.NewReader(json)) + require.NoError(t, err) + out := new(bytes.Buffer) + err = dagcbor.Encode(nb.Build(), out) + require.NoError(t, err) + lnk, 
err := cid.Prefix{ + Version: 1, + Codec: cid.DagCBOR, + MhType: multihash.SHA2_256, + MhLength: 32, + }.Sum(out.Bytes()) + require.NoError(t, err) + blk, err := blocks.NewBlockWithCid(out.Bytes(), lnk) + require.NoError(t, err) + bsrv.AddBlock(ctx, blk) + fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) + resolver := resolver.NewBasicResolver(fetcherFactory) + + cid, remainder, err := resolver.ResolveToLastNode(ctx, path.FromString(lnk.String()+"/foo/bar/1/boom/3")) + require.NoError(t, err) + + assert.Equal(t, 0, len(remainder)) + assert.True(t, cid.Equals(a.Cid())) +} diff --git a/pinning/pinner/.gitignore b/pinning/pinner/.gitignore new file mode 100644 index 0000000000..3c342889d2 --- /dev/null +++ b/pinning/pinner/.gitignore @@ -0,0 +1,8 @@ +*~ +*.log + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool +*.out diff --git a/pinning/pinner/dsindex/error.go b/pinning/pinner/dsindex/error.go new file mode 100644 index 0000000000..f3b685bb95 --- /dev/null +++ b/pinning/pinner/dsindex/error.go @@ -0,0 +1,8 @@ +package dsindex + +import "errors" + +var ( + ErrEmptyKey = errors.New("key is empty") + ErrEmptyValue = errors.New("value is empty") +) diff --git a/pinning/pinner/dsindex/indexer.go b/pinning/pinner/dsindex/indexer.go new file mode 100644 index 0000000000..8384ad5d5a --- /dev/null +++ b/pinning/pinner/dsindex/indexer.go @@ -0,0 +1,277 @@ +// Package dsindex provides secondary indexing functionality for a datastore. +package dsindex + +import ( + "context" + "fmt" + "path" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-datastore/query" + "github.com/multiformats/go-multibase" +) + +// Indexer maintains a secondary index. An index is a collection of key-value +// mappings where the key is the secondary index that maps to one or more +// values, where each value is a unique key being indexed. +type Indexer interface { + // Add adds the specified value to the key + Add(ctx context.Context, key, value string) error + + // Delete deletes the specified value from the key. If the value is not in + // the datastore, this method returns no error. + Delete(ctx context.Context, key, value string) error + + // DeleteKey deletes all values in the given key. If a key is not in the + // datastore, this method returns no error. Returns a count of values that + // were deleted. + DeleteKey(ctx context.Context, key string) (count int, err error) + + // DeleteAll deletes all keys managed by this Indexer. Returns a count of + // the values that were deleted. + DeleteAll(ctx context.Context) (count int, err error) + + // ForEach calls the function for each value in the specified key, until + // there are no more values, or until the function returns false. If key + // is empty string, then all keys are iterated. + ForEach(ctx context.Context, key string, fn func(key, value string) bool) error + + // HasValue determines if the key contains the specified value + HasValue(ctx context.Context, key, value string) (bool, error) + + // HasAny determines if any value is in the specified key. If key is + // empty string, then all values are searched. + HasAny(ctx context.Context, key string) (bool, error) + + // Search returns all values for the given key + Search(ctx context.Context, key string) (values []string, err error) +} + +// indexer is a simple implementation of Indexer. This implementation relies +// on the underlying data store to support efficient querying by prefix. 
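+// Keys and values are multibase (base64url) encoded before being joined
+// into a datastore key, so arbitrary strings survive the datastore's key
+// syntax.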
+// +// TODO: Consider adding caching +type indexer struct { + dstore ds.Datastore +} + +// New creates a new datastore index. All indexes are stored under the +// specified index name. +// +// To persist the actions of calling Indexer functions, it is necessary to call +// dstore.Sync. +func New(dstore ds.Datastore, name ds.Key) Indexer { + return &indexer{ + dstore: namespace.Wrap(dstore, name), + } +} + +func (x *indexer) Add(ctx context.Context, key, value string) error { + if key == "" { + return ErrEmptyKey + } + if value == "" { + return ErrEmptyValue + } + dsKey := ds.NewKey(encode(key)).ChildString(encode(value)) + return x.dstore.Put(ctx, dsKey, []byte{}) +} + +func (x *indexer) Delete(ctx context.Context, key, value string) error { + if key == "" { + return ErrEmptyKey + } + if value == "" { + return ErrEmptyValue + } + return x.dstore.Delete(ctx, ds.NewKey(encode(key)).ChildString(encode(value))) +} + +func (x *indexer) DeleteKey(ctx context.Context, key string) (int, error) { + if key == "" { + return 0, ErrEmptyKey + } + return x.deletePrefix(ctx, encode(key)) +} + +func (x *indexer) DeleteAll(ctx context.Context) (int, error) { + return x.deletePrefix(ctx, "") +} + +func (x *indexer) ForEach(ctx context.Context, key string, fn func(key, value string) bool) error { + if key != "" { + key = encode(key) + } + + q := query.Query{ + Prefix: key, + KeysOnly: true, + } + results, err := x.dstore.Query(ctx, q) + if err != nil { + return err + } + defer results.Close() + + for r := range results.Next() { + if ctx.Err() != nil { + return ctx.Err() + } + if r.Error != nil { + return fmt.Errorf("cannot read index: %v", r.Error) + } + ent := r.Entry + decIdx, err := decode(path.Base(path.Dir(ent.Key))) + if err != nil { + return fmt.Errorf("cannot decode index: %v", err) + } + decKey, err := decode(path.Base(ent.Key)) + if err != nil { + return fmt.Errorf("cannot decode key: %v", err) + } + if !fn(decIdx, decKey) { + return nil + } + } + + return nil +} + +func (x *indexer) HasValue(ctx context.Context, key, value string) (bool, error) { + if key == "" { + return false, ErrEmptyKey + } + if value == "" { + return false, ErrEmptyValue + } + return x.dstore.Has(ctx, ds.NewKey(encode(key)).ChildString(encode(value))) +} + +func (x *indexer) HasAny(ctx context.Context, key string) (bool, error) { + var any bool + err := x.ForEach(ctx, key, func(key, value string) bool { + any = true + return false + }) + return any, err +} + +func (x *indexer) Search(ctx context.Context, key string) ([]string, error) { + if key == "" { + return nil, ErrEmptyKey + } + ents, err := x.queryPrefix(ctx, encode(key)) + if err != nil { + return nil, err + } + if len(ents) == 0 { + return nil, nil + } + + values := make([]string, len(ents)) + for i := range ents { + values[i], err = decode(path.Base(ents[i].Key)) + if err != nil { + return nil, fmt.Errorf("cannot decode value: %v", err) + } + } + return values, nil +} + +// SyncIndex synchronizes the keys in the target Indexer to match those of the +// ref Indexer. This function does not change this indexer's key root (name +// passed into New). 
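+//
+// The returned bool reports whether target was modified. A minimal sketch
+// of intended use (dstore and the index names here are hypothetical):
+//
+//	changed, err := SyncIndex(ctx, refIndex, targetIndex)
+//	if err == nil && changed {
+//		// persist the target's changes
+//		err = dstore.Sync(ctx, ds.NewKey("/data/targetindex"))
+//	}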
+func SyncIndex(ctx context.Context, ref, target Indexer) (bool, error) { + // Build reference index map + refs := map[string]string{} + err := ref.ForEach(ctx, "", func(key, value string) bool { + refs[value] = key + return true + }) + if err != nil { + return false, err + } + if len(refs) == 0 { + return false, nil + } + + // Compare current indexes + dels := map[string]string{} + err = target.ForEach(ctx, "", func(key, value string) bool { + refKey, ok := refs[value] + if ok && refKey == key { + // same in both; delete from refs, do not add to dels + delete(refs, value) + } else { + dels[value] = key + } + return true + }) + if err != nil { + return false, err + } + + // Items in dels are keys that no longer exist + for value, key := range dels { + err = target.Delete(ctx, key, value) + if err != nil { + return false, err + } + } + + // What remains in refs are keys that need to be added + for value, key := range refs { + err = target.Add(ctx, key, value) + if err != nil { + return false, err + } + } + + return len(refs) != 0 || len(dels) != 0, nil +} + +func (x *indexer) deletePrefix(ctx context.Context, prefix string) (int, error) { + ents, err := x.queryPrefix(ctx, prefix) + if err != nil { + return 0, err + } + + for i := range ents { + err = x.dstore.Delete(ctx, ds.NewKey(ents[i].Key)) + if err != nil { + return 0, err + } + } + + return len(ents), nil +} + +func (x *indexer) queryPrefix(ctx context.Context, prefix string) ([]query.Entry, error) { + q := query.Query{ + Prefix: prefix, + KeysOnly: true, + } + results, err := x.dstore.Query(ctx, q) + if err != nil { + return nil, err + } + return results.Rest() +} + +func encode(data string) string { + encData, err := multibase.Encode(multibase.Base64url, []byte(data)) + if err != nil { + // programming error; using unsupported encoding + panic(err.Error()) + } + return encData +} + +func decode(data string) (string, error) { + _, b, err := multibase.Decode(data) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/pinning/pinner/dsindex/indexer_test.go b/pinning/pinner/dsindex/indexer_test.go new file mode 100644 index 0000000000..45372c6053 --- /dev/null +++ b/pinning/pinner/dsindex/indexer_test.go @@ -0,0 +1,286 @@ +package dsindex + +import ( + "context" + "testing" + + ds "github.com/ipfs/go-datastore" +) + +func createIndexer() Indexer { + dstore := ds.NewMapDatastore() + nameIndex := New(dstore, ds.NewKey("/data/nameindex")) + + ctx := context.Background() + nameIndex.Add(ctx, "alice", "a1") + nameIndex.Add(ctx, "bob", "b1") + nameIndex.Add(ctx, "bob", "b2") + nameIndex.Add(ctx, "cathy", "c1") + + return nameIndex +} + +func TestAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + err := nameIndex.Add(ctx, "someone", "s1") + if err != nil { + t.Fatal(err) + } + err = nameIndex.Add(ctx, "someone", "s1") + if err != nil { + t.Fatal(err) + } + + err = nameIndex.Add(ctx, "", "noindex") + if err != ErrEmptyKey { + t.Fatal("unexpected error:", err) + } + + err = nameIndex.Add(ctx, "nokey", "") + if err != ErrEmptyValue { + t.Fatal("unexpected error:", err) + } +} + +func TestHasValue(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + + ok, err := nameIndex.HasValue(ctx, "bob", "b1") + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("missing index") + } + + ok, err = nameIndex.HasValue(ctx, "bob", "b3") + if err != nil { + t.Fatal(err) + } + if ok { + 
t.Fatal("should not have index") + } + + _, err = nameIndex.HasValue(ctx, "", "b1") + if err != ErrEmptyKey { + t.Fatal("unexpected error:", err) + } + + _, err = nameIndex.HasValue(ctx, "bob", "") + if err != ErrEmptyValue { + t.Fatal("unexpected error:", err) + } +} + +func TestHasAny(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + + ok, err := nameIndex.HasAny(ctx, "nothere") + if err != nil { + t.Fatal(err) + } + if ok { + t.Fatal("should return false") + } + + for _, idx := range []string{"alice", "bob", ""} { + ok, err = nameIndex.HasAny(ctx, idx) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("missing index", idx) + } + } + + count, err := nameIndex.DeleteAll(ctx) + if err != nil { + t.Fatal(err) + } + if count != 4 { + t.Fatal("expected 4 deletions") + } + + ok, err = nameIndex.HasAny(ctx, "") + if err != nil { + t.Fatal(err) + } + if ok { + t.Fatal("should return false") + } +} + +func TestForEach(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + + found := make(map[string]struct{}) + err := nameIndex.ForEach(ctx, "bob", func(key, value string) bool { + found[value] = struct{}{} + return true + }) + if err != nil { + t.Fatal(err) + } + + for _, value := range []string{"b1", "b2"} { + _, ok := found[value] + if !ok { + t.Fatal("missing key for value", value) + } + } + + values := map[string]string{} + err = nameIndex.ForEach(ctx, "", func(key, value string) bool { + values[value] = key + return true + }) + if err != nil { + t.Fatal(err) + } + if len(values) != 4 { + t.Fatal("expected 4 keys") + } + + if values["a1"] != "alice" { + t.Error("expected a1: alice") + } + if values["b1"] != "bob" { + t.Error("expected b1: bob") + } + if values["b2"] != "bob" { + t.Error("expected b2: bob") + } + if values["c1"] != "cathy" { + t.Error("expected c1: cathy") + } +} + +func TestSearch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + + ids, err := nameIndex.Search(ctx, "bob") + if err != nil { + t.Fatal(err) + } + if len(ids) != 2 { + t.Fatal("wrong number of ids - expected 2 got", ids) + } + for _, id := range ids { + if id != "b1" && id != "b2" { + t.Fatal("wrong value in id set") + } + } + if ids[0] == ids[1] { + t.Fatal("duplicate id") + } + + ids, err = nameIndex.Search(ctx, "cathy") + if err != nil { + t.Fatal(err) + } + if len(ids) != 1 || ids[0] != "c1" { + t.Fatal("wrong ids") + } + + ids, err = nameIndex.Search(ctx, "amit") + if err != nil { + t.Fatal(err) + } + if len(ids) != 0 { + t.Fatal("unexpected ids returned") + } +} + +func TestDelete(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + + err := nameIndex.Delete(ctx, "bob", "b3") + if err != nil { + t.Fatal(err) + } + + err = nameIndex.Delete(ctx, "alice", "a1") + if err != nil { + t.Fatal(err) + } + + ok, err := nameIndex.HasValue(ctx, "alice", "a1") + if err != nil { + t.Fatal(err) + } + if ok { + t.Fatal("index key should have been deleted") + } + + count, err := nameIndex.DeleteKey(ctx, "bob") + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatal("wrong deleted count") + } + ok, _ = nameIndex.HasValue(ctx, "bob", "b1") + if ok { + t.Fatal("index not deleted") + } +} + +func TestSyncIndex(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nameIndex := createIndexer() + 
+ dstore := ds.NewMapDatastore() + refIndex := New(dstore, ds.NewKey("/ref")) + refIndex.Add(ctx, "alice", "a1") + refIndex.Add(ctx, "cathy", "zz") + refIndex.Add(ctx, "dennis", "d1") + + changed, err := SyncIndex(ctx, refIndex, nameIndex) + if err != nil { + t.Fatal(err) + } + if !changed { + t.Error("change was not indicated") + } + + // Create map of id->index in sync target + syncs := map[string]string{} + err = nameIndex.ForEach(ctx, "", func(key, value string) bool { + syncs[value] = key + return true + }) + if err != nil { + t.Fatal(err) + } + + // Iterate items in sync source and make sure they appear in target + var itemCount int + err = refIndex.ForEach(ctx, "", func(key, value string) bool { + itemCount++ + syncKey, ok := syncs[value] + if !ok || key != syncKey { + t.Fatal("key", key, "-->", value, "was not synced") + } + return true + }) + if err != nil { + t.Fatal(err) + } + + if itemCount != len(syncs) { + t.Fatal("different number of items in sync source and target") + } +} diff --git a/pinning/pinner/dspinner/pin.go b/pinning/pinner/dspinner/pin.go new file mode 100644 index 0000000000..8168535652 --- /dev/null +++ b/pinning/pinner/dspinner/pin.go @@ -0,0 +1,1031 @@ +// Package dspinner implements structures and methods to keep track of +// which objects a user wants to keep stored locally. This implementation +// stores pin data in a datastore. +package dspinner + +import ( + "context" + "errors" + "fmt" + "path" + "sync" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/boxo/ipld/merkledag/dagutils" + logging "github.com/ipfs/go-log" + "github.com/polydawn/refmt/cbor" + "github.com/polydawn/refmt/obj/atlas" + + ipfspinner "github.com/ipfs/boxo/pinning/pinner" + "github.com/ipfs/boxo/pinning/pinner/dsindex" +) + +const ( + basePath = "/pins" + pinKeyPath = "/pins/pin" + indexKeyPath = "/pins/index" + dirtyKeyPath = "/pins/state/dirty" +) + +var ( + log logging.StandardLogger = logging.Logger("pin") + + linkDirect, linkRecursive string + + pinCidDIndexPath string + pinCidRIndexPath string + pinNameIndexPath string + + dirtyKey = ds.NewKey(dirtyKeyPath) + + pinAtl atlas.Atlas +) + +func init() { + directStr, ok := ipfspinner.ModeToString(ipfspinner.Direct) + if !ok { + panic("could not find Direct pin enum") + } + linkDirect = directStr + + recursiveStr, ok := ipfspinner.ModeToString(ipfspinner.Recursive) + if !ok { + panic("could not find Recursive pin enum") + } + linkRecursive = recursiveStr + + pinCidRIndexPath = path.Join(indexKeyPath, "cidRindex") + pinCidDIndexPath = path.Join(indexKeyPath, "cidDindex") + pinNameIndexPath = path.Join(indexKeyPath, "nameIndex") + + pinAtl = atlas.MustBuild( + atlas.BuildEntry(pin{}).StructMap(). + AddField("Cid", atlas.StructMapEntry{SerialName: "cid"}). + AddField("Metadata", atlas.StructMapEntry{SerialName: "metadata", OmitEmpty: true}). + AddField("Mode", atlas.StructMapEntry{SerialName: "mode"}). + AddField("Name", atlas.StructMapEntry{SerialName: "name", OmitEmpty: true}). + Complete(), + atlas.BuildEntry(cid.Cid{}).Transform(). + TransformMarshal(atlas.MakeMarshalTransformFunc(func(live cid.Cid) ([]byte, error) { return live.MarshalBinary() })). 
+ TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(func(serializable []byte) (cid.Cid, error) { + c := cid.Cid{} + err := c.UnmarshalBinary(serializable) + if err != nil { + return cid.Cid{}, err + } + return c, nil + })).Complete(), + ) + pinAtl = pinAtl.WithMapMorphism(atlas.MapMorphism{KeySortMode: atlas.KeySortMode_Strings}) +} + +// pinner implements the Pinner interface +type pinner struct { + autoSync bool + lock sync.RWMutex + + dserv ipld.DAGService + dstore ds.Datastore + + cidDIndex dsindex.Indexer + cidRIndex dsindex.Indexer + nameIndex dsindex.Indexer + + clean int64 + dirty int64 +} + +var _ ipfspinner.Pinner = (*pinner)(nil) + +type pin struct { + Id string + Cid cid.Cid + Metadata map[string]interface{} + Mode ipfspinner.Mode + Name string +} + +func (p *pin) dsKey() ds.Key { + return ds.NewKey(path.Join(pinKeyPath, p.Id)) +} + +func newPin(c cid.Cid, mode ipfspinner.Mode, name string) *pin { + return &pin{ + Id: path.Base(ds.RandomKey().String()), + Cid: c, + Name: name, + Mode: mode, + } +} + +type syncDAGService interface { + ipld.DAGService + Sync() error +} + +// New creates a new pinner and loads its keysets from the given datastore. If +// there is no data present in the datastore, then an empty pinner is returned. +// +// By default, changes are automatically flushed to the datastore. This can be +// disabled by calling SetAutosync(false), which will require that Flush be +// called explicitly. +func New(ctx context.Context, dstore ds.Datastore, dserv ipld.DAGService) (*pinner, error) { + p := &pinner{ + autoSync: true, + cidDIndex: dsindex.New(dstore, ds.NewKey(pinCidDIndexPath)), + cidRIndex: dsindex.New(dstore, ds.NewKey(pinCidRIndexPath)), + nameIndex: dsindex.New(dstore, ds.NewKey(pinNameIndexPath)), + dserv: dserv, + dstore: dstore, + } + + data, err := dstore.Get(ctx, dirtyKey) + if err != nil { + if err == ds.ErrNotFound { + return p, nil + } + return nil, fmt.Errorf("cannot load dirty flag: %v", err) + } + if data[0] == 1 { + p.dirty = 1 + + err = p.rebuildIndexes(ctx) + if err != nil { + return nil, fmt.Errorf("cannot rebuild indexes: %v", err) + } + } + + return p, nil +} + +// SetAutosync allows auto-syncing to be enabled or disabled during runtime. +// This may be used to turn off autosync before doing many repeated pinning +// operations, and then turn it on after. Returns the previous value. 
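+//
+// A sketch of the batching pattern this enables (illustrative only):
+//
+//	prev := p.SetAutosync(false)
+//	for _, nd := range nodes {
+//		_ = p.Pin(ctx, nd, true)
+//	}
+//	p.SetAutosync(prev)
+//	_ = p.Flush(ctx) // persist all accumulated changes at once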
+func (p *pinner) SetAutosync(auto bool) bool { + p.lock.Lock() + defer p.lock.Unlock() + + p.autoSync, auto = auto, p.autoSync + return auto +} + +// Pin the given node, optionally recursive +func (p *pinner) Pin(ctx context.Context, node ipld.Node, recurse bool) error { + err := p.dserv.Add(ctx, node) + if err != nil { + return err + } + + if recurse { + return p.doPinRecursive(ctx, node.Cid(), true) + } else { + return p.doPinDirect(ctx, node.Cid()) + } +} + +func (p *pinner) doPinRecursive(ctx context.Context, c cid.Cid, fetch bool) error { + cidKey := c.KeyString() + + p.lock.Lock() + defer p.lock.Unlock() + + found, err := p.cidRIndex.HasAny(ctx, cidKey) + if err != nil { + return err + } + if found { + return nil + } + + dirtyBefore := p.dirty + + if fetch { + // temporary unlock to fetch the entire graph + p.lock.Unlock() + // Fetch graph starting at node identified by cid + err = merkledag.FetchGraph(ctx, c, p.dserv) + p.lock.Lock() + if err != nil { + return err + } + } + + // If autosyncing, sync dag service before making any change to pins + err = p.flushDagService(ctx, false) + if err != nil { + return err + } + + // Only look again if something has changed. + if p.dirty != dirtyBefore { + found, err = p.cidRIndex.HasAny(ctx, cidKey) + if err != nil { + return err + } + if found { + return nil + } + } + + // TODO: remove this to support multiple pins per CID + found, err = p.cidDIndex.HasAny(ctx, cidKey) + if err != nil { + return err + } + if found { + _, err = p.removePinsForCid(ctx, c, ipfspinner.Direct) + if err != nil { + return err + } + } + + _, err = p.addPin(ctx, c, ipfspinner.Recursive, "") + if err != nil { + return err + } + return p.flushPins(ctx, false) +} + +func (p *pinner) doPinDirect(ctx context.Context, c cid.Cid) error { + cidKey := c.KeyString() + + p.lock.Lock() + defer p.lock.Unlock() + + found, err := p.cidRIndex.HasAny(ctx, cidKey) + if err != nil { + return err + } + if found { + return fmt.Errorf("%s already pinned recursively", c.String()) + } + + _, err = p.addPin(ctx, c, ipfspinner.Direct, "") + if err != nil { + return err + } + + return p.flushPins(ctx, false) +} + +func (p *pinner) addPin(ctx context.Context, c cid.Cid, mode ipfspinner.Mode, name string) (string, error) { + // Create new pin and store in datastore + pp := newPin(c, mode, name) + + // Serialize pin + pinData, err := encodePin(pp) + if err != nil { + return "", fmt.Errorf("could not encode pin: %v", err) + } + + p.setDirty(ctx) + + // Store the pin + err = p.dstore.Put(ctx, pp.dsKey(), pinData) + if err != nil { + return "", err + } + + // Store CID index + switch mode { + case ipfspinner.Recursive: + err = p.cidRIndex.Add(ctx, c.KeyString(), pp.Id) + case ipfspinner.Direct: + err = p.cidDIndex.Add(ctx, c.KeyString(), pp.Id) + default: + panic("pin mode must be recursive or direct") + } + if err != nil { + return "", fmt.Errorf("could not add pin cid index: %v", err) + } + + if name != "" { + // Store name index + err = p.nameIndex.Add(ctx, name, pp.Id) + if err != nil { + if mode == ipfspinner.Recursive { + e := p.cidRIndex.Delete(ctx, c.KeyString(), pp.Id) + if e != nil { + log.Errorf("error deleting index: %s", e) + } + } else { + e := p.cidDIndex.Delete(ctx, c.KeyString(), pp.Id) + if e != nil { + log.Errorf("error deleting index: %s", e) + } + } + return "", fmt.Errorf("could not add pin name index: %v", err) + } + } + + return pp.Id, nil +} + +func (p *pinner) removePin(ctx context.Context, pp *pin) error { + p.setDirty(ctx) + var err error + + // Remove cid index from 
datastore
+	if pp.Mode == ipfspinner.Recursive {
+		err = p.cidRIndex.Delete(ctx, pp.Cid.KeyString(), pp.Id)
+	} else {
+		err = p.cidDIndex.Delete(ctx, pp.Cid.KeyString(), pp.Id)
+	}
+	if err != nil {
+		return err
+	}
+
+	if pp.Name != "" {
+		// Remove name index from datastore
+		err = p.nameIndex.Delete(ctx, pp.Name, pp.Id)
+		if err != nil {
+			return err
+		}
+	}
+
+	// The pin is removed last so that an incomplete remove is detected by a
+	// pin that has a missing index.
+	err = p.dstore.Delete(ctx, pp.dsKey())
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Unpin a given key
+func (p *pinner) Unpin(ctx context.Context, c cid.Cid, recursive bool) error {
+	cidKey := c.KeyString()
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// TODO: use Ls() to lookup pins when new pinning API available
+	/*
+		matchSpec := map[string][]string {
+			"cid": []string{c.String}
+		}
+		matches := p.Ls(matchSpec)
+	*/
+	has, err := p.cidRIndex.HasAny(ctx, cidKey)
+	if err != nil {
+		return err
+	}
+
+	if has {
+		if !recursive {
+			return fmt.Errorf("%s is pinned recursively", c.String())
+		}
+	} else {
+		has, err = p.cidDIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return err
+		}
+		if !has {
+			return ipfspinner.ErrNotPinned
+		}
+	}
+
+	removed, err := p.removePinsForCid(ctx, c, ipfspinner.Any)
+	if err != nil {
+		return err
+	}
+	if !removed {
+		return nil
+	}
+
+	return p.flushPins(ctx, false)
+}
+
+// IsPinned returns whether or not the given key is pinned
+// and an explanation of why it's pinned
+func (p *pinner) IsPinned(ctx context.Context, c cid.Cid) (string, bool, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	return p.isPinnedWithType(ctx, c, ipfspinner.Any)
+}
+
+// IsPinnedWithType returns whether or not the given cid is pinned with the
+// given pin type, as well as returning the type of pin it's pinned with.
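+//
+// For example (illustrative), to accept only a recursive pin:
+//
+//	mode, pinned, err := p.IsPinnedWithType(ctx, c, ipfspinner.Recursive)
+//	// when pinned is true, mode is the string form of the Recursive mode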
+func (p *pinner) IsPinnedWithType(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (string, bool, error) { + p.lock.RLock() + defer p.lock.RUnlock() + return p.isPinnedWithType(ctx, c, mode) +} + +func (p *pinner) isPinnedWithType(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (string, bool, error) { + cidKey := c.KeyString() + switch mode { + case ipfspinner.Recursive: + has, err := p.cidRIndex.HasAny(ctx, cidKey) + if err != nil { + return "", false, err + } + if has { + return linkRecursive, true, nil + } + return "", false, nil + case ipfspinner.Direct: + has, err := p.cidDIndex.HasAny(ctx, cidKey) + if err != nil { + return "", false, err + } + if has { + return linkDirect, true, nil + } + return "", false, nil + case ipfspinner.Internal: + return "", false, nil + case ipfspinner.Indirect: + case ipfspinner.Any: + has, err := p.cidRIndex.HasAny(ctx, cidKey) + if err != nil { + return "", false, err + } + if has { + return linkRecursive, true, nil + } + has, err = p.cidDIndex.HasAny(ctx, cidKey) + if err != nil { + return "", false, err + } + if has { + return linkDirect, true, nil + } + default: + err := fmt.Errorf( + "invalid Pin Mode '%d', must be one of {%d, %d, %d, %d, %d}", + mode, ipfspinner.Direct, ipfspinner.Indirect, ipfspinner.Recursive, + ipfspinner.Internal, ipfspinner.Any) + return "", false, err + } + + // Default is Indirect + visitedSet := cid.NewSet() + + // No index for given CID, so search children of all recursive pinned CIDs + var has bool + var rc cid.Cid + var e error + err := p.cidRIndex.ForEach(ctx, "", func(key, value string) bool { + rc, e = cid.Cast([]byte(key)) + if e != nil { + return false + } + has, e = hasChild(ctx, p.dserv, rc, c, visitedSet.Visit) + if e != nil { + return false + } + if has { + return false + } + return true + }) + if err != nil { + return "", false, err + } + if e != nil { + return "", false, e + } + + if has { + return rc.String(), true, nil + } + + return "", false, nil +} + +// CheckIfPinned checks if a set of keys are pinned, more efficient than +// calling IsPinned for each key, returns the pinned status of cid(s) +// +// TODO: If a CID is pinned by multiple pins, should they all be reported? 
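+//
+// Sketch of expected use (illustrative):
+//
+//	statuses, err := p.CheckIfPinned(ctx, c1, c2)
+//	for _, s := range statuses {
+//		// s.Mode is Recursive, Direct, Indirect, or NotPinned;
+//		// for Indirect pins, s.Via names the recursive root it was found under.
+//	}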
+func (p *pinner) CheckIfPinned(ctx context.Context, cids ...cid.Cid) ([]ipfspinner.Pinned, error) {
+	pinned := make([]ipfspinner.Pinned, 0, len(cids))
+	toCheck := cid.NewSet()
+
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	// First check for non-Indirect pins directly
+	for _, c := range cids {
+		cidKey := c.KeyString()
+		has, err := p.cidRIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return nil, err
+		}
+		if has {
+			pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Recursive})
+		} else {
+			has, err = p.cidDIndex.HasAny(ctx, cidKey)
+			if err != nil {
+				return nil, err
+			}
+			if has {
+				pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Direct})
+			} else {
+				toCheck.Add(c)
+			}
+		}
+	}
+
+	var e error
+	visited := cid.NewSet()
+	err := p.cidRIndex.ForEach(ctx, "", func(key, value string) bool {
+		var rk cid.Cid
+		rk, e = cid.Cast([]byte(key))
+		if e != nil {
+			return false
+		}
+		e = merkledag.Walk(ctx, merkledag.GetLinksWithDAG(p.dserv), rk, func(c cid.Cid) bool {
+			if toCheck.Len() == 0 || !visited.Visit(c) {
+				return false
+			}
+
+			if toCheck.Has(c) {
+				pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Indirect, Via: rk})
+				toCheck.Remove(c)
+			}
+
+			return true
+		}, merkledag.Concurrent())
+		if e != nil {
+			return false
+		}
+		return toCheck.Len() > 0
+	})
+	if err != nil {
+		return nil, err
+	}
+	if e != nil {
+		return nil, e
+	}
+
+	// Anything left in toCheck is not pinned
+	for _, k := range toCheck.Keys() {
+		pinned = append(pinned, ipfspinner.Pinned{Key: k, Mode: ipfspinner.NotPinned})
+	}
+
+	return pinned, nil
+}
+
+// removePinsForCid removes all pins for a cid that has the specified mode.
+// Returns true if any pins, and all corresponding CID index entries, were
+// removed. Otherwise, returns false.
+func (p *pinner) removePinsForCid(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (bool, error) {
+	// Search for pins by CID
+	var ids []string
+	var err error
+	cidKey := c.KeyString()
+	switch mode {
+	case ipfspinner.Recursive:
+		ids, err = p.cidRIndex.Search(ctx, cidKey)
+	case ipfspinner.Direct:
+		ids, err = p.cidDIndex.Search(ctx, cidKey)
+	case ipfspinner.Any:
+		ids, err = p.cidRIndex.Search(ctx, cidKey)
+		if err != nil {
+			return false, err
+		}
+		dIds, err := p.cidDIndex.Search(ctx, cidKey)
+		if err != nil {
+			return false, err
+		}
+		if len(dIds) != 0 {
+			ids = append(ids, dIds...)
+		}
+	}
+	if err != nil {
+		return false, err
+	}
+
+	var removed bool
+
+	// Remove the pin with the requested mode
+	for _, pid := range ids {
+		var pp *pin
+		pp, err = p.loadPin(ctx, pid)
+		if err != nil {
+			if err == ds.ErrNotFound {
+				p.setDirty(ctx)
+				// Fix index; remove index for pin that does not exist
+				switch mode {
+				case ipfspinner.Recursive:
+					_, err = p.cidRIndex.DeleteKey(ctx, cidKey)
+					if err != nil {
+						return false, fmt.Errorf("error deleting index: %s", err)
+					}
+				case ipfspinner.Direct:
+					_, err = p.cidDIndex.DeleteKey(ctx, cidKey)
+					if err != nil {
+						return false, fmt.Errorf("error deleting index: %s", err)
+					}
+				case ipfspinner.Any:
+					_, err = p.cidRIndex.DeleteKey(ctx, cidKey)
+					if err != nil {
+						return false, fmt.Errorf("error deleting index: %s", err)
+					}
+					_, err = p.cidDIndex.DeleteKey(ctx, cidKey)
+					if err != nil {
+						return false, fmt.Errorf("error deleting index: %s", err)
+					}
+				}
+				if err = p.flushPins(ctx, true); err != nil {
+					return false, err
+				}
+				// Mark this as removed since it removed an index, which is
+				// what determines whether an item is pinned.
+ removed = true + log.Error("found CID index with missing pin") + continue + } + return false, err + } + if mode == ipfspinner.Any || pp.Mode == mode { + err = p.removePin(ctx, pp) + if err != nil { + return false, err + } + removed = true + } + } + return removed, nil +} + +// loadPin loads a single pin from the datastore. +func (p *pinner) loadPin(ctx context.Context, pid string) (*pin, error) { + pinData, err := p.dstore.Get(ctx, ds.NewKey(path.Join(pinKeyPath, pid))) + if err != nil { + return nil, err + } + return decodePin(pid, pinData) +} + +// DirectKeys returns a slice containing the directly pinned keys +func (p *pinner) DirectKeys(ctx context.Context) ([]cid.Cid, error) { + p.lock.RLock() + defer p.lock.RUnlock() + + cidSet := cid.NewSet() + var e error + err := p.cidDIndex.ForEach(ctx, "", func(key, value string) bool { + var c cid.Cid + c, e = cid.Cast([]byte(key)) + if e != nil { + return false + } + cidSet.Add(c) + return true + }) + if err != nil { + return nil, err + } + if e != nil { + return nil, e + } + + return cidSet.Keys(), nil +} + +// RecursiveKeys returns a slice containing the recursively pinned keys +func (p *pinner) RecursiveKeys(ctx context.Context) ([]cid.Cid, error) { + p.lock.RLock() + defer p.lock.RUnlock() + + cidSet := cid.NewSet() + var e error + err := p.cidRIndex.ForEach(ctx, "", func(key, value string) bool { + var c cid.Cid + c, e = cid.Cast([]byte(key)) + if e != nil { + return false + } + cidSet.Add(c) + return true + }) + if err != nil { + return nil, err + } + if e != nil { + return nil, e + } + + return cidSet.Keys(), nil +} + +// InternalPins returns all cids kept pinned for the internal state of the +// pinner +func (p *pinner) InternalPins(ctx context.Context) ([]cid.Cid, error) { + return nil, nil +} + +// Update updates a recursive pin from one cid to another. This is equivalent +// to pinning the new one and unpinning the old one. +// +// TODO: This will not work when multiple pins are supported +func (p *pinner) Update(ctx context.Context, from, to cid.Cid, unpin bool) error { + p.lock.Lock() + defer p.lock.Unlock() + + found, err := p.cidRIndex.HasAny(ctx, from.KeyString()) + if err != nil { + return err + } + if !found { + return errors.New("'from' cid was not recursively pinned already") + } + + // If `from` already recursively pinned and `to` is the same, then all done + if from == to { + return nil + } + + // Check if the `to` cid is already recursively pinned + found, err = p.cidRIndex.HasAny(ctx, to.KeyString()) + if err != nil { + return err + } + if found { + return errors.New("'to' cid was already recursively pinned") + } + + // Temporarily unlock while we fetch the differences. 
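+	// (DiffEnumerate below only walks and fetches the subtrees that differ
+	// between the two roots, so an Update fetches far fewer blocks than
+	// unpinning and re-pinning the whole graph would.)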
+	p.lock.Unlock()
+	err = dagutils.DiffEnumerate(ctx, p.dserv, from, to)
+	p.lock.Lock()
+
+	if err != nil {
+		return err
+	}
+
+	_, err = p.addPin(ctx, to, ipfspinner.Recursive, "")
+	if err != nil {
+		return err
+	}
+
+	if unpin {
+		_, err = p.removePinsForCid(ctx, from, ipfspinner.Recursive)
+		if err != nil {
+			return err
+		}
+	}
+
+	return p.flushPins(ctx, false)
+}
+
+func (p *pinner) flushDagService(ctx context.Context, force bool) error {
+	if !p.autoSync && !force {
+		return nil
+	}
+	if syncDServ, ok := p.dserv.(syncDAGService); ok {
+		if err := syncDServ.Sync(); err != nil {
+			return fmt.Errorf("cannot sync pinned data: %v", err)
+		}
+	}
+	return nil
+}
+
+func (p *pinner) flushPins(ctx context.Context, force bool) error {
+	if !p.autoSync && !force {
+		return nil
+	}
+	if err := p.dstore.Sync(ctx, ds.NewKey(basePath)); err != nil {
+		return fmt.Errorf("cannot sync pin state: %v", err)
+	}
+	p.setClean(ctx)
+	return nil
+}
+
+// Flush encodes and writes pinner keysets to the datastore
+func (p *pinner) Flush(ctx context.Context) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	err := p.flushDagService(ctx, true)
+	if err != nil {
+		return err
+	}
+
+	return p.flushPins(ctx, true)
+}
+
+// PinWithMode allows the user to have fine-grained control over pin
+// counts
+func (p *pinner) PinWithMode(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) error {
+	// TODO: remove this to support multiple pins per CID
+	switch mode {
+	case ipfspinner.Recursive:
+		return p.doPinRecursive(ctx, c, false)
+	case ipfspinner.Direct:
+		return p.doPinDirect(ctx, c)
+	default:
+		return fmt.Errorf("unrecognized pin mode")
+	}
+}
+
+// hasChild recursively looks for a Cid among the children of a root Cid.
+// The visit function can be used to shortcut already-visited branches.
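+//
+// The search is depth-first over the links of root; when visit is backed by
+// a cid.Set (as in isPinnedWithType above), shared subtrees are descended
+// into at most once across the whole search.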
+func hasChild(ctx context.Context, ng ipld.NodeGetter, root cid.Cid, child cid.Cid, visit func(cid.Cid) bool) (bool, error) { + links, err := ipld.GetLinks(ctx, ng, root) + if err != nil { + return false, err + } + for _, lnk := range links { + c := lnk.Cid + if lnk.Cid.Equals(child) { + return true, nil + } + if visit(c) { + has, err := hasChild(ctx, ng, c, child, visit) + if err != nil { + return false, err + } + + if has { + return has, nil + } + } + } + return false, nil +} + +func encodePin(p *pin) ([]byte, error) { + b, err := cbor.MarshalAtlased(p, pinAtl) + if err != nil { + return nil, err + } + return b, nil +} + +func decodePin(pid string, data []byte) (*pin, error) { + p := &pin{Id: pid} + err := cbor.UnmarshalAtlased(cbor.DecodeOptions{}, data, p, pinAtl) + if err != nil { + return nil, err + } + return p, nil +} + +// setDirty updates the dirty counter and saves a dirty state in the datastore +// if the state was previously clean +func (p *pinner) setDirty(ctx context.Context) { + wasClean := p.dirty == p.clean + p.dirty++ + + if !wasClean { + return // do not save; was already dirty + } + + data := []byte{1} + err := p.dstore.Put(ctx, dirtyKey, data) + if err != nil { + log.Errorf("failed to set pin dirty flag: %s", err) + return + } + err = p.dstore.Sync(ctx, dirtyKey) + if err != nil { + log.Errorf("failed to sync pin dirty flag: %s", err) + } +} + +// setClean saves a clean state value in the datastore if the state was +// previously dirty +func (p *pinner) setClean(ctx context.Context) { + if p.dirty == p.clean { + return // already clean + } + + data := []byte{0} + err := p.dstore.Put(ctx, dirtyKey, data) + if err != nil { + log.Errorf("failed to set clear dirty flag: %s", err) + return + } + if err = p.dstore.Sync(ctx, dirtyKey); err != nil { + log.Errorf("failed to sync cleared pin dirty flag: %s", err) + return + } + p.clean = p.dirty // set clean +} + +// sync datastore after every 50 cid repairs +const syncRepairFrequency = 50 + +// rebuildIndexes uses the stored pins to rebuild secondary indexes. This +// resolves any discrepancy between secondary indexes and pins that could +// result from a program termination between saving the two. +func (p *pinner) rebuildIndexes(ctx context.Context) error { + // Load all pins from the datastore. + q := query.Query{ + Prefix: pinKeyPath, + } + results, err := p.dstore.Query(ctx, q) + if err != nil { + return err + } + defer results.Close() + + var checkedCount, repairedCount int + + // Iterate all pins and check if the corresponding recursive or direct + // index is missing. If the index is missing then create the index. 
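+	// (The stored pins are treated as the source of truth: every index entry
+	// can be re-derived from a pin, so missing or stale index entries are
+	// repaired here; dangling index entries whose pin is gone are cleaned up
+	// later, when removePinsForCid encounters them.)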
+	for r := range results.Next() {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		if r.Error != nil {
+			return fmt.Errorf("cannot read index: %v", r.Error)
+		}
+		ent := r.Entry
+		pp, err := decodePin(path.Base(ent.Key), ent.Value)
+		if err != nil {
+			return err
+		}
+
+		indexKey := pp.Cid.KeyString()
+
+		var indexer, staleIndexer dsindex.Indexer
+		var idxrName, staleIdxrName string
+		if pp.Mode == ipfspinner.Recursive {
+			indexer = p.cidRIndex
+			staleIndexer = p.cidDIndex
+			idxrName = linkRecursive
+			staleIdxrName = linkDirect
+		} else if pp.Mode == ipfspinner.Direct {
+			indexer = p.cidDIndex
+			staleIndexer = p.cidRIndex
+			idxrName = linkDirect
+			staleIdxrName = linkRecursive
+		} else {
+			log.Error("unrecognized pin mode:", pp.Mode)
+			continue
+		}
+
+		// Remove any stale index from unused indexer
+		ok, err := staleIndexer.HasValue(ctx, indexKey, pp.Id)
+		if err != nil {
+			return err
+		}
+		if ok {
+			// Delete any stale index
+			log.Errorf("deleting stale %s pin index for cid %v", staleIdxrName, pp.Cid.String())
+			if err = staleIndexer.Delete(ctx, indexKey, pp.Id); err != nil {
+				return err
+			}
+		}
+
+		// Check that the indexer indexes this pin
+		ok, err = indexer.HasValue(ctx, indexKey, pp.Id)
+		if err != nil {
+			return err
+		}
+
+		var repaired bool
+		if !ok {
+			// Do not rebuild if index has an old value with leading slash
+			ok, err = indexer.HasValue(ctx, indexKey, "/"+pp.Id)
+			if err != nil {
+				return err
+			}
+			if !ok {
+				log.Errorf("repairing %s pin index for cid: %s", idxrName, pp.Cid.String())
+				// There was no index found for this pin. This was either an
+				// incomplete add or an incomplete delete of a pin. Either
+				// way, restore the index to complete the add or to undo the
+				// incomplete delete.
+				if err = indexer.Add(ctx, indexKey, pp.Id); err != nil {
+					return err
+				}
+				repaired = true
+			}
+		}
+		// Check for missing name index
+		if pp.Name != "" {
+			ok, err = p.nameIndex.HasValue(ctx, pp.Name, pp.Id)
+			if err != nil {
+				return err
+			}
+			if !ok {
+				log.Errorf("repairing name pin index for cid: %s", pp.Cid.String())
+				if err = p.nameIndex.Add(ctx, pp.Name, pp.Id); err != nil {
+					return err
+				}
+				// Count the repair only when an index entry was actually
+				// restored.
+				repaired = true
+			}
+		}
+
+		if repaired {
+			repairedCount++
+		}
+		checkedCount++
+		if checkedCount%syncRepairFrequency == 0 {
+			p.flushPins(ctx, true)
+		}
+	}
+
+	log.Errorf("checked %d pins for invalid indexes, repaired %d pins", checkedCount, repairedCount)
+	return p.flushPins(ctx, true)
+}
diff --git a/pinning/pinner/dspinner/pin_test.go b/pinning/pinner/dspinner/pin_test.go
new file mode 100644
index 0000000000..edf315fcbb
--- /dev/null
+++ b/pinning/pinner/dspinner/pin_test.go
@@ -0,0 +1,1347 @@
+package dspinner
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"testing"
+	"time"
+
+	bs "github.com/ipfs/boxo/blockservice"
+	mdag "github.com/ipfs/boxo/ipld/merkledag"
+
+	blockstore "github.com/ipfs/boxo/blockstore"
+	offline "github.com/ipfs/boxo/exchange/offline"
+	util "github.com/ipfs/boxo/util"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/query"
+	dssync "github.com/ipfs/go-datastore/sync"
+	lds "github.com/ipfs/go-ds-leveldb"
+	ipld "github.com/ipfs/go-ipld-format"
+	logging "github.com/ipfs/go-log"
+
+	ipfspin "github.com/ipfs/boxo/pinning/pinner"
+)
+
+var rand = util.NewTimeSeededRand()
+
+type fakeLogger struct {
+	logging.StandardLogger
+	lastError error
+}
+
+func (f *fakeLogger) Error(args ...interface{}) {
+	f.lastError = errors.New(fmt.Sprint(args...))
+}
+
+func (f *fakeLogger) Errorf(format string, args
...interface{}) { + f.lastError = fmt.Errorf(format, args...) +} + +func randNode() (*mdag.ProtoNode, cid.Cid) { + nd := new(mdag.ProtoNode) + nd.SetData(make([]byte, 32)) + _, err := io.ReadFull(rand, nd.Data()) + if err != nil { + panic(err) + } + k := nd.Cid() + return nd, k +} + +func assertPinned(t *testing.T, p ipfspin.Pinner, c cid.Cid, failmsg string) { + _, pinned, err := p.IsPinned(context.Background(), c) + if err != nil { + t.Fatal(err) + } + + if !pinned { + t.Fatal(failmsg) + } +} + +func assertPinnedWithType(t *testing.T, p ipfspin.Pinner, c cid.Cid, mode ipfspin.Mode, failmsg string) { + modeText, pinned, err := p.IsPinnedWithType(context.Background(), c, mode) + if err != nil { + t.Fatal(err) + } + + expect, ok := ipfspin.ModeToString(mode) + if !ok { + t.Fatal("unrecognized pin mode") + } + + if !pinned { + t.Fatal(failmsg) + } + + if mode == ipfspin.Any { + return + } + + if expect != modeText { + t.Fatal("expected", expect, "pin, got", modeText) + } +} + +func assertUnpinned(t *testing.T, p ipfspin.Pinner, c cid.Cid, failmsg string) { + _, pinned, err := p.IsPinned(context.Background(), c) + if err != nil { + t.Fatal(err) + } + + if pinned { + t.Fatal(failmsg) + } +} + +func TestPinnerBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + a, ak := randNode() + err = dserv.Add(ctx, a) + if err != nil { + t.Fatal(err) + } + + // Pin A{} + err = p.Pin(ctx, a, false) + if err != nil { + t.Fatal(err) + } + + assertPinned(t, p, ak, "Failed to find key") + assertPinnedWithType(t, p, ak, ipfspin.Direct, "Expected direct pin") + + // create new node c, to be indirectly pinned through b + c, _ := randNode() + err = dserv.Add(ctx, c) + if err != nil { + t.Fatal(err) + } + ck := c.Cid() + + // Create new node b, to be parent to a and c + b, _ := randNode() + err = b.AddNodeLink("child", a) + if err != nil { + t.Fatal(err) + } + err = b.AddNodeLink("otherchild", c) + if err != nil { + t.Fatal(err) + } + + err = dserv.Add(ctx, b) + if err != nil { + t.Fatal(err) + } + bk := b.Cid() + + // recursively pin B{A,C} + err = p.Pin(ctx, b, true) + if err != nil { + t.Fatal(err) + } + + assertPinned(t, p, ck, "child of recursively pinned node not found") + + assertPinned(t, p, bk, "Pinned node not found") + assertPinnedWithType(t, p, bk, ipfspin.Recursive, "Recursively pinned node not found") + + d, _ := randNode() + err = d.AddNodeLink("a", a) + if err != nil { + panic(err) + } + err = d.AddNodeLink("c", c) + if err != nil { + panic(err) + } + + e, _ := randNode() + err = d.AddNodeLink("e", e) + if err != nil { + panic(err) + } + + // Must be in dagserv for unpin to work + err = dserv.Add(ctx, e) + if err != nil { + t.Fatal(err) + } + err = dserv.Add(ctx, d) + if err != nil { + t.Fatal(err) + } + + // Add D{A,C,E} + err = p.Pin(ctx, d, true) + if err != nil { + t.Fatal(err) + } + + dk := d.Cid() + assertPinned(t, p, dk, "pinned node not found.") + + cids, err := p.RecursiveKeys(ctx) + if err != nil { + t.Fatal(err) + } + if len(cids) != 2 { + t.Error("expected 2 recursive pins") + } + if !(bk == cids[0] || bk == cids[1]) { + t.Error("expected recursive pin of B") + } + if !(dk == cids[0] || dk == cids[1]) { + t.Error("expected recursive pin of D") + } + + pinned, err := 
p.CheckIfPinned(ctx, ak, bk, ck, dk) + if err != nil { + t.Fatal(err) + } + if len(pinned) != 4 { + t.Error("incorrect number of results") + } + for _, pn := range pinned { + switch pn.Key { + case ak: + if pn.Mode != ipfspin.Direct { + t.Error("A pinned with wrong mode") + } + case bk: + if pn.Mode != ipfspin.Recursive { + t.Error("B pinned with wrong mode") + } + case ck: + if pn.Mode != ipfspin.Indirect { + t.Error("C should be pinned indirectly") + } + if pn.Via != dk && pn.Via != bk { + t.Error("C should be pinned via D or B") + } + case dk: + if pn.Mode != ipfspin.Recursive { + t.Error("D pinned with wrong mode") + } + } + } + + cids, err = p.DirectKeys(ctx) + if err != nil { + t.Fatal(err) + } + if len(cids) != 1 { + t.Error("expected 1 direct pin") + } + if cids[0] != ak { + t.Error("wrong direct pin") + } + + cids, _ = p.InternalPins(ctx) + if len(cids) != 0 { + t.Error("shound not have internal keys") + } + + err = p.Unpin(ctx, dk, false) + if err == nil { + t.Fatal("expected error unpinning recursive pin without specifying recursive") + } + + // Test recursive unpin + err = p.Unpin(ctx, dk, true) + if err != nil { + t.Fatal(err) + } + + err = p.Unpin(ctx, dk, true) + if err != ipfspin.ErrNotPinned { + t.Fatal("expected error:", ipfspin.ErrNotPinned) + } + + err = p.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + p, err = New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + // Test directly pinned + assertPinned(t, p, ak, "Could not find pinned node!") + + // Test recursively pinned + assertPinned(t, p, bk, "could not find recursively pinned node") + + // Remove the pin but not the index to simulate corruption + ids, err := p.cidDIndex.Search(ctx, ak.KeyString()) + if err != nil { + t.Fatal(err) + } + if len(ids) == 0 { + t.Fatal("did not find pin for cid", ak.String()) + } + pp, err := p.loadPin(ctx, ids[0]) + if err != nil { + t.Fatal(err) + } + if pp.Mode != ipfspin.Direct { + t.Error("loaded pin has wrong mode") + } + if pp.Cid != ak { + t.Error("loaded pin has wrong cid") + } + err = p.dstore.Delete(ctx, pp.dsKey()) + if err != nil { + t.Fatal(err) + } + + realLog := log + fakeLog := &fakeLogger{} + fakeLog.StandardLogger = log + log = fakeLog + err = p.Pin(ctx, a, true) + if err != nil { + t.Fatal(err) + } + if fakeLog.lastError == nil { + t.Error("expected error to be logged") + } else if fakeLog.lastError.Error() != "found CID index with missing pin" { + t.Error("did not get expected log message") + } + + log = realLog +} + +func TestAddLoadPin(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + a, ak := randNode() + err = dserv.Add(ctx, a) + if err != nil { + panic(err) + } + + mode := ipfspin.Recursive + name := "my-pin" + pid, err := p.addPin(ctx, ak, mode, name) + if err != nil { + t.Fatal(err) + } + + // Load pin and check that data decoded correctly + pinData, err := p.loadPin(ctx, pid) + if err != nil { + t.Fatal(err) + } + if pinData.Mode != mode { + t.Error("worng pin mode") + } + if pinData.Cid != ak { + t.Error("wrong pin cid") + } + if pinData.Name != name { + t.Error("wrong pin name; expected", name, "got", pinData.Name) + } +} + +func TestIsPinnedLookup(t *testing.T) { + // Test that lookups work in pins which share + // the same branches. 
For that construct this tree: + // + // A5->A4->A3->A2->A1->A0 + // / / + // B------- / + // \ / + // C--------------- + // + // This ensures that IsPinned works for all objects both when they + // are pinned and once they have been unpinned. + aBranchLen := 6 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + + // Create new pinner. New will not load anything since there are + // no pins saved in the datastore yet. + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + aKeys, bk, ck, err := makeTree(ctx, aBranchLen, dserv, p) + if err != nil { + t.Fatal(err) + } + + assertPinned(t, p, aKeys[0], "A0 should be pinned") + assertPinned(t, p, aKeys[1], "A1 should be pinned") + assertPinned(t, p, ck, "C should be pinned") + assertPinned(t, p, bk, "B should be pinned") + + // Unpin A5 recursively + if err = p.Unpin(ctx, aKeys[5], true); err != nil { + t.Fatal(err) + } + + assertPinned(t, p, aKeys[0], "A0 should still be pinned through B") + assertUnpinned(t, p, aKeys[4], "A4 should be unpinned") + + // Unpin B recursively + if err = p.Unpin(ctx, bk, true); err != nil { + t.Fatal(err) + } + assertUnpinned(t, p, bk, "B should be unpinned") + assertUnpinned(t, p, aKeys[1], "A1 should be unpinned") + assertPinned(t, p, aKeys[0], "A0 should still be pinned through C") +} + +func TestDuplicateSemantics(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + a, _ := randNode() + err = dserv.Add(ctx, a) + if err != nil { + t.Fatal(err) + } + + // pin is recursively + err = p.Pin(ctx, a, true) + if err != nil { + t.Fatal(err) + } + + // pinning directly should fail + err = p.Pin(ctx, a, false) + if err == nil { + t.Fatal("expected direct pin to fail") + } + + // pinning recursively again should succeed + err = p.Pin(ctx, a, true) + if err != nil { + t.Fatal(err) + } +} + +func TestFlush(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + _, k := randNode() + + p.PinWithMode(ctx, k, ipfspin.Recursive) + if err = p.Flush(ctx); err != nil { + t.Fatal(err) + } + assertPinned(t, p, k, "expected key to still be pinned") +} + +func TestPinRecursiveFail(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + dserv := mdag.NewDAGService(bserv) + + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + a, _ := randNode() + b, _ := randNode() + err = a.AddNodeLink("child", b) + if err != nil { + t.Fatal(err) + } + + // NOTE: This isnt a time based test, we expect the pin to fail + mctx, cancel := context.WithTimeout(ctx, time.Millisecond) + defer cancel() + + err = p.Pin(mctx, a, true) + if 
err == nil { + t.Fatal("should have failed to pin here") + } + + err = dserv.Add(ctx, b) + if err != nil { + t.Fatal(err) + } + + err = dserv.Add(ctx, a) + if err != nil { + t.Fatal(err) + } + + // this one is time based... but shouldnt cause any issues + mctx, cancel = context.WithTimeout(ctx, time.Second) + defer cancel() + err = p.Pin(mctx, a, true) + if err != nil { + t.Fatal(err) + } +} + +func TestPinUpdate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + n1, c1 := randNode() + n2, c2 := randNode() + _, c3 := randNode() + + if err = dserv.Add(ctx, n1); err != nil { + t.Fatal(err) + } + if err = dserv.Add(ctx, n2); err != nil { + t.Fatal(err) + } + + if err = p.Pin(ctx, n1, true); err != nil { + t.Fatal(err) + } + + if err = p.Update(ctx, c1, c2, true); err != nil { + t.Fatal(err) + } + + assertPinned(t, p, c2, "c2 should be pinned now") + assertUnpinned(t, p, c1, "c1 should no longer be pinned") + + if err = p.Update(ctx, c2, c1, false); err != nil { + t.Fatal(err) + } + + // Test updating same pin that is already pinned. + if err = p.Update(ctx, c2, c2, true); err != nil { + t.Fatal(err) + } + // Check that pin is still pinned. + _, ok, err := p.IsPinned(ctx, c2) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("c2 should still be pinned") + } + + // Test updating same pin that is not pinned. + if err = p.Update(ctx, c3, c3, false); err == nil { + t.Fatal("expected error updating unpinned cid") + } + _, ok, err = p.IsPinned(ctx, c3) + if err != nil { + t.Fatal(err) + } + if ok { + t.Fatal("c3 should not be pinned") + } + + assertPinned(t, p, c2, "c2 should be pinned still") + assertPinned(t, p, c1, "c1 should be pinned now") +} + +func TestLoadDirty(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + dserv := mdag.NewDAGService(bserv) + + p, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + prev := p.SetAutosync(false) + if !prev { + t.Fatal("expected previous autosync to be true") + } + prev = p.SetAutosync(false) + if prev { + t.Fatal("expected previous autosync to be false") + } + prev = p.SetAutosync(true) + if prev { + t.Fatal("expected previous autosync to be false") + } + + a, ak := randNode() + err = dserv.Add(ctx, a) + if err != nil { + t.Fatal(err) + } + + _, bk := randNode() + + err = p.Pin(ctx, a, true) + if err != nil { + t.Fatal(err) + } + + cidAKey := ak.KeyString() + cidBKey := bk.KeyString() + + // Corrupt index + cidRIndex := p.cidRIndex + _, err = cidRIndex.DeleteKey(ctx, cidAKey) + if err != nil { + t.Fatal(err) + } + err = cidRIndex.Add(ctx, cidBKey, "not-a-pin-id") + if err != nil { + t.Fatal(err) + } + + // Force dirty, since Pin syncs automatically + p.setDirty(ctx) + + // Verify dirty + data, err := dstore.Get(ctx, dirtyKey) + if err != nil { + t.Fatalf("could not read dirty flag: %v", err) + } + if data[0] != 1 { + t.Fatal("dirty flag not set") + } + + has, err := cidRIndex.HasAny(ctx, cidAKey) + if err != nil { + t.Fatal(err) + } + if has { + t.Fatal("index should be deleted") + } + + // Create new pinner on same datastore that was never 
flushed. This should + // detect the dirty flag and repair the indexes. + p, err = New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + // Verify not dirty + data, err = dstore.Get(ctx, dirtyKey) + if err != nil { + t.Fatalf("could not read dirty flag: %v", err) + } + if data[0] != 0 { + t.Fatal("dirty flag is set") + } + + // Verify index rebuilt + cidRIndex = p.cidRIndex + has, err = cidRIndex.HasAny(ctx, cidAKey) + if err != nil { + t.Fatal(err) + } + if !has { + t.Fatal("index should have been rebuilt") + } + + has, err = p.removePinsForCid(ctx, bk, ipfspin.Any) + if err != nil { + t.Fatal(err) + } + if !has { + t.Fatal("expected Unpin to return true since index removed") + } +} + +func TestEncodeDecodePin(t *testing.T) { + _, c := randNode() + + pin := newPin(c, ipfspin.Recursive, "testpin") + pin.Metadata = make(map[string]interface{}, 2) + pin.Metadata["hello"] = "world" + pin.Metadata["foo"] = "bar" + + encBytes, err := encodePin(pin) + if err != nil { + t.Fatal(err) + } + + decPin, err := decodePin(pin.Id, encBytes) + if err != nil { + t.Fatal(err) + } + + if decPin.Id != pin.Id { + t.Errorf("wrong pin id: expect %q got %q", pin.Id, decPin.Id) + } + if decPin.Cid != pin.Cid { + t.Errorf("wrong pin cid: expect %q got %q", pin.Cid.String(), decPin.Cid.String()) + } + if decPin.Mode != pin.Mode { + expect, _ := ipfspin.ModeToString(pin.Mode) + got, _ := ipfspin.ModeToString(decPin.Mode) + t.Errorf("wrong pin mode: expect %s got %s", expect, got) + } + if decPin.Name != pin.Name { + t.Errorf("wrong pin name: expect %q got %q", pin.Name, decPin.Name) + } + for key, val := range pin.Metadata { + dval, ok := decPin.Metadata[key] + if !ok { + t.Errorf("decoded pin missing metadata key %q", key) + } + if dval != val { + t.Errorf("wrong metadata value: expected %q got %q", val, dval) + } + } +} + +func makeTree(ctx context.Context, aBranchLen int, dserv ipld.DAGService, p ipfspin.Pinner) (aKeys []cid.Cid, bk cid.Cid, ck cid.Cid, err error) { + if aBranchLen < 3 { + err = errors.New("set aBranchLen to at least 3") + return + } + + aNodes := make([]*mdag.ProtoNode, aBranchLen) + aKeys = make([]cid.Cid, aBranchLen) + for i := 0; i < aBranchLen; i++ { + a, _ := randNode() + if i >= 1 { + if err = a.AddNodeLink("child", aNodes[i-1]); err != nil { + return + } + } + + if err = dserv.Add(ctx, a); err != nil { + return + } + aNodes[i] = a + aKeys[i] = a.Cid() + } + + // Pin last A recursively + if err = p.Pin(ctx, aNodes[aBranchLen-1], true); err != nil { + return + } + + // Create node B and add A3 as child + b, _ := randNode() + if err = b.AddNodeLink("mychild", aNodes[3]); err != nil { + return + } + + // Create C node + c, _ := randNode() + // Add A0 as child of C + if err = c.AddNodeLink("child", aNodes[0]); err != nil { + return + } + + // Add C + if err = dserv.Add(ctx, c); err != nil { + return + } + ck = c.Cid() + + // Add C to B and Add B + if err = b.AddNodeLink("myotherchild", c); err != nil { + return + } + if err = dserv.Add(ctx, b); err != nil { + return + } + bk = b.Cid() + + // Pin C recursively + if err = p.Pin(ctx, c, true); err != nil { + return + } + + // Pin B recursively + if err = p.Pin(ctx, b, true); err != nil { + return + } + + if err = p.Flush(ctx); err != nil { + return + } + + return +} + +func makeNodes(count int, dserv ipld.DAGService) []ipld.Node { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodes := make([]ipld.Node, count) + for i := 0; i < count; i++ { + n, _ := randNode() + err := dserv.Add(ctx, n) + if err != nil { + 
panic(err) + } + nodes[i] = n + } + return nodes +} + +func pinNodes(nodes []ipld.Node, p ipfspin.Pinner, recursive bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + + for i := range nodes { + err = p.Pin(ctx, nodes[i], recursive) + if err != nil { + panic(err) + } + } + err = p.Flush(ctx) + if err != nil { + panic(err) + } +} + +func unpinNodes(nodes []ipld.Node, p ipfspin.Pinner) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + + for i := range nodes { + err = p.Unpin(ctx, nodes[i].Cid(), true) + if err != nil { + panic(err) + } + } + err = p.Flush(ctx) + if err != nil { + panic(err) + } +} + +type batchWrap struct { + ds.Datastore +} + +func (d *batchWrap) Batch(_ context.Context) (ds.Batch, error) { + return ds.NewBasicBatch(d), nil +} + +func makeStore() (ds.Datastore, ipld.DAGService) { + ldstore, err := lds.NewDatastore("", nil) + if err != nil { + panic(err) + } + dstore := &batchWrap{ldstore} + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + dserv := mdag.NewDAGService(bserv) + return dstore, dserv +} + +// BenchmarkLoadRebuild loads a pinner that has some number of saved pins, and +// compares the load time when rebuilding indexes to loading without rebuilding +// indexes. +func BenchmarkLoad(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore, dserv := makeStore() + pinner, err := New(ctx, dstore, dserv) + if err != nil { + panic(err.Error()) + } + + nodes := makeNodes(4096, dserv) + pinNodes(nodes, pinner, true) + + b.Run("RebuildTrue", func(b *testing.B) { + for i := 0; i < b.N; i++ { + err = dstore.Put(ctx, dirtyKey, []byte{1}) + if err != nil { + panic(err.Error()) + } + + _, err = New(ctx, dstore, dserv) + if err != nil { + panic(err.Error()) + } + } + }) + + b.Run("RebuildFalse", func(b *testing.B) { + for i := 0; i < b.N; i++ { + err = dstore.Put(ctx, dirtyKey, []byte{0}) + if err != nil { + panic(err.Error()) + } + + _, err = New(ctx, dstore, dserv) + if err != nil { + panic(err.Error()) + } + } + }) +} + +// BenchmarkNthPins shows the time it takes to create/save 1 pin when a number +// of other pins already exist. Each run in the series shows performance for +// creating a pin in a larger number of existing pins. +func BenchmarkNthPin(b *testing.B) { + dstore, dserv := makeStore() + pinner, err := New(context.Background(), dstore, dserv) + if err != nil { + panic(err.Error()) + } + + for count := 1000; count <= 10000; count += 1000 { + b.Run(fmt.Sprint("PinDS-", count), func(b *testing.B) { + benchmarkNthPin(b, count, pinner, dserv) + }) + } +} + +func benchmarkNthPin(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodes := makeNodes(count, dserv) + pinNodes(nodes[:count-1], pinner, true) + b.ResetTimer() + + which := count - 1 + for i := 0; i < b.N; i++ { + // Pin the Nth node and Flush + err := pinner.Pin(ctx, nodes[which], true) + if err != nil { + panic(err) + } + err = pinner.Flush(ctx) + if err != nil { + panic(err) + } + // Unpin the nodes so that it can pinned next iter. + b.StopTimer() + err = pinner.Unpin(ctx, nodes[which].Cid(), true) + if err != nil { + panic(err) + } + err = pinner.Flush(ctx) + if err != nil { + panic(err) + } + b.StartTimer() + } +} + +// BenchmarkNPins demonstrates creating individual pins. 
Each run in the +// series shows performance for a larger number of individual pins. +func BenchmarkNPins(b *testing.B) { + for count := 128; count < 16386; count <<= 1 { + b.Run(fmt.Sprint("PinDS-", count), func(b *testing.B) { + dstore, dserv := makeStore() + pinner, err := New(context.Background(), dstore, dserv) + if err != nil { + panic(err.Error()) + } + benchmarkNPins(b, count, pinner, dserv) + }) + } +} + +func benchmarkNPins(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodes := makeNodes(count, dserv) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + // Pin all the nodes one at a time. + for j := range nodes { + err := pinner.Pin(ctx, nodes[j], true) + if err != nil { + panic(err) + } + err = pinner.Flush(ctx) + if err != nil { + panic(err) + } + } + + // Unpin all nodes so that they can be pinned next iter. + b.StopTimer() + unpinNodes(nodes, pinner) + b.StartTimer() + } +} + +// BenchmarkNUnpins demonstrates unpinning individual pins. Each run in the +// series shows performance for a larger number of individual unpins. +func BenchmarkNUnpins(b *testing.B) { + for count := 128; count < 16386; count <<= 1 { + b.Run(fmt.Sprint("UnpinDS-", count), func(b *testing.B) { + dstore, dserv := makeStore() + pinner, err := New(context.Background(), dstore, dserv) + if err != nil { + panic(err.Error()) + } + benchmarkNUnpins(b, count, pinner, dserv) + }) + } +} + +func benchmarkNUnpins(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodes := makeNodes(count, dserv) + pinNodes(nodes, pinner, true) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + for j := range nodes { + // Unpin nodes one at a time. + err := pinner.Unpin(ctx, nodes[j].Cid(), true) + if err != nil { + panic(err) + } + err = pinner.Flush(ctx) + if err != nil { + panic(err) + } + } + // Pin all nodes so that they can be unpinned next iter. + b.StopTimer() + pinNodes(nodes, pinner, true) + b.StartTimer() + } +} + +// BenchmarkPinAllSeries shows times to pin all nodes with only one Flush at +// the end. 
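+//
+// Compare with BenchmarkNPins above, which calls Flush after every Pin;
+// batching all pins into a single Flush amortizes the datastore sync cost
+// across the whole run.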
+ +// BenchmarkPinAll shows times to pin all nodes with only one Flush at +// the end. +func BenchmarkPinAll(b *testing.B) { + for count := 128; count < 16386; count <<= 1 { + b.Run(fmt.Sprint("PinAllDS-", count), func(b *testing.B) { + dstore, dserv := makeStore() + pinner, err := New(context.Background(), dstore, dserv) + if err != nil { + panic(err) + } + benchmarkPinAll(b, count, pinner, dserv) + }) + } +} + +func benchmarkPinAll(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) { + nodes := makeNodes(count, dserv) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + pinNodes(nodes, pinner, true) + + b.StopTimer() + unpinNodes(nodes, pinner) + b.StartTimer() + } +} + +func BenchmarkRebuild(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore, dserv := makeStore() + pinIncr := 32768 + + for pins := pinIncr; pins <= pinIncr*5; pins += pinIncr { + pinner, err := New(ctx, dstore, dserv) + if err != nil { + panic(err.Error()) + } + nodes := makeNodes(pinIncr, dserv) + pinNodes(nodes, pinner, true) + + b.Run(fmt.Sprintf("Rebuild %d", pins), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = dstore.Put(ctx, dirtyKey, []byte{1}) + if err != nil { + panic(err.Error()) + } + + _, err = New(ctx, dstore, dserv) + if err != nil { + panic(err.Error()) + } + } + }) + } +} + +func TestCidIndex(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore, dserv := makeStore() + pinner, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + nodes := makeNodes(1, dserv) + node := nodes[0] + + c := node.Cid() + cidKey := c.KeyString() + + // Pin the cid + pid, err := pinner.addPin(ctx, c, ipfspin.Recursive, "") + if err != nil { + t.Fatal(err) + } + + t.Log("Added pin:", pid) + t.Log("CID index:", c.String(), "-->", pid) + + // Check that the index exists + ok, err := pinner.cidRIndex.HasAny(ctx, cidKey) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("R-index has no value for", cidKey) + } + + // Check that searching for the cid returns a value + values, err := pinner.cidRIndex.Search(ctx, cidKey) + if err != nil { + t.Fatal(err) + } + if len(values) != 1 { + t.Fatal("expected index to return one value") + } + if values[0] != pid { + t.Fatal("indexer should have value", cidKey, "-->", pid) + } + + // Check that the index has the specific value + ok, err = pinner.cidRIndex.HasValue(ctx, cidKey, pid) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("indexer should have value", cidKey, "-->", pid) + } + + // Iterate values of index + var seen bool + err = pinner.cidRIndex.ForEach(ctx, "", func(key, value string) bool { + if seen { + t.Fatal("expected one key-value pair") + } + if key != cidKey { + t.Fatal("unexpected key:", key) + } + if value != pid { + t.Fatal("unexpected value:", value) + } + seen = true + return true + }) + if err != nil { + t.Fatal(err) + } + + // Load all pins from the datastore. + q := query.Query{ + Prefix: pinKeyPath, + } + results, err := pinner.dstore.Query(ctx, q) + if err != nil { + t.Fatal(err) + } + defer results.Close() + + // Iterate all pins in the datastore and verify that only the expected + // pin is present.
+ seen = false + for r := range results.Next() { + if seen { + t.Fatal("found more than one pin") + } + if r.Error != nil { + t.Fatal(fmt.Errorf("cannot read pin: %v", r.Error)) + } + ent := r.Entry + pp, err := decodePin(path.Base(ent.Key), ent.Value) + if err != nil { + t.Fatal(err) + } + t.Log("Found pin:", pp.Id) + if pp.Id != pid { + t.Fatal("ID of loaded pin does not match the ID known to the indexer") + } + seen = true + } +} + +func TestRebuild(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dstore, dserv := makeStore() + pinner, err := New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + nodes := makeNodes(3, dserv) + pinNodes(nodes, pinner, true) + + c1 := nodes[0].Cid() + cid1Key := c1.KeyString() + c2 := nodes[1].Cid() + cid2Key := c2.KeyString() + c3 := nodes[2].Cid() + cid3Key := c3.KeyString() + + // Get pin IDs + values, err := pinner.cidRIndex.Search(ctx, cid1Key) + if err != nil { + t.Fatal(err) + } + pid1 := values[0] + values, err = pinner.cidRIndex.Search(ctx, cid2Key) + if err != nil { + t.Fatal(err) + } + pid2 := values[0] + values, err = pinner.cidRIndex.Search(ctx, cid3Key) + if err != nil { + t.Fatal(err) + } + pid3 := values[0] + + // Corrupt the indexes by adding a direct index when there is already a recursive index + err = pinner.cidDIndex.Add(ctx, cid1Key, pid1) + if err != nil { + t.Fatal(err) + } + + // Corrupt the indexes by deleting cid index 2 to simulate an incomplete add or delete + _, err = pinner.cidRIndex.DeleteKey(ctx, cid2Key) + if err != nil { + t.Fatal(err) + } + + // Corrupt the indexes by deleting pin 3 to simulate corruption + var pp *pin + pp, err = pinner.loadPin(ctx, pid3) + if err != nil { + t.Fatal(err) + } + err = pinner.dstore.Delete(ctx, pp.dsKey()) + if err != nil { + t.Fatal(err) + } + + pinner.setDirty(ctx) + + // Rebuild indexes + pinner, err = New(ctx, dstore, dserv) + if err != nil { + t.Fatal(err) + } + + // Verify that indexes have same values as before + err = verifyIndexValue(ctx, pinner, cid1Key, pid1) + if err != nil { + t.Fatal(err) + } + err = verifyIndexValue(ctx, pinner, cid2Key, pid2) + if err != nil { + t.Fatal(err) + } + err = verifyIndexValue(ctx, pinner, cid3Key, pid3) + if err != nil { + t.Fatal(err) + } +} + +func verifyIndexValue(ctx context.Context, pinner *pinner, cidKey, expectedPid string) error { + values, err := pinner.cidRIndex.Search(ctx, cidKey) + if err != nil { + return err + } + if len(values) != 1 { + return errors.New("expected 1 value") + } + if expectedPid != values[0] { + return errors.New("index has wrong value") + } + ok, err := pinner.cidDIndex.HasAny(ctx, cidKey) + if err != nil { + return err + } + if ok { + return errors.New("should not have a direct index") + } + return nil +} diff --git a/pinning/pinner/dspinner/sync_test.go b/pinning/pinner/dspinner/sync_test.go new file mode 100644 index 0000000000..311833a95c --- /dev/null +++ b/pinning/pinner/dspinner/sync_test.go @@ -0,0 +1,88 @@ +package dspinner + +import ( + "context" + "os" + "testing" + + bs "github.com/ipfs/boxo/blockservice" + blockstore "github.com/ipfs/boxo/blockstore" + offline "github.com/ipfs/boxo/exchange/offline" + ds "github.com/ipfs/go-datastore" + bds "github.com/ipfs/go-ds-badger" + lds "github.com/ipfs/go-ds-leveldb" + ipld "github.com/ipfs/go-ipld-format" + mdag "github.com/ipfs/boxo/ipld/merkledag" +)
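+
+// These benchmarks compare batch pinning with autosync enabled (datastore
+// synced as each pin is written, per the "SyncEvery" benchmark names) against
+// deferring the sync to the single Flush in pinNodes ("SyncOnce"), on both
+// Badger and LevelDB datastores.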
+ +func makeStoreLevelDB(dir string) (ds.Datastore, ipld.DAGService) { + ldstore, err := lds.NewDatastore(dir, nil) + if err != nil { + panic(err) + } + // dstore := &batchWrap{ldstore} + dstore := ldstore + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + dserv := mdag.NewDAGService(bserv) + return dstore, dserv +} + +func makeStoreBadger(dir string) (ds.Datastore, ipld.DAGService) { + bdstore, err := bds.NewDatastore(dir, nil) + if err != nil { + panic(err) + } + dstore := &batchWrap{bdstore} + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + dserv := mdag.NewDAGService(bserv) + return dstore, dserv +} + +func benchAutoSync(b *testing.B, N int, auto bool, dstore ds.Datastore, dserv ipld.DAGService) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pinner, err := New(ctx, dstore, dserv) + if err != nil { + panic(err.Error()) + } + + nodes := makeNodes(N, dserv) + + pinner.SetAutosync(auto) + pinNodes(nodes, pinner, true) +} + +func BenchmarkSyncOnceBadger(b *testing.B) { + const dsDir = "b-once" + dstoreB1, dservB1 := makeStoreBadger(dsDir) + defer os.RemoveAll(dsDir) + benchAutoSync(b, b.N, false, dstoreB1, dservB1) + dstoreB1.Close() +} + +func BenchmarkSyncEveryBadger(b *testing.B) { + const dsDir = "b-every" + dstoreB2, dservB2 := makeStoreBadger(dsDir) + defer os.RemoveAll(dsDir) + benchAutoSync(b, b.N, true, dstoreB2, dservB2) + dstoreB2.Close() +} + +func BenchmarkSyncOnceLevelDB(b *testing.B) { + const dsDir = "l-once" + dstoreL1, dservL1 := makeStoreLevelDB(dsDir) + defer os.RemoveAll(dsDir) + benchAutoSync(b, b.N, false, dstoreL1, dservL1) + dstoreL1.Close() +} + +func BenchmarkSyncEveryLevelDB(b *testing.B) { + const dsDir = "l-every" + dstoreL2, dservL2 := makeStoreLevelDB(dsDir) + defer os.RemoveAll(dsDir) + benchAutoSync(b, b.N, true, dstoreL2, dservL2) + dstoreL2.Close() +} diff --git a/pinning/pinner/pin.go b/pinning/pinner/pin.go new file mode 100644 index 0000000000..fcf7d764a7 --- /dev/null +++ b/pinning/pinner/pin.go @@ -0,0 +1,158 @@ +// Package pin implements structures and methods to keep track of +// which objects a user wants to keep stored locally. +package pin + +import ( + "context" + "fmt" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +const ( + linkRecursive = "recursive" + linkDirect = "direct" + linkIndirect = "indirect" + linkInternal = "internal" + linkNotPinned = "not pinned" + linkAny = "any" + linkAll = "all" +) + +// Mode allows one to specify different types of pin (recursive, direct, etc.). +// See the Pin Modes constants for a full list. +type Mode int + +// Pin Modes +const ( + // Recursive pins pin the target cids along with any reachable children. + Recursive Mode = iota + + // Direct pins pin just the target cid. + Direct + + // Indirect pins are cids that have some ancestor pinned recursively. + Indirect + + // Internal pins are cids used to keep the internal state of the pinner. + Internal + + // NotPinned marks cids that are not pinned. + NotPinned + + // Any refers to any pinned cid. + Any +) + +// ModeToString returns a human-readable name for the Mode. +func ModeToString(mode Mode) (string, bool) { + m := map[Mode]string{ + Recursive: linkRecursive, + Direct: linkDirect, + Indirect: linkIndirect, + Internal: linkInternal, + NotPinned: linkNotPinned, + Any: linkAny, + } + s, ok := m[mode] + return s, ok +}
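+
+// For illustration, ModeToString and StringToMode (below) round-trip the
+// known modes; a minimal sketch:
+//
+//	s, _ := ModeToString(Recursive) // "recursive"
+//	m, ok := StringToMode(s)        // m == Recursive, ok == true
+//	_, ok = StringToMode("bogus")   // ok == false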
+ +// StringToMode parses the result of ModeToString() back to a Mode. +// It returns a boolean which is set to false if the mode is unknown. +func StringToMode(s string) (Mode, bool) { + m := map[string]Mode{ + linkRecursive: Recursive, + linkDirect: Direct, + linkIndirect: Indirect, + linkInternal: Internal, + linkNotPinned: NotPinned, + linkAny: Any, + linkAll: Any, // "all" and "any" mean the same thing + } + mode, ok := m[s] + return mode, ok +} + +// ErrNotPinned is returned when trying to unpin items that are not pinned. +var ErrNotPinned = fmt.Errorf("not pinned or pinned indirectly") + +// A Pinner provides the necessary methods to keep track of Nodes which are +// to be kept locally, according to a pin mode. In practice, a Pinner is in +// charge of keeping the list of items from the local storage that should +// not be garbage-collected. +type Pinner interface { + // IsPinned returns whether or not the given cid is pinned + // and an explanation of why it is pinned. + IsPinned(ctx context.Context, c cid.Cid) (string, bool, error) + + // IsPinnedWithType returns whether or not the given cid is pinned with the + // given pin type, as well as returning the type of pin it is pinned with. + IsPinnedWithType(ctx context.Context, c cid.Cid, mode Mode) (string, bool, error) + + // Pin the given node, optionally recursively. + // Pin will make sure that the given node and, if recursive is set, its + // children are stored locally. + Pin(ctx context.Context, node ipld.Node, recursive bool) error + + // Unpin the given cid. If recursive is true, removes either a recursive or + // a direct pin. If recursive is false, only removes a direct pin. + // If the pin doesn't exist, it returns ErrNotPinned. + Unpin(ctx context.Context, cid cid.Cid, recursive bool) error + + // Update updates a recursive pin from one cid to another. This is more + // efficient than simply pinning the new one and unpinning the old one. + Update(ctx context.Context, from, to cid.Cid, unpin bool) error + + // CheckIfPinned checks if a set of keys are pinned; this is more + // efficient than calling IsPinned for each key. + CheckIfPinned(ctx context.Context, cids ...cid.Cid) ([]Pinned, error) + + // PinWithMode is for manually editing the pin structure. Use with + // care! If used improperly, garbage collection may not be + // successful. + PinWithMode(context.Context, cid.Cid, Mode) error + + // Flush writes the pin state to the backing datastore. + Flush(ctx context.Context) error + + // DirectKeys returns all directly pinned cids. + DirectKeys(ctx context.Context) ([]cid.Cid, error) + + // RecursiveKeys returns all recursively pinned cids. + RecursiveKeys(ctx context.Context) ([]cid.Cid, error) + + // InternalPins returns all cids kept pinned for the internal state of the + // pinner. + InternalPins(ctx context.Context) ([]cid.Cid, error) +}
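+
+// A minimal usage sketch (assuming a concrete implementation such as the
+// dspinner package in this repository, with dstore, dserv, and node in scope):
+//
+//	pinner, err := dspinner.New(ctx, dstore, dserv)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err = pinner.Pin(ctx, node, true); err != nil {
+//		// handle error
+//	}
+//	if err = pinner.Flush(ctx); err != nil {
+//		// handle error
+//	}
+//	reason, pinned, err := pinner.IsPinned(ctx, node.Cid())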
+ +// Pinned represents a CID that has been pinned with a pinning strategy. +// The Via field allows identifying the pinning parent of this CID, in the +// case that the item is not pinned directly (but rather pinned recursively +// by some ancestor). +type Pinned struct { + Key cid.Cid + Mode Mode + Via cid.Cid +} + +// Pinned returns whether or not the given cid is pinned. +func (p Pinned) Pinned() bool { + return p.Mode != NotPinned +} + +// String returns the pin status as a string. +func (p Pinned) String() string { + switch p.Mode { + case NotPinned: + return "not pinned" + case Indirect: + return fmt.Sprintf("pinned via %s", p.Via) + default: + modeStr, _ := ModeToString(p.Mode) + return fmt.Sprintf("pinned: %s", modeStr) + } +} diff --git a/pinning/remote/client/.gitignore b/pinning/remote/client/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/pinning/remote/client/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/pinning/remote/client/README.md b/pinning/remote/client/README.md new file mode 100644 index 0000000000..717d9538b8 --- /dev/null +++ b/pinning/remote/client/README.md @@ -0,0 +1,16 @@ +## Updating Pinning Service Spec + +Download the openapi-generator from https://github.com/OpenAPITools/openapi-generator and generate the code using: + +``` +openapi-generator generate -g go-experimental -i https://raw.githubusercontent.com/ipfs/pinning-services-api-spec/master/ipfs-pinning-service.yaml -o openapi +rm openapi/go.mod openapi/go.sum +``` + +The current code was generated with openapi-generator 5.0.0-beta. + +Notes: +Due to https://github.com/OpenAPITools/openapi-generator/issues/7473, the generated code for processing HTTP error codes may need some manual editing. + +`go-experimental` is becoming mainstream and so in later versions will be replaced with `go`. diff --git a/pinning/remote/client/client.go b/pinning/remote/client/client.go new file mode 100644 index 0000000000..6c869d40f1 --- /dev/null +++ b/pinning/remote/client/client.go @@ -0,0 +1,428 @@ +package go_pinning_service_http_client + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/pkg/errors" + + "github.com/ipfs/boxo/pinning/remote/client/openapi" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multibase" + + logging "github.com/ipfs/go-log/v2" +) + +var logger = logging.Logger("pinning-service-http-client") + +const UserAgent = "go-pinning-service-http-client" + +type Client struct { + client *openapi.APIClient +} + +func NewClient(url, bearerToken string) *Client { + config := openapi.NewConfiguration() + config.UserAgent = UserAgent + config.AddDefaultHeader("Authorization", "Bearer "+bearerToken) + config.Servers = openapi.ServerConfigurations{ + openapi.ServerConfiguration{ + URL: url, + }, + } + + return &Client{client: openapi.NewAPIClient(config)} +} + +// TODO: We should probably make sure there are no duplicates sent +type lsSettings struct { + cids []string + name string + status []Status + before *time.Time + after *time.Time + limit *int32 + meta map[string]string +} + +type LsOption func(options *lsSettings) error + +var PinOpts = pinOpts{} + +type pinOpts struct { + pinLsOpts + pinAddOpts +} + +type pinLsOpts struct{} + +func (pinLsOpts) FilterCIDs(cids ...cid.Cid) LsOption { + return func(options *lsSettings) error { + enc := getCIDEncoder() + for _, c := range cids { + options.cids = append(options.cids, c.Encode(enc)) + } + return nil + } +} + +const maxNameSize = 255 + +func
(pinLsOpts) FilterName(name string) LsOption { + return func(options *lsSettings) error { + if len(name) > maxNameSize { + return fmt.Errorf("name cannot be longer than %d", maxNameSize) + } + options.name = name + return nil + } +} + +func (pinLsOpts) FilterStatus(statuses ...Status) LsOption { + return func(options *lsSettings) error { + for _, s := range statuses { + valid := false + for _, existing := range validStatuses { + if existing == s { + valid = true + break + } + } + if !valid { + return fmt.Errorf("invalid status %s", s) + } + } + options.status = append(options.status, statuses...) + return nil + } +} + +func (pinLsOpts) FilterBefore(t time.Time) LsOption { + return func(options *lsSettings) error { + options.before = &t + return nil + } +} + +func (pinLsOpts) FilterAfter(t time.Time) LsOption { + return func(options *lsSettings) error { + options.after = &t + return nil + } +} + +const recordLimit = 1000 +const defaultLimit = 10 + +func (pinLsOpts) Limit(limit int) LsOption { + return func(options *lsSettings) error { + if limit > recordLimit { + return fmt.Errorf("limit exceeds the maximum record limit of %d", recordLimit) + } + limitCasted := int32(limit) + options.limit = &limitCasted + return nil + } +} + +func (pinLsOpts) LsMeta(meta map[string]string) LsOption { + return func(options *lsSettings) error { + options.meta = meta + return nil + } +} + +type pinResults = openapi.PinResults + +func (c *Client) Ls(ctx context.Context, opts ...LsOption) (chan PinStatusGetter, chan error) { + res := make(chan PinStatusGetter, 1) + errs := make(chan error, 1) + + settings := new(lsSettings) + for _, o := range opts { + if err := o(settings); err != nil { + close(res) + errs <- err + close(errs) + return res, errs + } + } + + go func() { + defer func() { + if r := recover(); r != nil { + var err error + switch x := r.(type) { + case string: + err = fmt.Errorf("unexpected error while listing remote pins: %s", x) + case error: + err = fmt.Errorf("unexpected error while listing remote pins: %w", x) + default: + err = errors.New("unknown panic while listing remote pins") + } + errs <- err + } + close(errs) + close(res) + }() + + for { + pinRes, err := c.lsInternal(ctx, settings) + if err != nil { + errs <- err + return + } + + results := pinRes.GetResults() + for _, r := range results { + select { + case res <- &pinStatusObject{r}: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + + batchSize := len(results) + if int(pinRes.Count) == batchSize { + // no more batches + return + } + + // Better DX/UX for cases like https://github.com/application-research/estuary/issues/124 + if batchSize == 0 && int(pinRes.Count) != 0 { + errs <- fmt.Errorf("invalid pinning service response: PinResults.count=%d but no PinResults.results", int(pinRes.Count)) + return + } + + oldestResult := results[batchSize-1] + settings.before = &oldestResult.Created + } + }() + + return res, errs +} + +func (c *Client) LsSync(ctx context.Context, opts ...LsOption) ([]PinStatusGetter, error) { + resCh, errCh := c.Ls(ctx, opts...)
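+	// Ls streams matching pins on resCh and closes both channels when it
+	// finishes; drain resCh fully, then read the terminal error from errCh.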
+ + var res []PinStatusGetter + for r := range resCh { + res = append(res, r) + } + + return res, <-errCh +} + +// LsBatchSync is a manual version of Ls that returns a single batch of results along with the total count. +func (c *Client) LsBatchSync(ctx context.Context, opts ...LsOption) ([]PinStatusGetter, int, error) { + var res []PinStatusGetter + + settings := new(lsSettings) + for _, o := range opts { + if err := o(settings); err != nil { + return nil, 0, err + } + } + + pinRes, err := c.lsInternal(ctx, settings) + if err != nil { + return nil, 0, err + } + + results := pinRes.GetResults() + for _, r := range results { + res = append(res, &pinStatusObject{r}) + } + + return res, int(pinRes.Count), nil +} + +func (c *Client) lsInternal(ctx context.Context, settings *lsSettings) (pinResults, error) { + getter := c.client.PinsApi.PinsGet(ctx) + if len(settings.cids) > 0 { + getter = getter.Cid(settings.cids) + } + if len(settings.status) > 0 { + statuses := make([]openapi.Status, len(settings.status)) + for i := 0; i < len(statuses); i++ { + statuses[i] = openapi.Status(settings.status[i]) + } + getter = getter.Status(statuses) + } + if settings.limit == nil { + getter = getter.Limit(defaultLimit) + } else { + getter = getter.Limit(*settings.limit) + } + if len(settings.name) > 0 { + getter = getter.Name(settings.name) + } + if settings.before != nil { + getter = getter.Before(*settings.before) + } + if settings.after != nil { + getter = getter.After(*settings.after) + } + if settings.meta != nil { + getter = getter.Meta(settings.meta) + } + + // TODO: Ignoring HTTP Response OK? + results, httpresp, err := getter.Execute() + if err != nil { + err := httperr(httpresp, err) + return pinResults{}, err + } + + return results, nil +} + +// TODO: We should probably make sure there are no duplicates sent +type addSettings struct { + name string + origins []string + meta map[string]string +} + +type AddOption func(options *addSettings) error + +type pinAddOpts struct{} + +func (pinAddOpts) WithName(name string) AddOption { + return func(options *addSettings) error { + if len(name) > maxNameSize { + return fmt.Errorf("name cannot be longer than %d", maxNameSize) + } + options.name = name + return nil + } +} + +func (pinAddOpts) WithOrigins(origins ...multiaddr.Multiaddr) AddOption { + return func(options *addSettings) error { + for _, o := range origins { + options.origins = append(options.origins, o.String()) + } + return nil + } +} + +func (pinAddOpts) AddMeta(meta map[string]string) AddOption { + return func(options *addSettings) error { + options.meta = meta + return nil + } +} + +func (c *Client) Add(ctx context.Context, cid cid.Cid, opts ...AddOption) (PinStatusGetter, error) { + settings := new(addSettings) + for _, o := range opts { + if err := o(settings); err != nil { + return nil, err + } + } + + adder := c.client.PinsApi.PinsPost(ctx) + p := openapi.Pin{ + Cid: cid.Encode(getCIDEncoder()), + } + + if len(settings.origins) > 0 { + p.SetOrigins(settings.origins) + } + if settings.meta != nil { + p.SetMeta(settings.meta) + } + if len(settings.name) > 0 { + p.SetName(settings.name) + } + + result, httpresp, err := adder.Pin(p).Execute() + if err != nil { + err := httperr(httpresp, err) + return nil, err + } + + return &pinStatusObject{result}, nil +}
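+
+// A sketch of manual pagination with LsBatchSync (given a *Client c and ctx):
+// page backwards in time by passing the oldest result's creation time as a
+// FilterBefore option. Any other fixed filters would have to be re-applied
+// alongside it.
+//
+//	var all []PinStatusGetter
+//	var opts []LsOption
+//	for {
+//		batch, total, err := c.LsBatchSync(ctx, opts...)
+//		if err != nil {
+//			break // or handle the error
+//		}
+//		all = append(all, batch...)
+//		if len(batch) == 0 || len(batch) == total {
+//			break // no more batches
+//		}
+//		oldest := batch[len(batch)-1]
+//		opts = []LsOption{PinOpts.FilterBefore(oldest.GetCreated())}
+//	}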
+ +func (c *Client) GetStatusByID(ctx context.Context, pinID string) (PinStatusGetter, error) { + getter := c.client.PinsApi.PinsRequestidGet(ctx, pinID) + result, httpresp, err := getter.Execute() + if err != nil { + err := httperr(httpresp, err) + return nil, err + } + + return &pinStatusObject{result}, nil +} + +func (c *Client) DeleteByID(ctx context.Context, pinID string) error { + deleter := c.client.PinsApi.PinsRequestidDelete(ctx, pinID) + httpresp, err := deleter.Execute() + if err != nil { + err := httperr(httpresp, err) + return err + } + return nil +} + +func (c *Client) Replace(ctx context.Context, pinID string, cid cid.Cid, opts ...AddOption) (PinStatusGetter, error) { + settings := new(addSettings) + for _, o := range opts { + if err := o(settings); err != nil { + return nil, err + } + } + + adder := c.client.PinsApi.PinsRequestidPost(ctx, pinID) + p := openapi.Pin{ + Cid: cid.Encode(getCIDEncoder()), + } + + if len(settings.origins) > 0 { + p.SetOrigins(settings.origins) + } + if settings.meta != nil { + p.SetMeta(settings.meta) + } + if len(settings.name) > 0 { + p.SetName(settings.name) + } + + result, httpresp, err := adder.Pin(p).Execute() + if err != nil { + err := httperr(httpresp, err) + return nil, err + } + + return &pinStatusObject{result}, nil +} + +func getCIDEncoder() multibase.Encoder { + enc, err := multibase.NewEncoder(multibase.Base32) + if err != nil { + panic(err) + } + return enc +} + +func httperr(resp *http.Response, e error) error { + oerr, ok := e.(openapi.GenericOpenAPIError) + if ok { + ferr, ok := oerr.Model().(openapi.Failure) + if ok { + return errors.Wrapf(e, "reason: %q, details: %q", ferr.Error.GetReason(), ferr.Error.GetDetails()) + } + } + + if resp == nil { + return errors.Wrapf(e, "empty response from remote pinning service") + } + + return errors.Wrapf(e, "remote pinning service returned http error %d", resp.StatusCode) +} diff --git a/pinning/remote/client/cmd/main.go b/pinning/remote/client/cmd/main.go new file mode 100644 index 0000000000..c85f51effa --- /dev/null +++ b/pinning/remote/client/cmd/main.go @@ -0,0 +1,92 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + pinclient "github.com/ipfs/boxo/pinning/remote/client" + "github.com/ipfs/go-cid" +) + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + url, ok := os.LookupEnv("PS_URL") + if !ok { + panic("No Pinning Service URL found") + } + + key, ok := os.LookupEnv("PS_KEY") + if !ok { + panic("No Pinning Service API Key found") + } + + c := pinclient.NewClient(url, key) + + ipfsPgCid, err := cid.Parse("bafybeiayvrj27f65vbecspbnuavehcb3znvnt2strop2rfbczupudoizya") + if err != nil { + panic(err) + } + + libp2pCid, err := cid.Parse("bafybeiejgrxo4p4uofgfzvlg5twrg5w7tfwpf7aciiswfacfbdpevg2xfy") + if err != nil { + panic(err) + } + _ = ipfsPgCid + + listPins(ctx, c) + + fmt.Println("Adding libp2p home page") + ps, err := c.Add(ctx, libp2pCid, pinclient.PinOpts.WithName("libp2p")) + if err == nil { + fmt.Printf("PinStatus: %v \n", ps) + } else { + fmt.Println(err) + } + + listPins(ctx, c) + + fmt.Println("Check on pin status") + if ps == nil { + panic("cannot check pin status: no pin status was returned") + } + + var pinned bool + for !pinned { + status, err := c.GetStatusByID(ctx, ps.GetRequestId()) + if err == nil { + fmt.Println(status.GetStatus()) + pinned = status.GetStatus() == pinclient.StatusPinned + } else { + fmt.Println(err) + } + time.Sleep(time.Millisecond * 500) + } + + listPins(ctx, c) + + fmt.Println("Delete pin") + err = c.DeleteByID(ctx, ps.GetRequestId()) + if err == nil { + fmt.Println("Successfully deleted pin") + } else { + fmt.Println(err) + } + + listPins(ctx, c) +} + +func listPins(ctx context.Context, c *pinclient.Client) { + fmt.Println("List all pins")
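+	// LsSync gathers the streamed Ls results into a single slice before returning.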
+	pins, err := c.LsSync(ctx) + if err != nil { + fmt.Println(err) + } else { + for _, p := range pins { + fmt.Printf("Pin: %v \n", p) + } + } +} diff --git a/pinning/remote/client/model.go b/pinning/remote/client/model.go new file mode 100644 index 0000000000..60d6144778 --- /dev/null +++ b/pinning/remote/client/model.go @@ -0,0 +1,167 @@ +package go_pinning_service_http_client + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/ipfs/boxo/pinning/remote/client/openapi" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multiaddr" +) + +// PinGetter Getter for Pin object +type PinGetter interface { + fmt.Stringer + json.Marshaler + // CID to be pinned recursively + GetCid() cid.Cid + // Optional name for pinned data; can be used for lookups later + GetName() string + // Optional list of multiaddrs known to provide the data + GetOrigins() []string + // Optional metadata for pin object + GetMeta() map[string]string +} + +type pinObject struct { + openapi.Pin +} + +func (p *pinObject) MarshalJSON() ([]byte, error) { + originsStr := "[]" + if o := p.GetOrigins(); o != nil { + originsBytes, err := json.Marshal(o) + if err == nil { + originsStr = string(originsBytes) + } + } + + metaStr := "{}" + if meta := p.GetMeta(); meta != nil { + metaBytes, err := json.Marshal(meta) + if err == nil { + metaStr = string(metaBytes) + } + } + + str := fmt.Sprintf("{ \"Cid\" : \"%v\", \"Name\" : \"%s\", \"Origins\" : %v, \"Meta\" : %v }", + p.GetCid(), p.GetName(), originsStr, metaStr) + return []byte(str), nil +} + +func (p *pinObject) String() string { + marshalled, err := json.MarshalIndent(p, "", "\t") + if err != nil { + return "" + } + + return string(marshalled) +} + +func (p *pinObject) GetCid() cid.Cid { + c, err := cid.Parse(p.Pin.Cid) + if err != nil { + return cid.Undef + } + return c +} + +type Status string + +const ( + StatusUnknown Status = "" + StatusQueued Status = Status(openapi.QUEUED) + StatusPinning Status = Status(openapi.PINNING) + StatusPinned Status = Status(openapi.PINNED) + StatusFailed Status = Status(openapi.FAILED) +) + +func (s Status) String() string { + switch s { + case StatusQueued, StatusPinning, StatusPinned, StatusFailed: + return string(s) + default: + return string(StatusUnknown) + } +} + +var validStatuses = []Status{"queued", "pinning", "pinned", "failed"} + +// PinStatusGetter Getter for Pin object with status +type PinStatusGetter interface { + fmt.Stringer + json.Marshaler + // Globally unique ID of the pin request; can be used to check the status of ongoing pinning, modification of pin object, or pin removal + GetRequestId() string + GetStatus() Status + // Immutable timestamp indicating when a pin request entered a pinning service; can be used for filtering results and pagination + GetCreated() time.Time + GetPin() PinGetter + // List of multiaddrs designated by pinning service for transferring any new data from external peers + GetDelegates() []multiaddr.Multiaddr + // Optional info for PinStatus response + GetInfo() map[string]string +} + +type pinStatusObject struct { + openapi.PinStatus +} + +func (p *pinStatusObject) GetDelegates() []multiaddr.Multiaddr { + delegates := p.PinStatus.GetDelegates() + addrs := make([]multiaddr.Multiaddr, 0, len(delegates)) + for _, d := range delegates { + a, err := multiaddr.NewMultiaddr(d) + if err != nil { + logger.Errorf("returned delegate is an invalid multiaddr: %v", err) + continue + } + addrs = append(addrs, a) + } + return addrs +} + +func (p *pinStatusObject) GetPin() PinGetter { + return
&pinObject{p.Pin} +} + +func (p *pinStatusObject) GetStatus() Status { + return Status(p.PinStatus.GetStatus()) +} + +func (p *pinStatusObject) GetRequestId() string { + return p.GetRequestid() +} + +func (p *pinStatusObject) MarshalJSON() ([]byte, error) { + delegatesStr := "[]" + if d := p.GetDelegates(); d != nil { + delegatesBytes, err := json.Marshal(d) + if err == nil { + delegatesStr = string(delegatesBytes) + } + } + + infoStr := "{}" + if info := p.GetInfo(); info != nil { + infoBytes, err := json.Marshal(info) + if err == nil { + infoStr = string(infoBytes) + } + } + + str := fmt.Sprintf("{\"Pin\" : %v, \"RequestID\" : \"%s\", \"Status\" : \"%s\", \"Created\" : \"%v\", \"Delegates\" : %v, \"Info\" : %v }", + p.GetPin(), p.GetRequestId(), p.GetStatus(), p.GetCreated(), delegatesStr, infoStr) + + return []byte(str), nil +} + +func (p *pinStatusObject) String() string { + marshalled, err := json.MarshalIndent(p, "", "\t") + if err != nil { + return "" + } + + return string(marshalled) +} diff --git a/pinning/remote/client/openapi/README.md b/pinning/remote/client/openapi/README.md new file mode 100644 index 0000000000..fe21b9a009 --- /dev/null +++ b/pinning/remote/client/openapi/README.md @@ -0,0 +1,217 @@ +# Go API client for openapi + + + +## About this spec +The IPFS Pinning Service API is intended to be an implementation-agnostic API: +- For use and implementation by pinning service providers +- For use in client mode by IPFS nodes and GUI-based applications + +> **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** + +# Schemas +This section describes the most important object types and conventions. + +A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). + +## Identifiers +### cid +[Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. +### requestid +Unique identifier of a pin request. + +When a pin is created, the service responds with a unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. + +Service implementations should use a UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. + +## Objects +### Pin object + +![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) + +The `Pin` object is a representation of a pin request. + +It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. + +### Pin status response + +![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) + +The `PinStatus` object is a representation of the current state of a pinning operation. +It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management.
Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. + +## The pin lifecycle + +![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) + +### Creating a new pin object +The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: +- `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status and removing the pin in the future +- `status` in `PinStatus` indicates the current state of a pin + +### Checking status of in-progress pinning +`status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. + +In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. + +### Replacing an existing pin object +The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. It is useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. + +### Removing a pin object +A pin object can be removed via `DELETE /pins/{requestid}`.
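+
+As an aside, here is a minimal sketch of the status-checking step using the generated Go client in this package (names follow the endpoint table below; it assumes `ctx`, a configured `client`, and a `requestid` from an earlier `POST /pins` are in scope):
+
+```golang
+// Poll a pin created earlier until it reaches a terminal state.
+for {
+	status, _, err := client.PinsApi.PinsRequestidGet(ctx, requestid).Execute()
+	if err != nil {
+		return err
+	}
+	switch status.GetStatus() {
+	case openapi.PINNED:
+		return nil // done
+	case openapi.FAILED:
+		return fmt.Errorf("remote service failed to pin the CID")
+	}
+	time.Sleep(500 * time.Millisecond) // still queued or pinning
+}
+```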
+ + +## Provider hints +Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. + +The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. + +This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. + +## Custom metadata +Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. +### Pin metadata +String keys and values passed in `Pin.meta` are persisted with the pin object. + +Potential uses: +- `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` +- `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) + +Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. + +### Pin status info +Additional `PinStatus.info` can be returned by the pinning service. + +Potential uses: +- `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) +- `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead +- `PinStatus.info[raw_size]`: the size of data without DAG overhead (e.g. unixfs) +- `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire + +# Pagination and filtering +Pin objects can be listed by executing `GET /pins` with optional parameters: + +- When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. +- The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). +- If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. +- To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. +- Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. + +> **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + + + +## Overview +This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [OpenAPI-spec](https://www.openapis.org/) from a remote server, you can easily generate an API client. + +- API version: 0.1.1 +- Package version: 1.0.0 +- Build package: org.openapitools.codegen.languages.GoClientExperimentalCodegen + +## Installation + +Install the following dependencies: + +```shell +go get github.com/stretchr/testify/assert +go get golang.org/x/oauth2 +go get golang.org/x/net/context +``` + +Put the package under your project folder and add the following import: + +```golang +import sw "./openapi" +``` + +## Configuration of Server URL + +Default configuration comes with a `Servers` field that contains server objects as defined in the OpenAPI specification. + +### Select Server Configuration + +To use a server other than the one defined at index 0, set the context value `sw.ContextServerIndex` of type `int`. + +```golang +ctx := context.WithValue(context.Background(), sw.ContextServerIndex, 1) +``` + +### Templated Server URL + +Templated server URL is formatted using default variables from configuration or from context value `sw.ContextServerVariables` of type `map[string]string`. + +```golang +ctx := context.WithValue(context.Background(), sw.ContextServerVariables, map[string]string{ + "basePath": "v2", +}) +``` + +Note: enum values are always validated and all unused variables are silently ignored. + +### URLs Configuration per Operation + +Each operation can use a different server URL, defined using the `OperationServers` map in the `Configuration`. +An operation is uniquely identified by the `"{classname}Service.{nickname}"` string. +Similar rules for overriding the default operation server index and variables apply by using the `sw.ContextOperationServerIndices` and `sw.ContextOperationServerVariables` context maps.
+ +``` +ctx := context.WithValue(context.Background(), sw.ContextOperationServerIndices, map[string]int{ + "{classname}Service.{nickname}": 2, +}) +ctx = context.WithValue(context.Background(), sw.ContextOperationServerVariables, map[string]map[string]string{ + "{classname}Service.{nickname}": { + "port": "8443", + }, +}) +``` + +## Documentation for API Endpoints + +All URIs are relative to *https://pinning-service.example.com* + +Class | Method | HTTP request | Description +------------ | ------------- | ------------- | ------------- +*PinsApi* | [**PinsGet**](docs/PinsApi.md#pinsget) | **Get** /pins | List pin objects +*PinsApi* | [**PinsPost**](docs/PinsApi.md#pinspost) | **Post** /pins | Add pin object +*PinsApi* | [**PinsRequestidDelete**](docs/PinsApi.md#pinsrequestiddelete) | **Delete** /pins/{requestid} | Remove pin object +*PinsApi* | [**PinsRequestidGet**](docs/PinsApi.md#pinsrequestidget) | **Get** /pins/{requestid} | Get pin object +*PinsApi* | [**PinsRequestidPost**](docs/PinsApi.md#pinsrequestidpost) | **Post** /pins/{requestid} | Replace pin object + + +## Documentation For Models + + - [Failure](docs/Failure.md) + - [FailureError](docs/FailureError.md) + - [Pin](docs/Pin.md) + - [PinResults](docs/PinResults.md) + - [PinStatus](docs/PinStatus.md) + - [Status](docs/Status.md) + + +## Documentation For Authorization + + + +### accessToken + + +## Documentation for Utility Methods + +Due to the fact that model structure members are all pointers, this package contains +a number of utility functions to easily obtain pointers to values of basic types. +Each of these functions takes a value of the given basic type and returns a pointer to it: + +* `PtrBool` +* `PtrInt` +* `PtrInt32` +* `PtrInt64` +* `PtrFloat` +* `PtrFloat32` +* `PtrFloat64` +* `PtrString` +* `PtrTime` + +## Author + + + diff --git a/pinning/remote/client/openapi/api_pins.go b/pinning/remote/client/openapi/api_pins.go new file mode 100644 index 0000000000..b2858c5eea --- /dev/null +++ b/pinning/remote/client/openapi/api_pins.go @@ -0,0 +1,654 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. 
## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. 
### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. 
+ * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + _context "context" + _io "io" + _nethttp "net/http" + _neturl "net/url" + "strings" + "time" +) + +// Linger please +var ( + _ _context.Context +) + +// PinsApiService PinsApi service +type PinsApiService service + +type apiPinsGetRequest struct { + ctx _context.Context + apiService *PinsApiService + cid *[]string + name *string + status *[]Status + before *time.Time + after *time.Time + limit *int32 + meta *map[string]string +} + +func (r apiPinsGetRequest) Cid(cid []string) apiPinsGetRequest { + r.cid = &cid + return r +} + +func (r apiPinsGetRequest) Name(name string) apiPinsGetRequest { + r.name = &name + return r +} + +func (r apiPinsGetRequest) Status(status []Status) apiPinsGetRequest { + r.status = &status + return r +} + +func (r apiPinsGetRequest) Before(before time.Time) apiPinsGetRequest { + r.before = &before + return r +} + +func (r apiPinsGetRequest) After(after time.Time) apiPinsGetRequest { + r.after = &after + return r +} + +func (r apiPinsGetRequest) Limit(limit int32) apiPinsGetRequest { + r.limit = &limit + return r +} + +func (r apiPinsGetRequest) Meta(meta map[string]string) apiPinsGetRequest { + r.meta = &meta + return r +} + +/* +PinsGet List pin objects +List all the pin objects, matching optional filters; when no filter is provided, only successful pins are returned + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + +@return apiPinsGetRequest +*/ +func (a *PinsApiService) PinsGet(ctx _context.Context) apiPinsGetRequest { + return apiPinsGetRequest{ + apiService: a, + ctx: ctx, + } +} + +/* +Execute executes the request + + @return PinResults +*/ +func (r apiPinsGetRequest) Execute() (PinResults, *_nethttp.Response, error) { + var ( + localVarHTTPMethod = _nethttp.MethodGet + localVarPostBody interface{} + localVarFormFileName string + localVarFileName string + localVarFileBytes []byte + localVarReturnValue PinResults + ) + + localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "PinsApiService.PinsGet") + if err != nil { + return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/pins" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := _neturl.Values{} + localVarFormParams := _neturl.Values{} + + if r.cid != nil { + localVarQueryParams.Add("cid", parameterToString(*r.cid, "csv")) + } + if r.name != nil { + localVarQueryParams.Add("name", parameterToString(*r.name, "")) + } + if r.status != nil { + localVarQueryParams.Add("status", parameterToString(*r.status, "csv")) + } + if r.before != nil { + localVarQueryParams.Add("before", parameterToString(*r.before, "")) + } + if r.after != nil { + localVarQueryParams.Add("after", parameterToString(*r.after, "")) + } + if r.limit != nil { + localVarQueryParams.Add("limit", parameterToString(*r.limit, "")) + } + if r.meta != nil { + localVarQueryParams.Add("meta", parameterToString(*r.meta, "")) + } + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} 
+ + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := r.apiService.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := _io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + if localVarHTTPResponse.StatusCode >= 400 && localVarHTTPResponse.StatusCode <= 600 { + var v Failure + err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type apiPinsPostRequest struct { + ctx _context.Context + apiService *PinsApiService + pin *Pin +} + +func (r apiPinsPostRequest) Pin(pin Pin) apiPinsPostRequest { + r.pin = &pin + return r +} + +/* +PinsPost Add pin object +Add a new pin object for the current access token + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
+ +@return apiPinsPostRequest +*/ +func (a *PinsApiService) PinsPost(ctx _context.Context) apiPinsPostRequest { + return apiPinsPostRequest{ + apiService: a, + ctx: ctx, + } +} + +/* +Execute executes the request + + @return PinStatus +*/ +func (r apiPinsPostRequest) Execute() (PinStatus, *_nethttp.Response, error) { + var ( + localVarHTTPMethod = _nethttp.MethodPost + localVarPostBody interface{} + localVarFormFileName string + localVarFileName string + localVarFileBytes []byte + localVarReturnValue PinStatus + ) + + localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "PinsApiService.PinsPost") + if err != nil { + return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/pins" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := _neturl.Values{} + localVarFormParams := _neturl.Values{} + + if r.pin == nil { + return localVarReturnValue, nil, reportError("pin is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.pin + req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := r.apiService.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := _io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + if localVarHTTPResponse.StatusCode >= 400 && localVarHTTPResponse.StatusCode <= 600 { + var v Failure + err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type apiPinsRequestidDeleteRequest struct { + ctx _context.Context + apiService *PinsApiService + requestid string +} + +/* +PinsRequestidDelete Remove pin object +Remove a pin object + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, 
tracing, etc. Passed from http.Request or context.Background(). + - @param requestid + +@return apiPinsRequestidDeleteRequest +*/ +func (a *PinsApiService) PinsRequestidDelete(ctx _context.Context, requestid string) apiPinsRequestidDeleteRequest { + return apiPinsRequestidDeleteRequest{ + apiService: a, + ctx: ctx, + requestid: requestid, + } +} + +/* +Execute executes the request +*/ +func (r apiPinsRequestidDeleteRequest) Execute() (*_nethttp.Response, error) { + var ( + localVarHTTPMethod = _nethttp.MethodDelete + localVarPostBody interface{} + localVarFormFileName string + localVarFileName string + localVarFileBytes []byte + ) + + localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "PinsApiService.PinsRequestidDelete") + if err != nil { + return nil, GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/pins/{requestid}" + localVarPath = strings.Replace(localVarPath, "{"+"requestid"+"}", _neturl.PathEscape(parameterToString(r.requestid, "")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := _neturl.Values{} + localVarFormParams := _neturl.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) + if err != nil { + return nil, err + } + + localVarHTTPResponse, err := r.apiService.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarHTTPResponse, err + } + + localVarBody, err := _io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + if err != nil { + return localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + if localVarHTTPResponse.StatusCode >= 400 && localVarHTTPResponse.StatusCode <= 600 { + var v Failure + err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarHTTPResponse, newErr + } + newErr.model = v + return localVarHTTPResponse, newErr + } + return localVarHTTPResponse, newErr + } + + return localVarHTTPResponse, nil +} + +type apiPinsRequestidGetRequest struct { + ctx _context.Context + apiService *PinsApiService + requestid string +} + +/* +PinsRequestidGet Get pin object +Get a pin object and its status + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
+ - @param requestid + +@return apiPinsRequestidGetRequest +*/ +func (a *PinsApiService) PinsRequestidGet(ctx _context.Context, requestid string) apiPinsRequestidGetRequest { + return apiPinsRequestidGetRequest{ + apiService: a, + ctx: ctx, + requestid: requestid, + } +} + +/* +Execute executes the request + + @return PinStatus +*/ +func (r apiPinsRequestidGetRequest) Execute() (PinStatus, *_nethttp.Response, error) { + var ( + localVarHTTPMethod = _nethttp.MethodGet + localVarPostBody interface{} + localVarFormFileName string + localVarFileName string + localVarFileBytes []byte + localVarReturnValue PinStatus + ) + + localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "PinsApiService.PinsRequestidGet") + if err != nil { + return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/pins/{requestid}" + localVarPath = strings.Replace(localVarPath, "{"+"requestid"+"}", _neturl.PathEscape(parameterToString(r.requestid, "")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := _neturl.Values{} + localVarFormParams := _neturl.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := r.apiService.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := _io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + if localVarHTTPResponse.StatusCode >= 400 && localVarHTTPResponse.StatusCode <= 600 { + var v Failure + err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type apiPinsRequestidPostRequest struct { + ctx _context.Context + apiService *PinsApiService + requestid string + pin *Pin +} + +func (r apiPinsRequestidPostRequest) Pin(pin Pin) 
apiPinsRequestidPostRequest { + r.pin = &pin + return r +} + +/* +PinsRequestidPost Replace pin object +Replace an existing pin object (shortcut for executing remove and add operations in one step to avoid unnecessary garbage collection of blocks present in both recursive pins) + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + - @param requestid + +@return apiPinsRequestidPostRequest +*/ +func (a *PinsApiService) PinsRequestidPost(ctx _context.Context, requestid string) apiPinsRequestidPostRequest { + return apiPinsRequestidPostRequest{ + apiService: a, + ctx: ctx, + requestid: requestid, + } +} + +/* +Execute executes the request + + @return PinStatus +*/ +func (r apiPinsRequestidPostRequest) Execute() (PinStatus, *_nethttp.Response, error) { + var ( + localVarHTTPMethod = _nethttp.MethodPost + localVarPostBody interface{} + localVarFormFileName string + localVarFileName string + localVarFileBytes []byte + localVarReturnValue PinStatus + ) + + localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "PinsApiService.PinsRequestidPost") + if err != nil { + return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/pins/{requestid}" + localVarPath = strings.Replace(localVarPath, "{"+"requestid"+"}", _neturl.PathEscape(parameterToString(r.requestid, "")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := _neturl.Values{} + localVarFormParams := _neturl.Values{} + + if r.pin == nil { + return localVarReturnValue, nil, reportError("pin is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.pin + req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := r.apiService.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := _io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + if localVarHTTPResponse.StatusCode >= 400 && localVarHTTPResponse.StatusCode <= 600 { + var v Failure + err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } 
+ return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} diff --git a/pinning/remote/client/openapi/client.go b/pinning/remote/client/openapi/client.go new file mode 100644 index 0000000000..b3cea998b5 --- /dev/null +++ b/pinning/remote/client/openapi/client.go @@ -0,0 +1,498 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. 
## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) 
- `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" + "time" + + "golang.org/x/oauth2" +) + +var ( + jsonCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:vnd\.[^;]+\+)?json)`) + xmlCheck = regexp.MustCompile(`(?i:(?:application|text)/xml)`) +) + +// APIClient manages communication with the IPFS Pinning Service API API v0.1.1 +// In most cases there should be only one, shared, APIClient. +type APIClient struct { + cfg *Configuration + common service // Reuse a single struct instead of allocating one for each service on the heap. + + // API Services + + PinsApi *PinsApiService +} + +type service struct { + client *APIClient +} + +// NewAPIClient creates a new API client. Requires a userAgent string describing your application. +// optionally a custom http.Client to allow for advanced features such as caching. +func NewAPIClient(cfg *Configuration) *APIClient { + if cfg.HTTPClient == nil { + cfg.HTTPClient = http.DefaultClient + } + + c := &APIClient{} + c.cfg = cfg + c.common.client = c + + // API Services + c.PinsApi = (*PinsApiService)(&c.common) + + return c +} + +// selectHeaderContentType select a content type from the available list. 
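+// When "application/json" is among the candidates it is preferred; otherwise
+// the first type listed in the spec's 'consumes' section is used as a fallback.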
+func selectHeaderContentType(contentTypes []string) string { + if len(contentTypes) == 0 { + return "" + } + if contains(contentTypes, "application/json") { + return "application/json" + } + return contentTypes[0] // use the first content type specified in 'consumes' +} + +// selectHeaderAccept join all accept types and return +func selectHeaderAccept(accepts []string) string { + if len(accepts) == 0 { + return "" + } + + if contains(accepts, "application/json") { + return "application/json" + } + + return strings.Join(accepts, ",") +} + +// contains is a case insenstive match, finding needle in a haystack +func contains(haystack []string, needle string) bool { + for _, a := range haystack { + if strings.EqualFold(a, needle) { + return true + } + } + return false +} + +// parameterToString convert interface{} parameters to string, using a delimiter if format is provided. +func parameterToString(obj interface{}, collectionFormat string) string { + var delimiter string + + switch collectionFormat { + case "pipes": + delimiter = "|" + case "ssv": + delimiter = " " + case "tsv": + delimiter = "\t" + case "csv": + delimiter = "," + } + + if reflect.TypeOf(obj).Kind() == reflect.Slice { + return strings.Trim(strings.Replace(fmt.Sprint(obj), " ", delimiter, -1), "[]") + } else if t, ok := obj.(time.Time); ok { + return t.Format(time.RFC3339Nano) + } + + return fmt.Sprintf("%v", obj) +} + +// callAPI do the request. +func (c *APIClient) callAPI(request *http.Request) (*http.Response, error) { + if c.cfg.Debug { + dump, err := httputil.DumpRequestOut(request, true) + if err != nil { + return nil, err + } + log.Printf("\n%s\n", string(dump)) + } + + resp, err := c.cfg.HTTPClient.Do(request) + if err != nil { + return resp, err + } + + if c.cfg.Debug { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return resp, err + } + log.Printf("\n%s\n", string(dump)) + } + return resp, err +} + +// Allow modification of underlying config for alternate implementations and testing +// Caution: modifying the configuration while live can cause data races and potentially unwanted behavior +func (c *APIClient) GetConfig() *Configuration { + return c.cfg +} + +// prepareRequest build the request +func (c *APIClient) prepareRequest( + ctx context.Context, + path string, method string, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams url.Values, + formFileName string, + fileName string, + fileBytes []byte) (localVarRequest *http.Request, err error) { + + var body *bytes.Buffer + + // Detect postBody type and post. + if postBody != nil { + contentType := headerParams["Content-Type"] + if contentType == "" { + contentType = detectContentType(postBody) + headerParams["Content-Type"] = contentType + } + + body, err = setBody(postBody, contentType) + if err != nil { + return nil, err + } + } + + // add form parameters and file if available. 
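+	// The multipart body below is only assembled when the Content-Type is
+	// multipart/form-data and either form values or raw file bytes with a
+	// file name were supplied; combining it with an explicit postBody is an error.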
+ if strings.HasPrefix(headerParams["Content-Type"], "multipart/form-data") && len(formParams) > 0 || (len(fileBytes) > 0 && fileName != "") { + if body != nil { + return nil, errors.New("cannot specify postBody and multipart form at the same time") + } + body = &bytes.Buffer{} + w := multipart.NewWriter(body) + + for k, v := range formParams { + for _, iv := range v { + if strings.HasPrefix(k, "@") { // file + err = addFile(w, k[1:], iv) + if err != nil { + return nil, err + } + } else { // form value + w.WriteField(k, iv) + } + } + } + if len(fileBytes) > 0 && fileName != "" { + w.Boundary() + //_, fileNm := filepath.Split(fileName) + part, err := w.CreateFormFile(formFileName, filepath.Base(fileName)) + if err != nil { + return nil, err + } + _, err = part.Write(fileBytes) + if err != nil { + return nil, err + } + } + + // Set the Boundary in the Content-Type + headerParams["Content-Type"] = w.FormDataContentType() + + // Set Content-Length + headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) + w.Close() + } + + if strings.HasPrefix(headerParams["Content-Type"], "application/x-www-form-urlencoded") && len(formParams) > 0 { + if body != nil { + return nil, errors.New("cannot specify postBody and x-www-form-urlencoded form at the same time") + } + body = &bytes.Buffer{} + body.WriteString(formParams.Encode()) + // Set Content-Length + headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) + } + + // Setup path and query parameters + url, err := url.Parse(path) + if err != nil { + return nil, err + } + + // Override request host, if applicable + if c.cfg.Host != "" { + url.Host = c.cfg.Host + } + + // Override request scheme, if applicable + if c.cfg.Scheme != "" { + url.Scheme = c.cfg.Scheme + } + + // Adding Query Param + query := url.Query() + for k, v := range queryParams { + for _, iv := range v { + query.Add(k, iv) + } + } + + // Encode the parameters. + url.RawQuery = query.Encode() + + // Generate a new request + if body != nil { + localVarRequest, err = http.NewRequest(method, url.String(), body) + } else { + localVarRequest, err = http.NewRequest(method, url.String(), nil) + } + if err != nil { + return nil, err + } + + // add header parameters, if any + if len(headerParams) > 0 { + headers := http.Header{} + for h, v := range headerParams { + headers.Set(h, v) + } + localVarRequest.Header = headers + } + + // Add the user agent to the request. + localVarRequest.Header.Add("User-Agent", c.cfg.UserAgent) + + if ctx != nil { + // add context to the request + localVarRequest = localVarRequest.WithContext(ctx) + + // Walk through any authentication. 
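+		// Credentials are looked up under well-known context keys: an
+		// oauth2.TokenSource (ContextOAuth2), BasicAuth credentials
+		// (ContextBasicAuth), or a bearer token string (ContextAccessToken).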
+ + // OAuth2 authentication + if tok, ok := ctx.Value(ContextOAuth2).(oauth2.TokenSource); ok { + // We were able to grab an oauth2 token from the context + var latestToken *oauth2.Token + if latestToken, err = tok.Token(); err != nil { + return nil, err + } + + latestToken.SetAuthHeader(localVarRequest) + } + + // Basic HTTP Authentication + if auth, ok := ctx.Value(ContextBasicAuth).(BasicAuth); ok { + localVarRequest.SetBasicAuth(auth.UserName, auth.Password) + } + + // AccessToken Authentication + if auth, ok := ctx.Value(ContextAccessToken).(string); ok { + localVarRequest.Header.Add("Authorization", "Bearer "+auth) + } + } + + for header, value := range c.cfg.DefaultHeader { + localVarRequest.Header.Add(header, value) + } + return localVarRequest, nil +} + +func (c *APIClient) decode(v interface{}, b []byte, contentType string) (err error) { + if len(b) == 0 { + return nil + } + if s, ok := v.(*string); ok { + *s = string(b) + return nil + } + if xmlCheck.MatchString(contentType) { + if err = xml.Unmarshal(b, v); err != nil { + return err + } + return nil + } + if jsonCheck.MatchString(contentType) { + if actualObj, ok := v.(interface{ GetActualInstance() interface{} }); ok { // oneOf, anyOf schemas + if unmarshalObj, ok := actualObj.(interface{ UnmarshalJSON([]byte) error }); ok { // make sure it has UnmarshalJSON defined + if err = unmarshalObj.UnmarshalJSON(b); err != nil { + return err + } + } else { + return errors.New("unknown type with GetActualInstance but no unmarshalObj.UnmarshalJSON defined") + } + } else if err = json.Unmarshal(b, v); err != nil { // simple model + return err + } + return nil + } + return errors.New("undefined response type") +} + +// Add a file to the multipart request +func addFile(w *multipart.Writer, fieldName, path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + part, err := w.CreateFormFile(fieldName, filepath.Base(path)) + if err != nil { + return err + } + _, err = io.Copy(part, file) + + return err +} + +// Prevent trying to import "fmt" +func reportError(format string, a ...interface{}) error { + return fmt.Errorf(format, a...) 
+} + +// Set request body from an interface{} +func setBody(body interface{}, contentType string) (bodyBuf *bytes.Buffer, err error) { + if bodyBuf == nil { + bodyBuf = &bytes.Buffer{} + } + + if reader, ok := body.(io.Reader); ok { + _, err = bodyBuf.ReadFrom(reader) + } else if b, ok := body.([]byte); ok { + _, err = bodyBuf.Write(b) + } else if s, ok := body.(string); ok { + _, err = bodyBuf.WriteString(s) + } else if s, ok := body.(*string); ok { + _, err = bodyBuf.WriteString(*s) + } else if jsonCheck.MatchString(contentType) { + err = json.NewEncoder(bodyBuf).Encode(body) + } else if xmlCheck.MatchString(contentType) { + err = xml.NewEncoder(bodyBuf).Encode(body) + } + + if err != nil { + return nil, err + } + + if bodyBuf.Len() == 0 { + err = fmt.Errorf("invalid body type %s", contentType) + return nil, err + } + return bodyBuf, nil +} + +// detectContentType method is used to figure out `Request.Body` content type for request header +func detectContentType(body interface{}) string { + contentType := "text/plain; charset=utf-8" + kind := reflect.TypeOf(body).Kind() + + switch kind { + case reflect.Struct, reflect.Map, reflect.Ptr: + contentType = "application/json; charset=utf-8" + case reflect.String: + contentType = "text/plain; charset=utf-8" + default: + if b, ok := body.([]byte); ok { + contentType = http.DetectContentType(b) + } else if kind == reflect.Slice { + contentType = "application/json; charset=utf-8" + } + } + + return contentType +} + +// Ripped from https://github.com/gregjones/httpcache/blob/master/httpcache.go +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// CacheExpires helper function to determine remaining time before repeating a request. +func CacheExpires(r *http.Response) time.Time { + // Figure out when the cache expires. + var expires time.Time + now, err := time.Parse(time.RFC1123, r.Header.Get("date")) + if err != nil { + return time.Now() + } + respCacheControl := parseCacheControl(r.Header) + + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err := time.ParseDuration(maxAge + "s") + if err != nil { + expires = now + } else { + expires = now.Add(lifetime) + } + } else { + expiresHeader := r.Header.Get("Expires") + if expiresHeader != "" { + expires, err = time.Parse(time.RFC1123, expiresHeader) + if err != nil { + expires = now + } + } + } + return expires +} + +// GenericOpenAPIError Provides access to the body, error and model on returned errors. +type GenericOpenAPIError struct { + body []byte + error string + model interface{} +} + +// Error returns non-empty string if there was an error. 
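+// The raw response body and any decoded error model remain available through
+// the Body and Model accessors below.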
+func (e GenericOpenAPIError) Error() string { + return e.error +} + +// Body returns the raw bytes of the response +func (e GenericOpenAPIError) Body() []byte { + return e.body +} + +// Model returns the unpacked model of the error +func (e GenericOpenAPIError) Model() interface{} { + return e.model +} diff --git a/pinning/remote/client/openapi/configuration.go b/pinning/remote/client/openapi/configuration.go new file mode 100644 index 0000000000..bb0a8e5073 --- /dev/null +++ b/pinning/remote/client/openapi/configuration.go @@ -0,0 +1,228 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. 
## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) 
- `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "context" + "fmt" + "net/http" + "strings" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are string, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes an oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request. + ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKeys takes a string apikey as authentication for the request + ContextAPIKeys = contextKey("apiKeys") + + // ContextHttpSignatureAuth takes HttpSignatureAuth as authentication for the request. + ContextHttpSignatureAuth = contextKey("httpsignature") + + // ContextServerIndex uses a server configuration from the index. + ContextServerIndex = contextKey("serverIndex") + + // ContextOperationServerIndices uses a server configuration from the index mapping. + ContextOperationServerIndices = contextKey("serverOperationIndices") + + // ContextServerVariables overrides a server configuration variables. + ContextServerVariables = contextKey("serverVariables") + + // ContextOperationServerVariables overrides a server configuration variables using operation specific values. 
+ ContextOperationServerVariables = contextKey("serverOperationVariables") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +// ServerVariable stores the information about a server variable +type ServerVariable struct { + Description string + DefaultValue string + EnumValues []string +} + +// ServerConfiguration stores the information about a server +type ServerConfiguration struct { + URL string + Description string + Variables map[string]ServerVariable +} + +// ServerConfigurations stores multiple ServerConfiguration items +type ServerConfigurations []ServerConfiguration + +// Configuration stores the configuration of the API client +type Configuration struct { + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + Debug bool `json:"debug,omitempty"` + Servers ServerConfigurations + OperationServers map[string]ServerConfigurations + HTTPClient *http.Client +} + +// NewConfiguration returns a new Configuration object +func NewConfiguration() *Configuration { + cfg := &Configuration{ + DefaultHeader: make(map[string]string), + UserAgent: "OpenAPI-Generator/1.0.0/go", + Debug: false, + Servers: ServerConfigurations{ + { + URL: "https://pinning-service.example.com", + Description: "No description provided", + }, + }, + OperationServers: map[string]ServerConfigurations{}, + } + return cfg +} + +// AddDefaultHeader adds a new HTTP header to the default header in the request +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} + +// URL formats template on a index using given variables +func (sc ServerConfigurations) URL(index int, variables map[string]string) (string, error) { + if index < 0 || len(sc) <= index { + return "", fmt.Errorf("index %v out of range %v", index, len(sc)-1) + } + server := sc[index] + url := server.URL + + // go through variables and replace placeholders + for name, variable := range server.Variables { + if value, ok := variables[name]; ok { + found := bool(len(variable.EnumValues) == 0) + for _, enumValue := range variable.EnumValues { + if value == enumValue { + found = true + } + } + if !found { + return "", fmt.Errorf("the variable %s in the server URL has invalid value %v. 
Must be %v", name, value, variable.EnumValues) + } + url = strings.Replace(url, "{"+name+"}", value, -1) + } else { + url = strings.Replace(url, "{"+name+"}", variable.DefaultValue, -1) + } + } + return url, nil +} + +// ServerURL returns URL based on server settings +func (c *Configuration) ServerURL(index int, variables map[string]string) (string, error) { + return c.Servers.URL(index, variables) +} + +func getServerIndex(ctx context.Context) (int, error) { + si := ctx.Value(ContextServerIndex) + if si != nil { + if index, ok := si.(int); ok { + return index, nil + } + return 0, reportError("Invalid type %T should be int", si) + } + return 0, nil +} + +func getServerOperationIndex(ctx context.Context, endpoint string) (int, error) { + osi := ctx.Value(ContextOperationServerIndices) + if osi != nil { + if operationIndices, ok := osi.(map[string]int); !ok { + return 0, reportError("Invalid type %T should be map[string]int", osi) + } else { + index, ok := operationIndices[endpoint] + if ok { + return index, nil + } + } + } + return getServerIndex(ctx) +} + +func getServerVariables(ctx context.Context) (map[string]string, error) { + sv := ctx.Value(ContextServerVariables) + if sv != nil { + if variables, ok := sv.(map[string]string); ok { + return variables, nil + } + return nil, reportError("ctx value of ContextServerVariables has invalid type %T should be map[string]string", sv) + } + return nil, nil +} + +func getServerOperationVariables(ctx context.Context, endpoint string) (map[string]string, error) { + osv := ctx.Value(ContextOperationServerVariables) + if osv != nil { + if operationVariables, ok := osv.(map[string]map[string]string); !ok { + return nil, reportError("ctx value of ContextOperationServerVariables has invalid type %T should be map[string]map[string]string", osv) + } else { + variables, ok := operationVariables[endpoint] + if ok { + return variables, nil + } + } + } + return getServerVariables(ctx) +} + +// ServerURLWithContext returns a new server URL given an endpoint +func (c *Configuration) ServerURLWithContext(ctx context.Context, endpoint string) (string, error) { + sc, ok := c.OperationServers[endpoint] + if !ok { + sc = c.Servers + } + + if ctx == nil { + return sc.URL(0, nil) + } + + index, err := getServerOperationIndex(ctx, endpoint) + if err != nil { + return "", err + } + + variables, err := getServerOperationVariables(ctx, endpoint) + if err != nil { + return "", err + } + + return sc.URL(index, variables) +} diff --git a/pinning/remote/client/openapi/docs/Failure.md b/pinning/remote/client/openapi/docs/Failure.md new file mode 100644 index 0000000000..c899f7138a --- /dev/null +++ b/pinning/remote/client/openapi/docs/Failure.md @@ -0,0 +1,51 @@ +# Failure + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**Error** | [**FailureError**](Failure_error.md) | | + +## Methods + +### NewFailure + +`func NewFailure(error_ FailureError, ) *Failure` + +NewFailure instantiates a new Failure object +This constructor will assign default values to properties that have it defined, +and makes sure properties required by API are set, but the set of arguments +will change when the set of required properties is changed + +### NewFailureWithDefaults + +`func NewFailureWithDefaults() *Failure` + +NewFailureWithDefaults instantiates a new Failure object +This constructor will only assign default values to properties that have it defined, +but it doesn't guarantee that properties required by API are set + +### 
GetError + +`func (o *Failure) GetError() FailureError` + +GetError returns the Error field if non-nil, zero value otherwise. + +### GetErrorOk + +`func (o *Failure) GetErrorOk() (*FailureError, bool)` + +GetErrorOk returns a tuple with the Error field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetError + +`func (o *Failure) SetError(v FailureError)` + +SetError sets Error field to given value. + + + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/pinning/remote/client/openapi/docs/FailureError.md b/pinning/remote/client/openapi/docs/FailureError.md new file mode 100644 index 0000000000..478f1b942a --- /dev/null +++ b/pinning/remote/client/openapi/docs/FailureError.md @@ -0,0 +1,77 @@ +# FailureError + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**Reason** | **string** | Mandatory string identifying the type of error | +**Details** | Pointer to **string** | Optional, longer description of the error; may include UUID of transaction for support, links to documentation etc | [optional] + +## Methods + +### NewFailureError + +`func NewFailureError(reason string, ) *FailureError` + +NewFailureError instantiates a new FailureError object +This constructor will assign default values to properties that have it defined, +and makes sure properties required by API are set, but the set of arguments +will change when the set of required properties is changed + +### NewFailureErrorWithDefaults + +`func NewFailureErrorWithDefaults() *FailureError` + +NewFailureErrorWithDefaults instantiates a new FailureError object +This constructor will only assign default values to properties that have it defined, +but it doesn't guarantee that properties required by API are set + +### GetReason + +`func (o *FailureError) GetReason() string` + +GetReason returns the Reason field if non-nil, zero value otherwise. + +### GetReasonOk + +`func (o *FailureError) GetReasonOk() (*string, bool)` + +GetReasonOk returns a tuple with the Reason field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetReason + +`func (o *FailureError) SetReason(v string)` + +SetReason sets Reason field to given value. + + +### GetDetails + +`func (o *FailureError) GetDetails() string` + +GetDetails returns the Details field if non-nil, zero value otherwise. + +### GetDetailsOk + +`func (o *FailureError) GetDetailsOk() (*string, bool)` + +GetDetailsOk returns a tuple with the Details field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetDetails + +`func (o *FailureError) SetDetails(v string)` + +SetDetails sets Details field to given value. + +### HasDetails + +`func (o *FailureError) HasDetails() bool` + +HasDetails returns a boolean if a field has been set. 
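+
+### Example (illustrative)
+
+A minimal sketch of inspecting a failed call; it assumes the returned error is
+this package's GenericOpenAPIError carrying a decoded Failure model, and uses
+the `openapiclient` alias from the other doc examples:
+
+```go
+if apiErr, ok := err.(openapiclient.GenericOpenAPIError); ok {
+	if failure, ok := apiErr.Model().(openapiclient.Failure); ok {
+		fe := failure.GetError()
+		// Reason is mandatory; Details is optional and may be empty.
+		fmt.Println(fe.GetReason(), fe.GetDetails())
+	}
+}
+```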
+ + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/pinning/remote/client/openapi/docs/Pin.md b/pinning/remote/client/openapi/docs/Pin.md new file mode 100644 index 0000000000..e5d3e0f18f --- /dev/null +++ b/pinning/remote/client/openapi/docs/Pin.md @@ -0,0 +1,129 @@ +# Pin + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**Cid** | **string** | Content Identifier (CID) to be pinned recursively | +**Name** | Pointer to **string** | Optional name for pinned data; can be used for lookups later | [optional] +**Origins** | Pointer to **[]string** | Optional list of multiaddrs known to provide the data | [optional] +**Meta** | Pointer to **map[string]string** | Optional metadata for pin object | [optional] + +## Methods + +### NewPin + +`func NewPin(cid string, ) *Pin` + +NewPin instantiates a new Pin object +This constructor will assign default values to properties that have it defined, +and makes sure properties required by API are set, but the set of arguments +will change when the set of required properties is changed + +### NewPinWithDefaults + +`func NewPinWithDefaults() *Pin` + +NewPinWithDefaults instantiates a new Pin object +This constructor will only assign default values to properties that have it defined, +but it doesn't guarantee that properties required by API are set + +### GetCid + +`func (o *Pin) GetCid() string` + +GetCid returns the Cid field if non-nil, zero value otherwise. + +### GetCidOk + +`func (o *Pin) GetCidOk() (*string, bool)` + +GetCidOk returns a tuple with the Cid field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetCid + +`func (o *Pin) SetCid(v string)` + +SetCid sets Cid field to given value. + + +### GetName + +`func (o *Pin) GetName() string` + +GetName returns the Name field if non-nil, zero value otherwise. + +### GetNameOk + +`func (o *Pin) GetNameOk() (*string, bool)` + +GetNameOk returns a tuple with the Name field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetName + +`func (o *Pin) SetName(v string)` + +SetName sets Name field to given value. + +### HasName + +`func (o *Pin) HasName() bool` + +HasName returns a boolean if a field has been set. + +### GetOrigins + +`func (o *Pin) GetOrigins() []string` + +GetOrigins returns the Origins field if non-nil, zero value otherwise. + +### GetOriginsOk + +`func (o *Pin) GetOriginsOk() (*[]string, bool)` + +GetOriginsOk returns a tuple with the Origins field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetOrigins + +`func (o *Pin) SetOrigins(v []string)` + +SetOrigins sets Origins field to given value. + +### HasOrigins + +`func (o *Pin) HasOrigins() bool` + +HasOrigins returns a boolean if a field has been set. + +### GetMeta + +`func (o *Pin) GetMeta() map[string]string` + +GetMeta returns the Meta field if non-nil, zero value otherwise. + +### GetMetaOk + +`func (o *Pin) GetMetaOk() (*map[string]string, bool)` + +GetMetaOk returns a tuple with the Meta field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetMeta + +`func (o *Pin) SetMeta(v map[string]string)` + +SetMeta sets Meta field to given value. + +### HasMeta + +`func (o *Pin) HasMeta() bool` + +HasMeta returns a boolean if a field has been set. 
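+
+### Example (illustrative)
+
+A minimal sketch of building a Pin with the constructor and setters documented
+above; the CID and multiaddr values are placeholders. The resulting *Pin can be
+dereferenced and passed to a request builder's Pin setter:
+
+```go
+pin := openapiclient.NewPin("bafybeigdyr...")        // cid is the only required field
+pin.SetName("my-dataset")                            // optional, usable for lookups later
+pin.SetOrigins([]string{"/p2p/12D3Koo..."})          // optional provider hints
+pin.SetMeta(map[string]string{"app_id": "example-app"})
+```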
+ + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/pinning/remote/client/openapi/docs/PinResults.md b/pinning/remote/client/openapi/docs/PinResults.md new file mode 100644 index 0000000000..1982bfddb0 --- /dev/null +++ b/pinning/remote/client/openapi/docs/PinResults.md @@ -0,0 +1,72 @@ +# PinResults + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**Count** | **int32** | The total number of pin objects that exist for passed query filters | +**Results** | [**[]PinStatus**](PinStatus.md) | An array of PinStatus results | + +## Methods + +### NewPinResults + +`func NewPinResults(count int32, results []PinStatus, ) *PinResults` + +NewPinResults instantiates a new PinResults object +This constructor will assign default values to properties that have it defined, +and makes sure properties required by API are set, but the set of arguments +will change when the set of required properties is changed + +### NewPinResultsWithDefaults + +`func NewPinResultsWithDefaults() *PinResults` + +NewPinResultsWithDefaults instantiates a new PinResults object +This constructor will only assign default values to properties that have it defined, +but it doesn't guarantee that properties required by API are set + +### GetCount + +`func (o *PinResults) GetCount() int32` + +GetCount returns the Count field if non-nil, zero value otherwise. + +### GetCountOk + +`func (o *PinResults) GetCountOk() (*int32, bool)` + +GetCountOk returns a tuple with the Count field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetCount + +`func (o *PinResults) SetCount(v int32)` + +SetCount sets Count field to given value. + + +### GetResults + +`func (o *PinResults) GetResults() []PinStatus` + +GetResults returns the Results field if non-nil, zero value otherwise. + +### GetResultsOk + +`func (o *PinResults) GetResultsOk() (*[]PinStatus, bool)` + +GetResultsOk returns a tuple with the Results field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetResults + +`func (o *PinResults) SetResults(v []PinStatus)` + +SetResults sets Results field to given value. 
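+
+### Example (illustrative)
+
+A minimal sketch of the pagination check described in the API spec, assuming
+`res` is a PinResults value returned by a pin listing call:
+
+```go
+batch := res.GetResults()
+if len(batch) > 0 && res.GetCount() > int32(len(batch)) {
+	// More pins match the filters than were returned; fetch the next page
+	// by repeating the query with Before set to the oldest item's Created.
+	oldest := batch[len(batch)-1].GetCreated()
+	_ = oldest
+}
+```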
+ + + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/pinning/remote/client/openapi/docs/PinStatus.md b/pinning/remote/client/openapi/docs/PinStatus.md new file mode 100644 index 0000000000..40ae992ab0 --- /dev/null +++ b/pinning/remote/client/openapi/docs/PinStatus.md @@ -0,0 +1,161 @@ +# PinStatus + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**Requestid** | **string** | Globally unique identifier of the pin request; can be used to check the status of ongoing pinning, or pin removal | +**Status** | [**Status**](Status.md) | | +**Created** | [**time.Time**](time.Time.md) | Immutable timestamp indicating when a pin request entered a pinning service; can be used for filtering results and pagination | +**Pin** | [**Pin**](Pin.md) | | +**Delegates** | **[]string** | List of multiaddrs designated by pinning service for transferring any new data from external peers | +**Info** | Pointer to **map[string]string** | Optional info for PinStatus response | [optional] + +## Methods + +### NewPinStatus + +`func NewPinStatus(requestid string, status Status, created time.Time, pin Pin, delegates []string, ) *PinStatus` + +NewPinStatus instantiates a new PinStatus object +This constructor will assign default values to properties that have it defined, +and makes sure properties required by API are set, but the set of arguments +will change when the set of required properties is changed + +### NewPinStatusWithDefaults + +`func NewPinStatusWithDefaults() *PinStatus` + +NewPinStatusWithDefaults instantiates a new PinStatus object +This constructor will only assign default values to properties that have it defined, +but it doesn't guarantee that properties required by API are set + +### GetRequestid + +`func (o *PinStatus) GetRequestid() string` + +GetRequestid returns the Requestid field if non-nil, zero value otherwise. + +### GetRequestidOk + +`func (o *PinStatus) GetRequestidOk() (*string, bool)` + +GetRequestidOk returns a tuple with the Requestid field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetRequestid + +`func (o *PinStatus) SetRequestid(v string)` + +SetRequestid sets Requestid field to given value. + + +### GetStatus + +`func (o *PinStatus) GetStatus() Status` + +GetStatus returns the Status field if non-nil, zero value otherwise. + +### GetStatusOk + +`func (o *PinStatus) GetStatusOk() (*Status, bool)` + +GetStatusOk returns a tuple with the Status field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetStatus + +`func (o *PinStatus) SetStatus(v Status)` + +SetStatus sets Status field to given value. + + +### GetCreated + +`func (o *PinStatus) GetCreated() time.Time` + +GetCreated returns the Created field if non-nil, zero value otherwise. + +### GetCreatedOk + +`func (o *PinStatus) GetCreatedOk() (*time.Time, bool)` + +GetCreatedOk returns a tuple with the Created field if it's non-nil, zero value otherwise +and a boolean to check if the value has been set. + +### SetCreated + +`func (o *PinStatus) SetCreated(v time.Time)` + +SetCreated sets Created field to given value. + + +### GetPin + +`func (o *PinStatus) GetPin() Pin` + +GetPin returns the Pin field if non-nil, zero value otherwise. 
+
+### GetPinOk
+
+`func (o *PinStatus) GetPinOk() (*Pin, bool)`
+
+GetPinOk returns a tuple with the Pin field if it's non-nil, zero value otherwise
+and a boolean to check if the value has been set.
+
+### SetPin
+
+`func (o *PinStatus) SetPin(v Pin)`
+
+SetPin sets Pin field to given value.
+
+
+### GetDelegates
+
+`func (o *PinStatus) GetDelegates() []string`
+
+GetDelegates returns the Delegates field if non-nil, zero value otherwise.
+
+### GetDelegatesOk
+
+`func (o *PinStatus) GetDelegatesOk() (*[]string, bool)`
+
+GetDelegatesOk returns a tuple with the Delegates field if it's non-nil, zero value otherwise
+and a boolean to check if the value has been set.
+
+### SetDelegates
+
+`func (o *PinStatus) SetDelegates(v []string)`
+
+SetDelegates sets Delegates field to given value.
+
+
+### GetInfo
+
+`func (o *PinStatus) GetInfo() map[string]string`
+
+GetInfo returns the Info field if non-nil, zero value otherwise.
+
+### GetInfoOk
+
+`func (o *PinStatus) GetInfoOk() (*map[string]string, bool)`
+
+GetInfoOk returns a tuple with the Info field if it's non-nil, zero value otherwise
+and a boolean to check if the value has been set.
+
+### SetInfo
+
+`func (o *PinStatus) SetInfo(v map[string]string)`
+
+SetInfo sets Info field to given value.
+
+### HasInfo
+
+`func (o *PinStatus) HasInfo() bool`
+
+HasInfo returns a boolean if a field has been set.
+
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/pinning/remote/client/openapi/docs/PinsApi.md b/pinning/remote/client/openapi/docs/PinsApi.md
new file mode 100644
index 0000000000..00cff667b6
--- /dev/null
+++ b/pinning/remote/client/openapi/docs/PinsApi.md
@@ -0,0 +1,367 @@
+# \PinsApi
+
+All URIs are relative to *https://pinning-service.example.com*
+
+Method | HTTP request | Description
+------------- | ------------- | -------------
+[**PinsGet**](PinsApi.md#PinsGet) | **Get** /pins | List pin objects
+[**PinsPost**](PinsApi.md#PinsPost) | **Post** /pins | Add pin object
+[**PinsRequestidDelete**](PinsApi.md#PinsRequestidDelete) | **Delete** /pins/{requestid} | Remove pin object
+[**PinsRequestidGet**](PinsApi.md#PinsRequestidGet) | **Get** /pins/{requestid} | Get pin object
+[**PinsRequestidPost**](PinsApi.md#PinsRequestidPost) | **Post** /pins/{requestid} | Replace pin object
+
+
+
+## PinsGet
+
+> PinResults PinsGet(ctx).Cid(cid).Name(name).Status(status).Before(before).After(after).Limit(limit).Meta(meta).Execute()
+
+List pin objects
+
+
+
+### Example
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "time"
+
+    openapiclient "github.com/ipfs/go-libipfs/pinning/remote/client/openapi" // assumed import path; adjust to your module
+)
+
+func main() {
+    cid := []string{"Inner_example"} // []string | Return pin objects responsible for pinning the specified CID(s); be aware that using longer hash functions introduces further constraints on the number of CIDs that will fit under the limit of 2000 characters per URL in browser contexts (optional)
+    name := "name_example" // string | Return pin objects with names that contain provided value (case-insensitive, partial or full match) (optional)
+    status := []openapiclient.Status{"pinned"} // []Status | Return pin objects for pins with the specified status (optional); "pinned" is one of the spec's status values
+    before := time.Now() // time.Time | Return results created (queued) before provided timestamp (optional)
+    after := time.Now() // time.Time | Return results created (queued) after provided timestamp (optional)
+    limit := int32(987) // int32 | Max records to return (optional) (default to 10)
+    meta := map[string]string{"Key": "Value"} // map[string]string | Return pin objects that match specified metadata (optional)
+
+    configuration := openapiclient.NewConfiguration()
+    apiClient := openapiclient.NewAPIClient(configuration)
+    resp, r, err := apiClient.PinsApi.PinsGet(context.Background()).Cid(cid).Name(name).Status(status).Before(before).After(after).Limit(limit).Meta(meta).Execute()
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Error when calling `PinsApi.PinsGet`: %v\n", err)
+        fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
+    }
+    // response from `PinsGet`: PinResults
+    fmt.Fprintf(os.Stdout, "Response from `PinsApi.PinsGet`: %v\n", resp)
+}
+```
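+
+To list all pins, the spec's pagination section says to repeat the call with `before` set to the oldest `PinStatus.created` of the previous batch. A hand-written sketch (editorial addition, not generator output; `apiClient` and `limit` are as in the example above):
+
+```go
+// Walk batches newest-to-oldest until a short page signals the end.
+before := time.Now()
+for {
+    page, _, err := apiClient.PinsApi.PinsGet(context.Background()).Before(before).Limit(limit).Execute()
+    if err != nil {
+        break
+    }
+    results := page.GetResults()
+    if len(results) == 0 {
+        break
+    }
+    // ... process results ...
+    before = results[len(results)-1].GetCreated() // results are newest-to-oldest, so the last item is oldest
+    if int32(len(results)) < limit {
+        break // fewer than limit returned: no further pages
+    }
+}
+```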
+### Path Parameters
+
+
+
+### Other Parameters
+
+Other parameters are passed through a pointer to an apiPinsGetRequest struct via the builder pattern
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **cid** | [**[]string**](string.md) | Return pin objects responsible for pinning the specified CID(s); be aware that using longer hash functions introduces further constraints on the number of CIDs that will fit under the limit of 2000 characters per URL in browser contexts |
+ **name** | **string** | Return pin objects with names that contain provided value (case-insensitive, partial or full match) |
+ **status** | [**[]Status**](Status.md) | Return pin objects for pins with the specified status |
+ **before** | **time.Time** | Return results created (queued) before provided timestamp |
+ **after** | **time.Time** | Return results created (queued) after provided timestamp |
+ **limit** | **int32** | Max records to return | [default to 10]
+ **meta** | [**map[string]string**](string.md) | Return pin objects that match specified metadata |
+
+### Return type
+
+[**PinResults**](PinResults.md)
+
+### Authorization
+
+[accessToken](../README.md#accessToken)
+
+### HTTP request headers
+
+- **Content-Type**: Not defined
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints)
+[[Back to Model list]](../README.md#documentation-for-models)
+[[Back to README]](../README.md)
+
+
+## PinsPost
+
+> PinStatus PinsPost(ctx).Pin(pin).Execute()
+
+Add pin object
+
+
+
+### Example
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+
+    openapiclient "github.com/ipfs/go-libipfs/pinning/remote/client/openapi" // assumed import path; adjust to your module
+)
+
+func main() {
+    // Cid is the only required Pin field; Name, Origins, and Meta are
+    // pointer fields in the generated model, so set them via the setters.
+    pin := *openapiclient.NewPin("Cid_example") // Pin |
+    pin.SetName("Name_example")
+    pin.SetOrigins([]string{"Origins_example"})
+    pin.SetMeta(map[string]string{"Key": "Value"})
+
+    configuration := openapiclient.NewConfiguration()
+    apiClient := openapiclient.NewAPIClient(configuration)
+    resp, r, err := apiClient.PinsApi.PinsPost(context.Background()).Pin(pin).Execute()
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Error when calling `PinsApi.PinsPost`: %v\n", err)
+        fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
+    }
+    // response from `PinsPost`: PinStatus
+    fmt.Fprintf(os.Stdout, "Response from `PinsApi.PinsPost`: %v\n", resp)
+}
+```
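+
+A freshly created pin may report a pending `status` (`queued` or `pinning`). A hand-written polling sketch (editorial addition, not generator output), continuing the example above and assuming `Status` is a string-based type carrying the spec's values:
+
+```go
+// resp is the PinStatus returned by PinsPost above; add "time" to the imports.
+for resp.GetStatus() == "queued" || resp.GetStatus() == "pinning" {
+    time.Sleep(5 * time.Second) // polling interval chosen for illustration
+    st, _, err := apiClient.PinsApi.PinsRequestidGet(context.Background(), resp.GetRequestid()).Execute()
+    if err != nil {
+        break
+    }
+    resp = st
+}
+```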
+### Path Parameters
+
+
+
+### Other Parameters
+
+Other parameters are passed through a pointer to an apiPinsPostRequest struct via the builder pattern
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **pin** | [**Pin**](Pin.md) | |
+
+### Return type
+
+[**PinStatus**](PinStatus.md)
+
+### Authorization
+
+[accessToken](../README.md#accessToken)
+
+### HTTP request headers
+
+- **Content-Type**: application/json
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints)
+[[Back to Model list]](../README.md#documentation-for-models)
+[[Back to README]](../README.md)
+
+
+## PinsRequestidDelete
+
+> PinsRequestidDelete(ctx, requestid).Execute()
+
+Remove pin object
+
+
+
+### Example
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+
+    openapiclient "github.com/ipfs/go-libipfs/pinning/remote/client/openapi" // assumed import path; adjust to your module
+)
+
+func main() {
+    requestid := "requestid_example" // string |
+
+    configuration := openapiclient.NewConfiguration()
+    apiClient := openapiclient.NewAPIClient(configuration)
+    // PinsRequestidDelete has an empty response body, so Execute returns
+    // only the raw *http.Response and an error.
+    r, err := apiClient.PinsApi.PinsRequestidDelete(context.Background(), requestid).Execute()
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Error when calling `PinsApi.PinsRequestidDelete`: %v\n", err)
+        fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
+    }
+}
+```
+
+### Path Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**ctx** | **context.Context** | context for authentication, logging, cancellation, deadlines, tracing, etc.
+**requestid** | **string** | |
+
+### Other Parameters
+
+Other parameters are passed through a pointer to an apiPinsRequestidDeleteRequest struct via the builder pattern
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+
+
+### Return type
+
+ (empty response body)
+
+### Authorization
+
+[accessToken](../README.md#accessToken)
+
+### HTTP request headers
+
+- **Content-Type**: Not defined
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints)
+[[Back to Model list]](../README.md#documentation-for-models)
+[[Back to README]](../README.md)
+
+
+## PinsRequestidGet
+
+> PinStatus PinsRequestidGet(ctx, requestid).Execute()
+
+Get pin object
+
+
+
+### Example
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+
+    openapiclient "github.com/ipfs/go-libipfs/pinning/remote/client/openapi" // assumed import path; adjust to your module
+)
+
+func main() {
+    requestid := "requestid_example" // string |
+
+    configuration := openapiclient.NewConfiguration()
+    apiClient := openapiclient.NewAPIClient(configuration)
+    resp, r, err := apiClient.PinsApi.PinsRequestidGet(context.Background(), requestid).Execute()
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Error when calling `PinsApi.PinsRequestidGet`: %v\n", err)
+        fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
+    }
+    // response from `PinsRequestidGet`: PinStatus
+    fmt.Fprintf(os.Stdout, "Response from `PinsApi.PinsRequestidGet`: %v\n", resp)
+}
+```
+
+### Path Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**ctx** | **context.Context** | context for authentication, logging, cancellation, deadlines, tracing, etc.
+**requestid** | **string** | |
+
+### Other Parameters
+
+Other parameters are passed through a pointer to an apiPinsRequestidGetRequest struct via the builder pattern
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+
+
+### Return type
+
+[**PinStatus**](PinStatus.md)
+
+### Authorization
+
+[accessToken](../README.md#accessToken)
+
+### HTTP request headers
+
+- **Content-Type**: Not defined
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints)
+[[Back to Model list]](../README.md#documentation-for-models)
+[[Back to README]](../README.md)
+
+
+## PinsRequestidPost
+
+> PinStatus PinsRequestidPost(ctx, requestid).Pin(pin).Execute()
+
+Replace pin object
+
+
+
+### Example
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+
+    openapiclient "github.com/ipfs/go-libipfs/pinning/remote/client/openapi" // assumed import path; adjust to your module
+)
+
+func main() {
+    requestid := "requestid_example" // string |
+    // Cid is the only required Pin field; Name, Origins, and Meta are
+    // pointer fields in the generated model, so set them via the setters.
+    pin := *openapiclient.NewPin("Cid_example") // Pin |
+    pin.SetName("Name_example")
+    pin.SetOrigins([]string{"Origins_example"})
+    pin.SetMeta(map[string]string{"Key": "Value"})
+
+    configuration := openapiclient.NewConfiguration()
+    apiClient := openapiclient.NewAPIClient(configuration)
+    resp, r, err := apiClient.PinsApi.PinsRequestidPost(context.Background(), requestid).Pin(pin).Execute()
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Error when calling `PinsApi.PinsRequestidPost`: %v\n", err)
+        fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
+    }
+    // response from `PinsRequestidPost`: PinStatus
+    fmt.Fprintf(os.Stdout, "Response from `PinsApi.PinsRequestidPost`: %v\n", resp)
+}
+```
+
+### Path Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**ctx** | **context.Context** | context for authentication, logging, cancellation, deadlines, tracing, etc.
+**requestid** | **string** | |
+
+### Other Parameters
+
+Other parameters are passed through a pointer to an apiPinsRequestidPostRequest struct via the builder pattern
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+
+ **pin** | [**Pin**](Pin.md) | |
+
+### Return type
+
+[**PinStatus**](PinStatus.md)
+
+### Authorization
+
+[accessToken](../README.md#accessToken)
+
+### HTTP request headers
+
+- **Content-Type**: application/json
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints)
+[[Back to Model list]](../README.md#documentation-for-models)
+[[Back to README]](../README.md)
+
diff --git a/pinning/remote/client/openapi/docs/Status.md b/pinning/remote/client/openapi/docs/Status.md
new file mode 100644
index 0000000000..01176af11e
--- /dev/null
+++ b/pinning/remote/client/openapi/docs/Status.md
@@ -0,0 +1,11 @@
+# Status
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+
+`Status` is an enum; the spec's status values are `queued`, `pinning`, `pinned`, and `failed`.
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/pinning/remote/client/openapi/model_failure.go b/pinning/remote/client/openapi/model_failure.go
new file mode 100644
index 0000000000..ec1fc8268a
--- /dev/null
+++ b/pinning/remote/client/openapi/model_failure.go
@@ -0,0 +1,105 @@
+/*
+ * IPFS Pinning Service API
+ *
+ * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation.
It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service.
Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" +) + +// Failure Response for a failed request +type Failure struct { + Error FailureError `json:"error"` +} + +// NewFailure instantiates a new Failure object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewFailure(error_ FailureError) *Failure { + this := Failure{} + this.Error = error_ + return &this +} + +// NewFailureWithDefaults instantiates a new Failure object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewFailureWithDefaults() *Failure { + this := Failure{} + return &this +} + +// GetError returns the Error field value +func (o *Failure) GetError() FailureError { + if o == nil { + var ret FailureError + return ret + } + + return o.Error +} + +// GetErrorOk returns a tuple with the Error field value +// and a boolean to check if the value has been set. 
+func (o *Failure) GetErrorOk() (*FailureError, bool) { + if o == nil { + return nil, false + } + return &o.Error, true +} + +// SetError sets field value +func (o *Failure) SetError(v FailureError) { + o.Error = v +} + +func (o Failure) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if true { + toSerialize["error"] = o.Error + } + return json.Marshal(toSerialize) +} + +type NullableFailure struct { + value *Failure + isSet bool +} + +func (v NullableFailure) Get() *Failure { + return v.value +} + +func (v *NullableFailure) Set(val *Failure) { + v.value = val + v.isSet = true +} + +func (v NullableFailure) IsSet() bool { + return v.isSet +} + +func (v *NullableFailure) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFailure(val *Failure) *NullableFailure { + return &NullableFailure{value: val, isSet: true} +} + +func (v NullableFailure) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFailure) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pinning/remote/client/openapi/model_failure_error.go b/pinning/remote/client/openapi/model_failure_error.go new file mode 100644 index 0000000000..4f38acdf96 --- /dev/null +++ b/pinning/remote/client/openapi/model_failure_error.go @@ -0,0 +1,143 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. 
Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service.
Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" +) + +// FailureError struct for FailureError +type FailureError struct { + // Mandatory string identifying the type of error + Reason string `json:"reason"` + // Optional, longer description of the error; may include UUID of transaction for support, links to documentation etc + Details *string `json:"details,omitempty"` +} + +// NewFailureError instantiates a new FailureError object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewFailureError(reason string) *FailureError { + this := FailureError{} + this.Reason = reason + return &this +} + +// NewFailureErrorWithDefaults instantiates a new FailureError object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewFailureErrorWithDefaults() *FailureError { + this := FailureError{} + return &this +} + +// GetReason returns the Reason field value +func (o *FailureError) GetReason() string { + if o == nil { + var ret string + return ret + } + + return o.Reason +} + +// GetReasonOk returns a tuple with the Reason field value +// and a boolean to check if the value has been set. +func (o *FailureError) GetReasonOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Reason, true +} + +// SetReason sets field value +func (o *FailureError) SetReason(v string) { + o.Reason = v +} + +// GetDetails returns the Details field value if set, zero value otherwise. 
+func (o *FailureError) GetDetails() string { + if o == nil || o.Details == nil { + var ret string + return ret + } + return *o.Details +} + +// GetDetailsOk returns a tuple with the Details field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *FailureError) GetDetailsOk() (*string, bool) { + if o == nil || o.Details == nil { + return nil, false + } + return o.Details, true +} + +// HasDetails returns a boolean if a field has been set. +func (o *FailureError) HasDetails() bool { + if o != nil && o.Details != nil { + return true + } + + return false +} + +// SetDetails gets a reference to the given string and assigns it to the Details field. +func (o *FailureError) SetDetails(v string) { + o.Details = &v +} + +func (o FailureError) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if true { + toSerialize["reason"] = o.Reason + } + if o.Details != nil { + toSerialize["details"] = o.Details + } + return json.Marshal(toSerialize) +} + +type NullableFailureError struct { + value *FailureError + isSet bool +} + +func (v NullableFailureError) Get() *FailureError { + return v.value +} + +func (v *NullableFailureError) Set(val *FailureError) { + v.value = val + v.isSet = true +} + +func (v NullableFailureError) IsSet() bool { + return v.isSet +} + +func (v *NullableFailureError) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFailureError(val *FailureError) *NullableFailureError { + return &NullableFailureError{value: val, isSet: true} +} + +func (v NullableFailureError) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFailureError) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pinning/remote/client/openapi/model_pin.go b/pinning/remote/client/openapi/model_pin.go new file mode 100644 index 0000000000..0152d43bbf --- /dev/null +++ b/pinning/remote/client/openapi/model_pin.go @@ -0,0 +1,217 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. 
## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API.
### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. 
+ * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" +) + +// Pin Pin object +type Pin struct { + // Content Identifier (CID) to be pinned recursively + Cid string `json:"cid"` + // Optional name for pinned data; can be used for lookups later + Name *string `json:"name,omitempty"` + // Optional list of multiaddrs known to provide the data + Origins *[]string `json:"origins,omitempty"` + // Optional metadata for pin object + Meta *map[string]string `json:"meta,omitempty"` +} + +// NewPin instantiates a new Pin object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPin(cid string) *Pin { + this := Pin{} + this.Cid = cid + return &this +} + +// NewPinWithDefaults instantiates a new Pin object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPinWithDefaults() *Pin { + this := Pin{} + return &this +} + +// GetCid returns the Cid field value +func (o *Pin) GetCid() string { + if o == nil { + var ret string + return ret + } + + return o.Cid +} + +// GetCidOk returns a tuple with the Cid field value +// and a boolean to check if the value has been set. +func (o *Pin) GetCidOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Cid, true +} + +// SetCid sets field value +func (o *Pin) SetCid(v string) { + o.Cid = v +} + +// GetName returns the Name field value if set, zero value otherwise. +func (o *Pin) GetName() string { + if o == nil || o.Name == nil { + var ret string + return ret + } + return *o.Name +} + +// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Pin) GetNameOk() (*string, bool) { + if o == nil || o.Name == nil { + return nil, false + } + return o.Name, true +} + +// HasName returns a boolean if a field has been set. +func (o *Pin) HasName() bool { + if o != nil && o.Name != nil { + return true + } + + return false +} + +// SetName gets a reference to the given string and assigns it to the Name field. +func (o *Pin) SetName(v string) { + o.Name = &v +} + +// GetOrigins returns the Origins field value if set, zero value otherwise. +func (o *Pin) GetOrigins() []string { + if o == nil || o.Origins == nil { + var ret []string + return ret + } + return *o.Origins +} + +// GetOriginsOk returns a tuple with the Origins field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Pin) GetOriginsOk() (*[]string, bool) { + if o == nil || o.Origins == nil { + return nil, false + } + return o.Origins, true +} + +// HasOrigins returns a boolean if a field has been set. +func (o *Pin) HasOrigins() bool { + if o != nil && o.Origins != nil { + return true + } + + return false +} + +// SetOrigins gets a reference to the given []string and assigns it to the Origins field. +func (o *Pin) SetOrigins(v []string) { + o.Origins = &v +} + +// GetMeta returns the Meta field value if set, zero value otherwise. 
+func (o *Pin) GetMeta() map[string]string { + if o == nil || o.Meta == nil { + var ret map[string]string + return ret + } + return *o.Meta +} + +// GetMetaOk returns a tuple with the Meta field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Pin) GetMetaOk() (*map[string]string, bool) { + if o == nil || o.Meta == nil { + return nil, false + } + return o.Meta, true +} + +// HasMeta returns a boolean if a field has been set. +func (o *Pin) HasMeta() bool { + if o != nil && o.Meta != nil { + return true + } + + return false +} + +// SetMeta gets a reference to the given map[string]string and assigns it to the Meta field. +func (o *Pin) SetMeta(v map[string]string) { + o.Meta = &v +} + +func (o Pin) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if true { + toSerialize["cid"] = o.Cid + } + if o.Name != nil { + toSerialize["name"] = o.Name + } + if o.Origins != nil { + toSerialize["origins"] = o.Origins + } + if o.Meta != nil { + toSerialize["meta"] = o.Meta + } + return json.Marshal(toSerialize) +} + +type NullablePin struct { + value *Pin + isSet bool +} + +func (v NullablePin) Get() *Pin { + return v.value +} + +func (v *NullablePin) Set(val *Pin) { + v.value = val + v.isSet = true +} + +func (v NullablePin) IsSet() bool { + return v.isSet +} + +func (v *NullablePin) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePin(val *Pin) *NullablePin { + return &NullablePin{value: val, isSet: true} +} + +func (v NullablePin) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePin) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pinning/remote/client/openapi/model_pin_results.go b/pinning/remote/client/openapi/model_pin_results.go new file mode 100644 index 0000000000..eacb5e0212 --- /dev/null +++ b/pinning/remote/client/openapi/model_pin_results.go @@ -0,0 +1,136 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. 
## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API.
### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. 
+ * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" +) + +// PinResults Response used for listing pin objects matching request +type PinResults struct { + // The total number of pin objects that exist for passed query filters + Count int32 `json:"count"` + // An array of PinStatus results + Results []PinStatus `json:"results"` +} + +// NewPinResults instantiates a new PinResults object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPinResults(count int32, results []PinStatus) *PinResults { + this := PinResults{} + this.Count = count + this.Results = results + return &this +} + +// NewPinResultsWithDefaults instantiates a new PinResults object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPinResultsWithDefaults() *PinResults { + this := PinResults{} + return &this +} + +// GetCount returns the Count field value +func (o *PinResults) GetCount() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.Count +} + +// GetCountOk returns a tuple with the Count field value +// and a boolean to check if the value has been set. +func (o *PinResults) GetCountOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.Count, true +} + +// SetCount sets field value +func (o *PinResults) SetCount(v int32) { + o.Count = v +} + +// GetResults returns the Results field value +func (o *PinResults) GetResults() []PinStatus { + if o == nil { + var ret []PinStatus + return ret + } + + return o.Results +} + +// GetResultsOk returns a tuple with the Results field value +// and a boolean to check if the value has been set. 
+func (o *PinResults) GetResultsOk() (*[]PinStatus, bool) { + if o == nil { + return nil, false + } + return &o.Results, true +} + +// SetResults sets field value +func (o *PinResults) SetResults(v []PinStatus) { + o.Results = v +} + +func (o PinResults) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if true { + toSerialize["count"] = o.Count + } + if true { + toSerialize["results"] = o.Results + } + return json.Marshal(toSerialize) +} + +type NullablePinResults struct { + value *PinResults + isSet bool +} + +func (v NullablePinResults) Get() *PinResults { + return v.value +} + +func (v *NullablePinResults) Set(val *PinResults) { + v.value = val + v.isSet = true +} + +func (v NullablePinResults) IsSet() bool { + return v.isSet +} + +func (v *NullablePinResults) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePinResults(val *PinResults) *NullablePinResults { + return &NullablePinResults{value: val, isSet: true} +} + +func (v NullablePinResults) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePinResults) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pinning/remote/client/openapi/model_pin_status.go b/pinning/remote/client/openapi/model_pin_status.go new file mode 100644 index 0000000000..0f44e62c2d --- /dev/null +++ b/pinning/remote/client/openapi/model_pin_status.go @@ -0,0 +1,262 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. 
It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. 
Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" + "time" +) + +// PinStatus Pin object with status +type PinStatus struct { + // Globally unique identifier of the pin request; can be used to check the status of ongoing pinning, or pin removal + Requestid string `json:"requestid"` + Status Status `json:"status"` + // Immutable timestamp indicating when a pin request entered a pinning service; can be used for filtering results and pagination + Created time.Time `json:"created"` + Pin Pin `json:"pin"` + // List of multiaddrs designated by pinning service for transferring any new data from external peers + Delegates []string `json:"delegates"` + // Optional info for PinStatus response + Info *map[string]string `json:"info,omitempty"` +} + +// NewPinStatus instantiates a new PinStatus object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPinStatus(requestid string, status Status, created time.Time, pin Pin, delegates []string) *PinStatus { + this := PinStatus{} + this.Requestid = requestid + this.Status = status + this.Created = created + this.Pin = pin + this.Delegates = delegates + return &this +} + +// NewPinStatusWithDefaults instantiates a new PinStatus object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPinStatusWithDefaults() *PinStatus { + this := PinStatus{} + return &this +} + +// GetRequestid returns the Requestid field value +func (o *PinStatus) GetRequestid() string { + if o == nil { + var ret string + return ret + } + + return o.Requestid +} + +// GetRequestidOk returns a tuple with the Requestid field value +// and a boolean to check if the value has been set. 
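+//
+// Editorial sketch (not part of the generated client): the requestid is the
+// handle for all follow-up calls; a client polling a pending pin might loop
+// as below, where fetchStatus is a hypothetical helper wrapping
+// GET /pins/{requestid}:
+//
+//	for st.GetStatus() == QUEUED || st.GetStatus() == PINNING {
+//		time.Sleep(5 * time.Second)
+//		st = fetchStatus(st.GetRequestid()) // hypothetical helper
+//	}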
+func (o *PinStatus) GetRequestidOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Requestid, true +} + +// SetRequestid sets field value +func (o *PinStatus) SetRequestid(v string) { + o.Requestid = v +} + +// GetStatus returns the Status field value +func (o *PinStatus) GetStatus() Status { + if o == nil { + var ret Status + return ret + } + + return o.Status +} + +// GetStatusOk returns a tuple with the Status field value +// and a boolean to check if the value has been set. +func (o *PinStatus) GetStatusOk() (*Status, bool) { + if o == nil { + return nil, false + } + return &o.Status, true +} + +// SetStatus sets field value +func (o *PinStatus) SetStatus(v Status) { + o.Status = v +} + +// GetCreated returns the Created field value +func (o *PinStatus) GetCreated() time.Time { + if o == nil { + var ret time.Time + return ret + } + + return o.Created +} + +// GetCreatedOk returns a tuple with the Created field value +// and a boolean to check if the value has been set. +func (o *PinStatus) GetCreatedOk() (*time.Time, bool) { + if o == nil { + return nil, false + } + return &o.Created, true +} + +// SetCreated sets field value +func (o *PinStatus) SetCreated(v time.Time) { + o.Created = v +} + +// GetPin returns the Pin field value +func (o *PinStatus) GetPin() Pin { + if o == nil { + var ret Pin + return ret + } + + return o.Pin +} + +// GetPinOk returns a tuple with the Pin field value +// and a boolean to check if the value has been set. +func (o *PinStatus) GetPinOk() (*Pin, bool) { + if o == nil { + return nil, false + } + return &o.Pin, true +} + +// SetPin sets field value +func (o *PinStatus) SetPin(v Pin) { + o.Pin = v +} + +// GetDelegates returns the Delegates field value +func (o *PinStatus) GetDelegates() []string { + if o == nil { + var ret []string + return ret + } + + return o.Delegates +} + +// GetDelegatesOk returns a tuple with the Delegates field value +// and a boolean to check if the value has been set. +func (o *PinStatus) GetDelegatesOk() (*[]string, bool) { + if o == nil { + return nil, false + } + return &o.Delegates, true +} + +// SetDelegates sets field value +func (o *PinStatus) SetDelegates(v []string) { + o.Delegates = v +} + +// GetInfo returns the Info field value if set, zero value otherwise. +func (o *PinStatus) GetInfo() map[string]string { + if o == nil || o.Info == nil { + var ret map[string]string + return ret + } + return *o.Info +} + +// GetInfoOk returns a tuple with the Info field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PinStatus) GetInfoOk() (*map[string]string, bool) { + if o == nil || o.Info == nil { + return nil, false + } + return o.Info, true +} + +// HasInfo returns a boolean if a field has been set. +func (o *PinStatus) HasInfo() bool { + if o != nil && o.Info != nil { + return true + } + + return false +} + +// SetInfo gets a reference to the given map[string]string and assigns it to the Info field. 
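+//
+// Editorial sketch (not part of the generated client): Info carries
+// vendor-specific details such as the status_details and dag_size keys
+// suggested in the spec header above:
+//
+//	st.SetInfo(map[string]string{
+//		"status_details": "queue position: 7 of 9",
+//		"dag_size":       "8694",
+//	})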
+func (o *PinStatus) SetInfo(v map[string]string) { + o.Info = &v +} + +func (o PinStatus) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if true { + toSerialize["requestid"] = o.Requestid + } + if true { + toSerialize["status"] = o.Status + } + if true { + toSerialize["created"] = o.Created + } + if true { + toSerialize["pin"] = o.Pin + } + if true { + toSerialize["delegates"] = o.Delegates + } + if o.Info != nil { + toSerialize["info"] = o.Info + } + return json.Marshal(toSerialize) +} + +type NullablePinStatus struct { + value *PinStatus + isSet bool +} + +func (v NullablePinStatus) Get() *PinStatus { + return v.value +} + +func (v *NullablePinStatus) Set(val *PinStatus) { + v.value = val + v.isSet = true +} + +func (v NullablePinStatus) IsSet() bool { + return v.isSet +} + +func (v *NullablePinStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePinStatus(val *PinStatus) *NullablePinStatus { + return &NullablePinStatus{value: val, isSet: true} +} + +func (v NullablePinStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePinStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pinning/remote/client/openapi/model_status.go b/pinning/remote/client/openapi/model_status.go new file mode 100644 index 0000000000..56944819fa --- /dev/null +++ b/pinning/remote/client/openapi/model_status.go @@ -0,0 +1,84 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. 
It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. 
Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" + "fmt" +) + +// Status Status a pin object can have at a pinning service +type Status string + +// List of Status +const ( + QUEUED Status = "queued" + PINNING Status = "pinning" + PINNED Status = "pinned" + FAILED Status = "failed" +) + +func (v *Status) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := Status(value) + for _, existing := range []Status{"queued", "pinning", "pinned", "failed"} { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + return fmt.Errorf("%+v is not a valid Status", value) +} + +// Ptr returns reference to Status value +func (v Status) Ptr() *Status { + return &v +} + +type NullableStatus struct { + value *Status + isSet bool +} + +func (v NullableStatus) Get() *Status { + return v.value +} + +func (v *NullableStatus) Set(val *Status) { + v.value = val + v.isSet = true +} + +func (v NullableStatus) IsSet() bool { + return v.isSet +} + +func (v *NullableStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableStatus(val *Status) *NullableStatus { + return &NullableStatus{value: val, isSet: true} +} + +func (v NullableStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pinning/remote/client/openapi/response.go b/pinning/remote/client/openapi/response.go new file mode 100644 index 0000000000..8f9fb0b081 --- /dev/null +++ b/pinning/remote/client/openapi/response.go @@ -0,0 +1,46 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and 
GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. 
The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "net/http" +) + +// APIResponse stores the API response returned by the server. 
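+//
+// Editorial sketch (not part of the generated client): because the HTTP body
+// has already been drained into Payload, decode from there rather than from
+// the embedded Response.Body:
+//
+//	var status PinStatus
+//	if err := json.Unmarshal(apiResp.Payload, &status); err != nil {
+//		// handle a malformed response body
+//	}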
+type APIResponse struct { + *http.Response `json:"-"` + Message string `json:"message,omitempty"` + // Operation is the name of the OpenAPI operation. + Operation string `json:"operation,omitempty"` + // RequestURL is the request URL. This value is always available, even if the + // embedded *http.Response is nil. + RequestURL string `json:"url,omitempty"` + // Method is the HTTP method used for the request. This value is always + // available, even if the embedded *http.Response is nil. + Method string `json:"method,omitempty"` + // Payload holds the contents of the response body (which may be nil or empty). + // This is provided here as the raw response.Body() reader will have already + // been drained. + Payload []byte `json:"-"` +} + +// NewAPIResponse returns a new APIResponse object. +func NewAPIResponse(r *http.Response) *APIResponse { + response := &APIResponse{Response: r} + return response +} + +// NewAPIResponseWithError returns a new APIResponse object with the provided error message. +func NewAPIResponseWithError(errorMessage string) *APIResponse { + response := &APIResponse{Message: errorMessage} + return response +} diff --git a/pinning/remote/client/openapi/utils.go b/pinning/remote/client/openapi/utils.go new file mode 100644 index 0000000000..25d36f11b6 --- /dev/null +++ b/pinning/remote/client/openapi/utils.go @@ -0,0 +1,327 @@ +/* + * IPFS Pinning Service API + * + * ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications > **Note**: while ready for implementation, this spec is still a work in progress! 🏗️ **Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec).** # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response ![pin status response object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pinstatus.png) The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. 
Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. ## The pin lifecycle ![pinning service objects and lifecycle](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/lifecycle.png) ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. ### Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of the blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then directly connecting to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and direct dial from a client works around peer routing issues in restrictive network topologies such as NATs. ## Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ### Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables filtering pins per app via `?meta={\"app_id\":}` - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) Note that it is OK for a client to omit or ignore these optional attributes; doing so should not impact the basic pinning functionality. ### Pin status info Additional `PinStatus.info` can be returned by pinning service. 
Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. + * + * API version: 0.1.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +import ( + "encoding/json" + "time" +) + +// PtrBool is a helper routine that returns a pointer to the given bool value. +func PtrBool(v bool) *bool { return &v } + +// PtrInt is a helper routine that returns a pointer to the given int value. +func PtrInt(v int) *int { return &v } + +// PtrInt32 is a helper routine that returns a pointer to the given int32 value. +func PtrInt32(v int32) *int32 { return &v } + +// PtrInt64 is a helper routine that returns a pointer to the given int64 value. +func PtrInt64(v int64) *int64 { return &v } + +// PtrFloat32 is a helper routine that returns a pointer to the given float32 value. +func PtrFloat32(v float32) *float32 { return &v } + +// PtrFloat64 is a helper routine that returns a pointer to the given float64 value. +func PtrFloat64(v float64) *float64 { return &v } + +// PtrString is a helper routine that returns a pointer to the given string value. +func PtrString(v string) *string { return &v } + +// PtrTime is a helper routine that returns a pointer to the given Time value. 
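+//
+// Editorial sketch (not part of the generated client): the Ptr helpers are
+// convenient for populating optional pointer-typed fields and parameters,
+// e.g.:
+//
+//	limit := openapi.PtrInt32(50) // *int32 for an optional `limit` query parameter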
+func PtrTime(v time.Time) *time.Time { return &v } + +type NullableBool struct { + value *bool + isSet bool +} + +func (v NullableBool) Get() *bool { + return v.value +} + +func (v *NullableBool) Set(val *bool) { + v.value = val + v.isSet = true +} + +func (v NullableBool) IsSet() bool { + return v.isSet +} + +func (v *NullableBool) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBool(val *bool) *NullableBool { + return &NullableBool{value: val, isSet: true} +} + +func (v NullableBool) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBool) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt struct { + value *int + isSet bool +} + +func (v NullableInt) Get() *int { + return v.value +} + +func (v *NullableInt) Set(val *int) { + v.value = val + v.isSet = true +} + +func (v NullableInt) IsSet() bool { + return v.isSet +} + +func (v *NullableInt) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt(val *int) *NullableInt { + return &NullableInt{value: val, isSet: true} +} + +func (v NullableInt) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt32 struct { + value *int32 + isSet bool +} + +func (v NullableInt32) Get() *int32 { + return v.value +} + +func (v *NullableInt32) Set(val *int32) { + v.value = val + v.isSet = true +} + +func (v NullableInt32) IsSet() bool { + return v.isSet +} + +func (v *NullableInt32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt32(val *int32) *NullableInt32 { + return &NullableInt32{value: val, isSet: true} +} + +func (v NullableInt32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt64 struct { + value *int64 + isSet bool +} + +func (v NullableInt64) Get() *int64 { + return v.value +} + +func (v *NullableInt64) Set(val *int64) { + v.value = val + v.isSet = true +} + +func (v NullableInt64) IsSet() bool { + return v.isSet +} + +func (v *NullableInt64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt64(val *int64) *NullableInt64 { + return &NullableInt64{value: val, isSet: true} +} + +func (v NullableInt64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt64) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat32 struct { + value *float32 + isSet bool +} + +func (v NullableFloat32) Get() *float32 { + return v.value +} + +func (v *NullableFloat32) Set(val *float32) { + v.value = val + v.isSet = true +} + +func (v NullableFloat32) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat32(val *float32) *NullableFloat32 { + return &NullableFloat32{value: val, isSet: true} +} + +func (v NullableFloat32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat64 struct { + value *float64 + isSet bool +} + +func (v NullableFloat64) Get() *float64 { + return v.value +} + +func (v *NullableFloat64) Set(val *float64) { + v.value = val + v.isSet = true +} + +func (v 
NullableFloat64) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat64(val *float64) *NullableFloat64 { + return &NullableFloat64{value: val, isSet: true} +} + +func (v NullableFloat64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat64) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableString struct { + value *string + isSet bool +} + +func (v NullableString) Get() *string { + return v.value +} + +func (v *NullableString) Set(val *string) { + v.value = val + v.isSet = true +} + +func (v NullableString) IsSet() bool { + return v.isSet +} + +func (v *NullableString) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableString(val *string) *NullableString { + return &NullableString{value: val, isSet: true} +} + +func (v NullableString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableString) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableTime struct { + value *time.Time + isSet bool +} + +func (v NullableTime) Get() *time.Time { + return v.value +} + +func (v *NullableTime) Set(val *time.Time) { + v.value = val + v.isSet = true +} + +func (v NullableTime) IsSet() bool { + return v.isSet +} + +func (v *NullableTime) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTime(val *time.Time) *NullableTime { + return &NullableTime{value: val, isSet: true} +} + +func (v NullableTime) MarshalJSON() ([]byte, error) { + return v.value.MarshalJSON() +} + +func (v *NullableTime) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/provider/README.md b/provider/README.md new file mode 100644 index 0000000000..0e4f4650d2 --- /dev/null +++ b/provider/README.md @@ -0,0 +1,30 @@ +## Usage + +Here's how you create, start, interact with, and stop the provider system: + +```golang +import ( + "context" + "time" + + "github.com/ipfs/boxo/provider" + "github.com/ipfs/boxo/provider/queue" + "github.com/ipfs/boxo/provider/simple" +) + +rsys := (your routing system here) +dstore := (your datastore here) +cid := (your cid to provide here) + +q, err := queue.NewQueue(context.Background(), "example", dstore) +if err != nil { + panic(err) +} + +reprov := simple.NewReprovider(context.Background(), time.Hour * 12, rsys, simple.NewBlockstoreProvider(dstore)) +prov := simple.NewProvider(context.Background(), q, rsys) +sys := provider.NewSystem(prov, reprov) + +sys.Run() + +sys.Provide(cid) + +sys.Close() +``` diff --git a/provider/batched/system.go b/provider/batched/system.go new file mode 100644 index 0000000000..e3cb0325a0 --- /dev/null +++ b/provider/batched/system.go @@ -0,0 +1,420 @@ +package batched + +import ( + "context" + "errors" + "fmt" + "strconv" + "sync" + "time" + + provider "github.com/ipfs/boxo/provider" + "github.com/ipfs/boxo/provider/queue" + "github.com/ipfs/boxo/provider/simple" + "github.com/ipfs/boxo/verifcid" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log" + "github.com/multiformats/go-multihash" +) + +var log = logging.Logger("provider.batched") + +type BatchProvidingSystem struct { + ctx context.Context + close context.CancelFunc + closewg sync.WaitGroup + + reprovideInterval time.Duration + initalReprovideDelay time.Duration + initialReprovideDelaySet bool + + rsys provideMany + keyProvider simple.KeyChanFunc + + q *queue.Queue + 
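+ // ds persists bookkeeping for this system, such as the timestamp of the last completed reprovide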
ds datastore.Batching + + reprovideCh chan cid.Cid + + totalProvides, lastReprovideBatchSize int + avgProvideDuration, lastReprovideDuration time.Duration +} + +var _ provider.System = (*BatchProvidingSystem)(nil) + +type provideMany interface { + ProvideMany(ctx context.Context, keys []multihash.Multihash) error + Ready() bool +} + +// Option defines the functional option type that can be used to configure +// BatchProvidingSystem instances +type Option func(system *BatchProvidingSystem) error + +var lastReprovideKey = datastore.NewKey("/provider/reprovide/lastreprovide") + +func New(provider provideMany, q *queue.Queue, opts ...Option) (*BatchProvidingSystem, error) { + s := &BatchProvidingSystem{ + reprovideInterval: time.Hour * 24, + rsys: provider, + keyProvider: nil, + q: q, + ds: datastore.NewMapDatastore(), + reprovideCh: make(chan cid.Cid), + } + + for _, o := range opts { + if err := o(s); err != nil { + return nil, err + } + } + + // Setup default behavior for the initial reprovide delay + // + // If the reprovide ticker is larger than a minute (likely), + // provide once after we've been up a minute. + // + // Don't provide _immediately_ as we might be just about to stop. + if !s.initialReprovideDelaySet && s.reprovideInterval > time.Minute { + s.initalReprovideDelay = time.Minute + s.initialReprovideDelaySet = true + } + + if s.keyProvider == nil { + s.keyProvider = func(ctx context.Context) (<-chan cid.Cid, error) { + ch := make(chan cid.Cid) + close(ch) + return ch, nil + } + } + + // This is after the options processing so we do not have to worry about leaking a context if there is an + // initialization error processing the options + ctx, cancel := context.WithCancel(context.Background()) + s.ctx = ctx + s.close = cancel + + return s, nil +} + +func Datastore(batching datastore.Batching) Option { + return func(system *BatchProvidingSystem) error { + system.ds = batching + return nil + } +} + +func ReproviderInterval(duration time.Duration) Option { + return func(system *BatchProvidingSystem) error { + system.reprovideInterval = duration + return nil + } +} + +func KeyProvider(fn simple.KeyChanFunc) Option { + return func(system *BatchProvidingSystem) error { + system.keyProvider = fn + return nil + } +} + +func initialReprovideDelay(duration time.Duration) Option { + return func(system *BatchProvidingSystem) error { + system.initialReprovideDelaySet = true + system.initalReprovideDelay = duration + return nil + } +} + +func (s *BatchProvidingSystem) Run() { + // how long we wait between the first provider we hear about and batching up the provides to send out + const pauseDetectionThreshold = time.Millisecond * 500 + // how long we are willing to collect providers for the batch after we receive the first one + const maxCollectionDuration = time.Minute * 10 + + provCh := s.q.Dequeue() + + s.closewg.Add(1) + go func() { + defer s.closewg.Done() + + m := make(map[cid.Cid]struct{}) + + // setup stopped timers + maxCollectionDurationTimer := time.NewTimer(time.Hour) + pauseDetectTimer := time.NewTimer(time.Hour) + stopAndEmptyTimer(maxCollectionDurationTimer) + stopAndEmptyTimer(pauseDetectTimer) + + // make sure timers are cleaned up + defer maxCollectionDurationTimer.Stop() + defer pauseDetectTimer.Stop() + + resetTimersAfterReceivingProvide := func() { + firstProvide := len(m) == 0 + if firstProvide { + // after receiving the first provider start up the timers + maxCollectionDurationTimer.Reset(maxCollectionDuration) + pauseDetectTimer.Reset(pauseDetectionThreshold) + } else 
{ + // otherwise just do a full restart of the pause timer + stopAndEmptyTimer(pauseDetectTimer) + pauseDetectTimer.Reset(pauseDetectionThreshold) + } + } + + for { + performedReprovide := false + + // at the start of every loop the maxCollectionDurationTimer and pauseDetectTimer should already be + // stopped and have empty channels + loop: + for { + select { + case <-maxCollectionDurationTimer.C: + // if this timer has fired then the pause timer has started so let's stop and empty it + stopAndEmptyTimer(pauseDetectTimer) + break loop + default: + } + + select { + case c := <-provCh: + resetTimersAfterReceivingProvide() + m[c] = struct{}{} + continue + default: + } + + select { + case c := <-provCh: + resetTimersAfterReceivingProvide() + m[c] = struct{}{} + case c := <-s.reprovideCh: + resetTimersAfterReceivingProvide() + m[c] = struct{}{} + performedReprovide = true + case <-pauseDetectTimer.C: + // if this timer has fired then the max collection timer has started so let's stop and empty it + stopAndEmptyTimer(maxCollectionDurationTimer) + break loop + case <-maxCollectionDurationTimer.C: + // if this timer has fired then the pause timer has started so let's stop and empty it + stopAndEmptyTimer(pauseDetectTimer) + break loop + case <-s.ctx.Done(): + return + } + } + + if len(m) == 0 { + continue + } + + keys := make([]multihash.Multihash, 0, len(m)) + for c := range m { + delete(m, c) + + // hash security + if err := verifcid.ValidateCid(c); err != nil { + log.Errorf("insecure hash in reprovider, %s (%s)", c, err) + continue + } + + keys = append(keys, c.Hash()) + } + + // in case after removing all the invalid CIDs there are no valid ones left + if len(keys) == 0 { + continue + } + + for !s.rsys.Ready() { + log.Debugf("reprovider system not ready") + select { + case <-time.After(time.Minute): + case <-s.ctx.Done(): + return + } + } + + log.Debugf("starting provide of %d keys", len(keys)) + start := time.Now() + err := s.rsys.ProvideMany(s.ctx, keys) + if err != nil { + log.Debugf("providing failed: %v", err) + continue + } + dur := time.Since(start) + + totalProvideTime := int64(s.totalProvides) * int64(s.avgProvideDuration) + recentAvgProvideDuration := time.Duration(int64(dur) / int64(len(keys))) + s.avgProvideDuration = time.Duration((totalProvideTime + int64(dur)) / int64(s.totalProvides+len(keys))) + s.totalProvides += len(keys) + + log.Debugf("finished providing of %d keys. 
It took %v with an average of %v per provide", len(keys), dur, recentAvgProvideDuration) + + if performedReprovide { + s.lastReprovideBatchSize = len(keys) + s.lastReprovideDuration = dur + + if err := s.ds.Put(s.ctx, lastReprovideKey, storeTime(time.Now())); err != nil { + log.Errorf("could not store last reprovide time: %v", err) + } + if err := s.ds.Sync(s.ctx, lastReprovideKey); err != nil { + log.Errorf("could not perform sync of last reprovide time: %v", err) + } + } + } + }() + + s.closewg.Add(1) + go func() { + defer s.closewg.Done() + + var initialReprovideCh, reprovideCh <-chan time.Time + + // If reproviding is enabled (non-zero) + if s.reprovideInterval > 0 { + reprovideTicker := time.NewTicker(s.reprovideInterval) + defer reprovideTicker.Stop() + reprovideCh = reprovideTicker.C + + // if there is a non-zero initial reprovide time that was set in the initializer or if the fallback has been set + if s.initialReprovideDelaySet { + initialReprovideTimer := time.NewTimer(s.initalReprovideDelay) + defer initialReprovideTimer.Stop() + + initialReprovideCh = initialReprovideTimer.C + } + } + + for s.ctx.Err() == nil { + select { + case <-initialReprovideCh: + case <-reprovideCh: + case <-s.ctx.Done(): + return + } + + err := s.reprovide(s.ctx, false) + + // only log if we've hit an actual error, otherwise just tell the client we're shutting down + if s.ctx.Err() == nil && err != nil { + log.Errorf("failed to reprovide: %s", err) + } + } + }() +} + +func stopAndEmptyTimer(t *time.Timer) { + if !t.Stop() { + <-t.C + } +} + +func storeTime(t time.Time) []byte { + val := []byte(fmt.Sprintf("%d", t.UnixNano())) + return val +} + +func parseTime(b []byte) (time.Time, error) { + tns, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(0, tns), nil +} + +func (s *BatchProvidingSystem) Close() error { + s.close() + err := s.q.Close() + s.closewg.Wait() + return err +} + +func (s *BatchProvidingSystem) Provide(cid cid.Cid) error { + return s.q.Enqueue(cid) +} + +func (s *BatchProvidingSystem) Reprovide(ctx context.Context) error { + return s.reprovide(ctx, true) +} + +func (s *BatchProvidingSystem) reprovide(ctx context.Context, force bool) error { + if !s.shouldReprovide() && !force { + return nil + } + + kch, err := s.keyProvider(ctx) + if err != nil { + return err + } + +reprovideCidLoop: + for { + select { + case c, ok := <-kch: + if !ok { + break reprovideCidLoop + } + + select { + case s.reprovideCh <- c: + case <-ctx.Done(): + return ctx.Err() + } + case <-ctx.Done(): + return ctx.Err() + } + } + + return nil +} + +func (s *BatchProvidingSystem) getLastReprovideTime() (time.Time, error) { + val, err := s.ds.Get(s.ctx, lastReprovideKey) + if errors.Is(err, datastore.ErrNotFound) { + return time.Time{}, nil + } + if err != nil { + return time.Time{}, fmt.Errorf("could not get last reprovide time: %w", err) + } + + t, err := parseTime(val) + if err != nil { + return time.Time{}, fmt.Errorf("could not decode last reprovide time, got %q", string(val)) + } + + return t, nil +} + +func (s *BatchProvidingSystem) shouldReprovide() bool { + t, err := s.getLastReprovideTime() + if err != nil { + log.Debugf("getting last reprovide time failed: %s", err) + return false + } + + if time.Since(t) < time.Duration(float64(s.reprovideInterval)*0.5) { + return false + } + return true + +type BatchedProviderStats struct { + TotalProvides, LastReprovideBatchSize int + AvgProvideDuration, LastReprovideDuration time.Duration +} + +// Stat returns various stats
 about this provider system +func (s *BatchProvidingSystem) Stat(ctx context.Context) (BatchedProviderStats, error) { + // TODO: Does it matter that there is no locking around the total+average values? + return BatchedProviderStats{ + TotalProvides: s.totalProvides, + LastReprovideBatchSize: s.lastReprovideBatchSize, + AvgProvideDuration: s.avgProvideDuration, + LastReprovideDuration: s.lastReprovideDuration, + }, nil +} diff --git a/provider/batched/system_test.go b/provider/batched/system_test.go new file mode 100644 index 0000000000..4fe0624aff --- /dev/null +++ b/provider/batched/system_test.go @@ -0,0 +1,117 @@ +package batched + +import ( + "context" + "strconv" + "sync" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + mh "github.com/multiformats/go-multihash" + + q "github.com/ipfs/boxo/provider/queue" +) + +type mockProvideMany struct { + lk sync.Mutex + keys []mh.Multihash +} + +func (m *mockProvideMany) ProvideMany(ctx context.Context, keys []mh.Multihash) error { + m.lk.Lock() + defer m.lk.Unlock() + m.keys = keys + return nil +} + +func (m *mockProvideMany) Ready() bool { + return true +} + +func (m *mockProvideMany) GetKeys() []mh.Multihash { + m.lk.Lock() + defer m.lk.Unlock() + return m.keys[:] +} + +var _ provideMany = (*mockProvideMany)(nil) + +func TestBatched(t *testing.T) { + ctx := context.Background() + + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + queue, err := q.NewQueue(ctx, "test", ds) + if err != nil { + t.Fatal(err) + } + + provider := &mockProvideMany{} + + ctx, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + + const numProvides = 100 + keysToProvide := make(map[cid.Cid]int) + for i := 0; i < numProvides; i++ { + h, err := mh.Sum([]byte(strconv.Itoa(i)), mh.SHA2_256, -1) + if err != nil { + panic(err) + } + c := cid.NewCidV1(cid.Raw, h) + keysToProvide[c] = i + } + + batchSystem, err := New(provider, queue, KeyProvider(func(ctx context.Context) (<-chan cid.Cid, error) { + ch := make(chan cid.Cid) + go func() { + for k := range keysToProvide { + select { + case ch <- k: + case <-ctx.Done(): + return + } + } + }() + return ch, nil + }), initialReprovideDelay(0)) + if err != nil { + t.Fatal(err) + } + + batchSystem.Run() + + var keys []mh.Multihash + for { + if ctx.Err() != nil { + t.Fatal("test hung") + } + keys = provider.GetKeys() + if len(keys) != 0 { + break + } + time.Sleep(time.Millisecond * 100) + } + + if len(keys) != numProvides { + t.Fatalf("expected %d provider keys, got %d", numProvides, len(keys)) + } + + provMap := make(map[string]struct{}) + for _, k := range keys { + provMap[string(k)] = struct{}{} + } + + for i := 0; i < numProvides; i++ { + h, err := mh.Sum([]byte(strconv.Itoa(i)), mh.SHA2_256, -1) + if err != nil { + panic(err) + } + if _, found := provMap[string(h)]; !found { + t.Fatalf("could not find provider with value %d", i) + } + } +} diff --git a/provider/offline.go b/provider/offline.go new file mode 100644 index 0000000000..030a70ab13 --- /dev/null +++ b/provider/offline.go @@ -0,0 +1,29 @@ +package provider + +import ( + "context" + + "github.com/ipfs/go-cid" +) + +type offlineProvider struct{} + +// NewOfflineProvider creates a System that does nothing +func NewOfflineProvider() System { + return &offlineProvider{} +} + +func (op *offlineProvider) Run() { +} + +func (op *offlineProvider) Close() error { + return nil +} + +func (op *offlineProvider) Provide(cid.Cid) error { + return nil +} + +func 
(op *offlineProvider) Reprovide(context.Context) error {
+	return nil
+}
diff --git a/provider/provider.go b/provider/provider.go
new file mode 100644
index 0000000000..3b9c6ba3eb
--- /dev/null
+++ b/provider/provider.go
@@ -0,0 +1,27 @@
+package provider
+
+import (
+	"context"
+
+	"github.com/ipfs/go-cid"
+)
+
+// Provider announces blocks to the network
+type Provider interface {
+	// Run is used to begin processing the provider work
+	Run()
+	// Provide takes a cid and makes an attempt to announce it to the network
+	Provide(cid.Cid) error
+	// Close stops the provider
+	Close() error
+}
+
+// Reprovider reannounces blocks to the network
+type Reprovider interface {
+	// Run is used to begin processing the reprovider work and waiting for reprovide triggers
+	Run()
+	// Trigger a reprovide
+	Trigger(context.Context) error
+	// Close stops the reprovider
+	Close() error
+}
diff --git a/provider/queue/queue.go b/provider/queue/queue.go
new file mode 100644
index 0000000000..618256bbee
--- /dev/null
+++ b/provider/queue/queue.go
@@ -0,0 +1,156 @@
+package queue
+
+import (
+	"context"
+	"fmt"
+	cid "github.com/ipfs/go-cid"
+	datastore "github.com/ipfs/go-datastore"
+	namespace "github.com/ipfs/go-datastore/namespace"
+	query "github.com/ipfs/go-datastore/query"
+	logging "github.com/ipfs/go-log"
+)
+
+var log = logging.Logger("provider.queue")
+
+// Queue provides a FIFO interface to the datastore for storing cids, with best-effort durability
+//
+// Best-effort durability just means that cids in the process of being provided when a
+// crash or shutdown occurs may be in the queue when the node is brought back online,
+// depending on whether the underlying datastore has synchronous or asynchronous writes.
+type Queue struct {
+	// used to differentiate queues in datastore
+	// e.g. provider vs reprovider
+	name    string
+	ctx     context.Context
+	ds      datastore.Datastore // Must be threadsafe
+	dequeue chan cid.Cid
+	enqueue chan cid.Cid
+	close   context.CancelFunc
+	closed  chan struct{}
+
+	counter uint64
+}
+
+// NewQueue creates a queue for cids
+func NewQueue(ctx context.Context, name string, ds datastore.Datastore) (*Queue, error) {
+	namespaced := namespace.Wrap(ds, datastore.NewKey("/"+name+"/queue/"))
+	cancelCtx, cancel := context.WithCancel(ctx)
+	q := &Queue{
+		name:    name,
+		ctx:     cancelCtx,
+		ds:      namespaced,
+		dequeue: make(chan cid.Cid),
+		enqueue: make(chan cid.Cid),
+		close:   cancel,
+		closed:  make(chan struct{}, 1),
+	}
+	q.work()
+	return q, nil
+}
+
+// Close stops the queue
+func (q *Queue) Close() error {
+	q.close()
+	<-q.closed
+	return nil
+}
+
+// Enqueue puts a cid in the queue
+func (q *Queue) Enqueue(cid cid.Cid) error {
+	select {
+	case q.enqueue <- cid:
+		return nil
+	case <-q.ctx.Done():
+		return fmt.Errorf("failed to enqueue CID: shutting down")
+	}
+}
+
+// Dequeue returns a channel that, if listened to, will remove entries from the queue
+func (q *Queue) Dequeue() <-chan cid.Cid {
+	return q.dequeue
+}
+
+// work dequeues and enqueues when available.
+func (q *Queue) work() {
+	go func() {
+		var k datastore.Key = datastore.Key{}
+		var c cid.Cid = cid.Undef
+
+		defer func() {
+			// also cancels any in-progress enqueue tasks.
+			q.close()
+			// unblocks anyone waiting
+			close(q.dequeue)
+			// unblocks the close call
+			close(q.closed)
+		}()
+
+		for {
+			if c == cid.Undef {
+				head, err := q.getQueueHead()
+
+				if err != nil {
+					log.Errorf("error querying for head of queue: %s, stopping provider", err)
+					return
+				} else if head != nil {
+					k = datastore.NewKey(head.Key)
+					c, err = cid.Parse(head.Value)
+					if err != nil {
+						log.Warnf("error parsing queue entry cid with key (%s), removing it from queue: %s", head.Key, err)
+						err = q.ds.Delete(q.ctx, k)
+						if err != nil {
+							log.Errorf("error deleting queue entry with key (%s), due to error (%s), stopping provider", head.Key, err)
+							return
+						}
+						continue
+					}
+				} else {
+					c = cid.Undef
+				}
+			}
+
+			// If c != cid.Undef, set dequeue and attempt write; otherwise wait for enqueue
+			var dequeue chan cid.Cid
+			if c != cid.Undef {
+				dequeue = q.dequeue
+			}
+
+			select {
+			case toQueue := <-q.enqueue:
+				keyPath := fmt.Sprintf("%020d/%s", q.counter, toQueue.String())
+				q.counter++
+				nextKey := datastore.NewKey(keyPath)
+
+				if err := q.ds.Put(q.ctx, nextKey, toQueue.Bytes()); err != nil {
+					log.Errorf("Failed to enqueue cid: %s", err)
+					continue
+				}
+			case dequeue <- c:
+				err := q.ds.Delete(q.ctx, k)
+
+				if err != nil {
+					log.Errorf("Failed to delete queued cid %s with key %s: %s", c, k, err)
+					continue
+				}
+				c = cid.Undef
+			case <-q.ctx.Done():
+				return
+			}
+		}
+	}()
+}
+
+func (q *Queue) getQueueHead() (*query.Entry, error) {
+	qry := query.Query{Orders: []query.Order{query.OrderByKey{}}, Limit: 1}
+	results, err := q.ds.Query(q.ctx, qry)
+	if err != nil {
+		return nil, err
+	}
+	defer results.Close()
+	r, ok := results.NextSync()
+	if !ok {
+		return nil, nil
+	}
+
+	return &r.Entry, r.Error
+}
diff --git a/provider/queue/queue_test.go b/provider/queue/queue_test.go
new file mode 100644
index 0000000000..a0fa36c3ad
--- /dev/null
+++ b/provider/queue/queue_test.go
@@ -0,0 +1,133 @@
+package queue
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/sync"
+	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
+)
+
+var blockGenerator = blocksutil.NewBlockGenerator()
+
+func makeCids(n int) []cid.Cid {
+	cids := make([]cid.Cid, 0, n)
+	for i := 0; i < n; i++ {
+		c := blockGenerator.Next().Cid()
+		cids = append(cids, c)
+	}
+	return cids
+}
+
+func assertOrdered(cids []cid.Cid, q *Queue, t *testing.T) {
+	for _, c := range cids {
+		select {
+		case dequeued := <-q.dequeue:
+			if c != dequeued {
+				t.Fatalf("Error in ordering of CIDs retrieved from queue. Expected: %s, got: %s", c, dequeued)
+			}
+
+		case <-time.After(time.Second * 1):
+			t.Fatal("Timeout waiting for cids to be provided.")
+		}
+	}
+}
+
+func TestBasicOperation(t *testing.T) {
+	ctx := context.Background()
+	defer ctx.Done()
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	queue, err := NewQueue(ctx, "test", ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cids := makeCids(10)
+
+	for _, c := range cids {
+		queue.Enqueue(c)
+	}
+
+	assertOrdered(cids, queue, t)
+}
+
+func TestMangledData(t *testing.T) {
+	ctx := context.Background()
+	defer ctx.Done()
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	queue, err := NewQueue(ctx, "test", ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cids := makeCids(10)
+	for _, c := range cids {
+		queue.Enqueue(c)
+	}
+
+	// put bad data in the queue
+	queueKey := datastore.NewKey("/test/0")
+	err = queue.ds.Put(ctx, queueKey, []byte("borked"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// expect to only see the valid cids we entered
+	expected := cids
+	assertOrdered(expected, queue, t)
+}
+
+func TestInitialization(t *testing.T) {
+	ctx := context.Background()
+	defer ctx.Done()
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	queue, err := NewQueue(ctx, "test", ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cids := makeCids(10)
+	for _, c := range cids {
+		queue.Enqueue(c)
+	}
+
+	assertOrdered(cids[:5], queue, t)
+
+	// make a new queue, same data
+	queue, err = NewQueue(ctx, "test", ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertOrdered(cids[5:], queue, t)
+}
+
+func TestInitializationWithManyCids(t *testing.T) {
+	ctx := context.Background()
+	defer ctx.Done()
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	queue, err := NewQueue(ctx, "test", ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cids := makeCids(25)
+	for _, c := range cids {
+		queue.Enqueue(c)
+	}
+
+	// make a new queue, same data
+	queue, err = NewQueue(ctx, "test", ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertOrdered(cids, queue, t)
+}
diff --git a/provider/simple/provider.go b/provider/simple/provider.go
new file mode 100644
index 0000000000..63de031ad0
--- /dev/null
+++ b/provider/simple/provider.go
@@ -0,0 +1,116 @@
+// Package simple implements structures and methods to provide blocks,
+// keep track of which blocks are provided, and to allow those blocks to
+// be reprovided.
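+//
+// A minimal wiring sketch (illustrative only, not part of this change; it
+// assumes an existing context `ctx`, datastore `ds`, a
+// routing.ContentRouting implementation `router`, and a cid `c`):
+//
+//	q, _ := queue.NewQueue(ctx, "provider", ds)
+//	prov := simple.NewProvider(ctx, q, router, simple.WithTimeout(30*time.Second))
+//	prov.Run()          // start the announcement workers
+//	_ = prov.Provide(c) // enqueue c to be announced
+//	defer prov.Close()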
+package simple
+
+import (
+	"context"
+	"time"
+
+	q "github.com/ipfs/boxo/provider/queue"
+	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log"
+	"github.com/libp2p/go-libp2p/core/routing"
+)
+
+var logP = logging.Logger("provider.simple")
+
+// Provider announces blocks to the network
+type Provider struct {
+	ctx context.Context
+	// the CIDs for which provide announcements should be made
+	queue *q.Queue
+	// used to announce providing to the network
+	contentRouting routing.ContentRouting
+	// how long to wait for announce to complete before giving up
+	timeout time.Duration
+	// how many workers concurrently work through the queue
+	workerLimit int
+}
+
+// Option defines the functional option type that can be used to configure
+// provider instances
+type Option func(*Provider)
+
+// WithTimeout is an option to set a timeout on a provider
+func WithTimeout(timeout time.Duration) Option {
+	return func(p *Provider) {
+		p.timeout = timeout
+	}
+}
+
+// MaxWorkers is an option to set the max workers on a provider
+func MaxWorkers(count int) Option {
+	return func(p *Provider) {
+		p.workerLimit = count
+	}
+}
+
+// NewProvider creates a provider that announces blocks to the network using a content router
+func NewProvider(ctx context.Context, queue *q.Queue, contentRouting routing.ContentRouting, options ...Option) *Provider {
+	p := &Provider{
+		ctx:            ctx,
+		queue:          queue,
+		contentRouting: contentRouting,
+		workerLimit:    8,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// Close stops the provider
+func (p *Provider) Close() error {
+	return p.queue.Close()
+}
+
+// Run workers to handle provide requests.
+func (p *Provider) Run() {
+	p.handleAnnouncements()
+}
+
+// Provide enqueues the given cid to be announced to the network.
+func (p *Provider) Provide(root cid.Cid) error {
+	return p.queue.Enqueue(root)
+}
+
+// Handle all outgoing cids by providing (announcing) them
+func (p *Provider) handleAnnouncements() {
+	for workers := 0; workers < p.workerLimit; workers++ {
+		go func() {
+			for p.ctx.Err() == nil {
+				select {
+				case <-p.ctx.Done():
+					return
+				case c, ok := <-p.queue.Dequeue():
+					if !ok {
+						// queue closed.
+						return
+					}
+
+					p.doProvide(c)
+				}
+			}
+		}()
+	}
+}
+
+func (p *Provider) doProvide(c cid.Cid) {
+	ctx := p.ctx
+	if p.timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, p.timeout)
+		defer cancel()
+	}
+
+	logP.Info("announce - start - ", c)
+	if err := p.contentRouting.Provide(ctx, c, true); err != nil {
+		logP.Warnf("Unable to provide entry: %s, %s", c, err)
+	}
+	logP.Info("announce - end - ", c)
+}
diff --git a/provider/simple/provider_test.go b/provider/simple/provider_test.go
new file mode 100644
index 0000000000..96f009937b
--- /dev/null
+++ b/provider/simple/provider_test.go
@@ -0,0 +1,162 @@
+package simple_test
+
+import (
+	"context"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/sync"
+	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	q "github.com/ipfs/boxo/provider/queue"
+
+	. 
"github.com/ipfs/boxo/provider/simple" +) + +var blockGenerator = blocksutil.NewBlockGenerator() + +type mockRouting struct { + provided chan cid.Cid +} + +func (r *mockRouting) Provide(ctx context.Context, cid cid.Cid, recursive bool) error { + select { + case r.provided <- cid: + case <-ctx.Done(): + panic("context cancelled, but shouldn't have") + } + return nil +} + +func (r *mockRouting) FindProvidersAsync(ctx context.Context, cid cid.Cid, timeout int) <-chan peer.AddrInfo { + return nil +} + +func mockContentRouting() *mockRouting { + r := mockRouting{} + r.provided = make(chan cid.Cid) + return &r +} + +func TestAnnouncement(t *testing.T) { + ctx := context.Background() + defer ctx.Done() + + ds := sync.MutexWrap(datastore.NewMapDatastore()) + queue, err := q.NewQueue(ctx, "test", ds) + if err != nil { + t.Fatal(err) + } + + r := mockContentRouting() + + prov := NewProvider(ctx, queue, r) + prov.Run() + + cids := cid.NewSet() + + for i := 0; i < 100; i++ { + c := blockGenerator.Next().Cid() + cids.Add(c) + } + + go func() { + for _, c := range cids.Keys() { + err = prov.Provide(c) + // A little goroutine stirring to exercise some different states + r := rand.Intn(10) + time.Sleep(time.Microsecond * time.Duration(r)) + } + }() + + for cids.Len() > 0 { + select { + case cp := <-r.provided: + if !cids.Has(cp) { + t.Fatal("Wrong CID provided") + } + cids.Remove(cp) + case <-time.After(time.Second * 5): + t.Fatal("Timeout waiting for cids to be provided.") + } + } + prov.Close() + + select { + case cp := <-r.provided: + t.Fatal("did not expect to provide CID: ", cp) + case <-time.After(time.Second * 1): + } +} + +func TestClose(t *testing.T) { + ctx := context.Background() + defer ctx.Done() + + ds := sync.MutexWrap(datastore.NewMapDatastore()) + queue, err := q.NewQueue(ctx, "test", ds) + if err != nil { + t.Fatal(err) + } + + r := mockContentRouting() + + prov := NewProvider(ctx, queue, r) + prov.Run() + + prov.Close() + + select { + case cp := <-r.provided: + t.Fatal("did not expect to provide anything, provided: ", cp) + case <-time.After(time.Second * 1): + } +} + +func TestAnnouncementTimeout(t *testing.T) { + ctx := context.Background() + defer ctx.Done() + + ds := sync.MutexWrap(datastore.NewMapDatastore()) + queue, err := q.NewQueue(ctx, "test", ds) + if err != nil { + t.Fatal(err) + } + + r := mockContentRouting() + + prov := NewProvider(ctx, queue, r, WithTimeout(1*time.Second)) + prov.Run() + + cids := cid.NewSet() + + for i := 0; i < 100; i++ { + c := blockGenerator.Next().Cid() + cids.Add(c) + } + + go func() { + for _, c := range cids.Keys() { + err = prov.Provide(c) + // A little goroutine stirring to exercise some different states + r := rand.Intn(10) + time.Sleep(time.Microsecond * time.Duration(r)) + } + }() + + for cids.Len() > 0 { + select { + case cp := <-r.provided: + if !cids.Has(cp) { + t.Fatal("Wrong CID provided") + } + cids.Remove(cp) + case <-time.After(time.Second * 5): + t.Fatal("Timeout waiting for cids to be provided.") + } + } +} diff --git a/provider/simple/reprovide.go b/provider/simple/reprovide.go new file mode 100644 index 0000000000..a29b484fc4 --- /dev/null +++ b/provider/simple/reprovide.go @@ -0,0 +1,255 @@ +package simple + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cenkalti/backoff" + blocks "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/fetcher" + fetcherhelpers "github.com/ipfs/boxo/fetcher/helpers" + "github.com/ipfs/boxo/verifcid" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-cidutil" + logging 
"github.com/ipfs/go-log" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p/core/routing" +) + +var logR = logging.Logger("reprovider.simple") + +// ErrClosed is returned by Trigger when operating on a closed reprovider. +var ErrClosed = errors.New("reprovider service stopped") + +// KeyChanFunc is function streaming CIDs to pass to content routing +type KeyChanFunc func(context.Context) (<-chan cid.Cid, error) + +// Reprovider reannounces blocks to the network +type Reprovider struct { + // Reprovider context. Cancel to stop, then wait on closedCh. + ctx context.Context + cancel context.CancelFunc + closedCh chan struct{} + + // Trigger triggers a reprovide. + trigger chan chan<- error + + // The routing system to provide values through + rsys routing.ContentRouting + + keyProvider KeyChanFunc + + tick time.Duration +} + +// NewReprovider creates new Reprovider instance. +func NewReprovider(ctx context.Context, reprovideInterval time.Duration, rsys routing.ContentRouting, keyProvider KeyChanFunc) *Reprovider { + ctx, cancel := context.WithCancel(ctx) + return &Reprovider{ + ctx: ctx, + cancel: cancel, + closedCh: make(chan struct{}), + trigger: make(chan chan<- error), + + rsys: rsys, + keyProvider: keyProvider, + tick: reprovideInterval, + } +} + +// Close the reprovider +func (rp *Reprovider) Close() error { + rp.cancel() + <-rp.closedCh + return nil +} + +// Run re-provides keys with 'tick' interval or when triggered +func (rp *Reprovider) Run() { + defer close(rp.closedCh) + + var initialReprovideCh, reprovideCh <-chan time.Time + + // If reproviding is enabled (non-zero) + if rp.tick > 0 { + reprovideTicker := time.NewTicker(rp.tick) + defer reprovideTicker.Stop() + reprovideCh = reprovideTicker.C + + // If the reprovide ticker is larger than a minute (likely), + // provide once after we've been up a minute. + // + // Don't provide _immediately_ as we might be just about to stop. + if rp.tick > time.Minute { + initialReprovideTimer := time.NewTimer(time.Minute) + defer initialReprovideTimer.Stop() + + initialReprovideCh = initialReprovideTimer.C + } + } + + var done chan<- error + for rp.ctx.Err() == nil { + select { + case <-initialReprovideCh: + case <-reprovideCh: + case done = <-rp.trigger: + case <-rp.ctx.Done(): + return + } + + err := rp.Reprovide() + + // only log if we've hit an actual error, otherwise just tell the client we're shutting down + if rp.ctx.Err() != nil { + err = ErrClosed + } else if err != nil { + logR.Errorf("failed to reprovide: %s", err) + } + + if done != nil { + if err != nil { + done <- err + } + close(done) + } + } +} + +// Reprovide registers all keys given by rp.keyProvider to libp2p content routing +func (rp *Reprovider) Reprovide() error { + keychan, err := rp.keyProvider(rp.ctx) + if err != nil { + return fmt.Errorf("failed to get key chan: %s", err) + } + for c := range keychan { + // hash security + if err := verifcid.ValidateCid(c); err != nil { + logR.Errorf("insecure hash in reprovider, %s (%s)", c, err) + continue + } + op := func() error { + err := rp.rsys.Provide(rp.ctx, c, true) + if err != nil { + logR.Debugf("Failed to provide key: %s", err) + } + return err + } + + err := backoff.Retry(op, backoff.WithContext(backoff.NewExponentialBackOff(), rp.ctx)) + if err != nil { + logR.Debugf("Providing failed after number of retries: %s", err) + return err + } + } + return nil +} + +// Trigger starts the reprovision process in rp.Run and waits for it to finish. 
+// +// Returns an error if a reprovide is already in progress. +func (rp *Reprovider) Trigger(ctx context.Context) error { + resultCh := make(chan error, 1) + select { + case rp.trigger <- resultCh: + default: + return fmt.Errorf("reprovider is already running") + } + + select { + case err := <-resultCh: + return err + case <-rp.ctx.Done(): + return ErrClosed + case <-ctx.Done(): + return ctx.Err() + } +} + +// Strategies + +// NewBlockstoreProvider returns key provider using bstore.AllKeysChan +func NewBlockstoreProvider(bstore blocks.Blockstore) KeyChanFunc { + return func(ctx context.Context) (<-chan cid.Cid, error) { + return bstore.AllKeysChan(ctx) + } +} + +// Pinner interface defines how the simple.Reprovider wants to interact +// with a Pinning service +type Pinner interface { + DirectKeys(ctx context.Context) ([]cid.Cid, error) + RecursiveKeys(ctx context.Context) ([]cid.Cid, error) +} + +// NewPinnedProvider returns provider supplying pinned keys +func NewPinnedProvider(onlyRoots bool, pinning Pinner, fetchConfig fetcher.Factory) KeyChanFunc { + return func(ctx context.Context) (<-chan cid.Cid, error) { + set, err := pinSet(ctx, pinning, fetchConfig, onlyRoots) + if err != nil { + return nil, err + } + + outCh := make(chan cid.Cid) + go func() { + defer close(outCh) + for c := range set.New { + select { + case <-ctx.Done(): + return + case outCh <- c: + } + } + + }() + + return outCh, nil + } +} + +func pinSet(ctx context.Context, pinning Pinner, fetchConfig fetcher.Factory, onlyRoots bool) (*cidutil.StreamingSet, error) { + set := cidutil.NewStreamingSet() + + go func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer close(set.New) + + dkeys, err := pinning.DirectKeys(ctx) + if err != nil { + logR.Errorf("reprovide direct pins: %s", err) + return + } + for _, key := range dkeys { + set.Visitor(ctx)(key) + } + + rkeys, err := pinning.RecursiveKeys(ctx) + if err != nil { + logR.Errorf("reprovide indirect pins: %s", err) + return + } + + session := fetchConfig.NewSession(ctx) + for _, key := range rkeys { + set.Visitor(ctx)(key) + if !onlyRoots { + err := fetcherhelpers.BlockAll(ctx, session, cidlink.Link{Cid: key}, func(res fetcher.FetchResult) error { + clink, ok := res.LastBlockLink.(cidlink.Link) + if ok { + set.Visitor(ctx)(clink.Cid) + } + return nil + }) + if err != nil { + logR.Errorf("reprovide indirect pins: %s", err) + return + } + } + } + }() + + return set, nil +} diff --git a/provider/simple/reprovide_test.go b/provider/simple/reprovide_test.go new file mode 100644 index 0000000000..99efe794f1 --- /dev/null +++ b/provider/simple/reprovide_test.go @@ -0,0 +1,284 @@ +package simple_test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + blocks "github.com/ipfs/boxo/blocks" + bsrv "github.com/ipfs/boxo/blockservice" + blockstore "github.com/ipfs/boxo/blockstore" + offline "github.com/ipfs/boxo/exchange/offline" + bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice" + mock "github.com/ipfs/boxo/routing/mock" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/fluent/qp" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + testutil "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" + mh "github.com/multiformats/go-multihash" + + . 
"github.com/ipfs/boxo/provider/simple" +) + +func setupRouting(t *testing.T) (clA, clB mock.Client, idA, idB peer.ID) { + mrserv := mock.NewServer() + + iidA := testutil.RandIdentityOrFatal(t) + iidB := testutil.RandIdentityOrFatal(t) + + clA = mrserv.Client(iidA) + clB = mrserv.Client(iidB) + + return clA, clB, iidA.ID(), iidB.ID() +} + +func setupDag(t *testing.T) (nodes []cid.Cid, bstore blockstore.Blockstore) { + bstore = blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, data := range []string{"foo", "bar"} { + nb := basicnode.Prototype.Any.NewBuilder() + err := nb.AssignString(data) + if err != nil { + t.Fatal(err) + } + blk := toBlock(t, nb.Build()) + err = bstore.Put(context.Background(), blk) + if err != nil { + t.Fatal(err) + } + nodes = append(nodes, blk.Cid()) + nd, err := qp.BuildMap(basicnode.Prototype.Map, 1, func(ma ipld.MapAssembler) { + qp.MapEntry(ma, "child", qp.Link(cidlink.Link{Cid: blk.Cid()})) + }) + if err != nil { + t.Fatal(err) + } + blk = toBlock(t, nd) + err = bstore.Put(context.Background(), blk) + if err != nil { + t.Fatal(err) + } + nodes = append(nodes, blk.Cid()) + } + + return nodes, bstore +} + +func toBlock(t *testing.T, nd ipld.Node) blocks.Block { + buf := new(bytes.Buffer) + err := dagcbor.Encode(nd, buf) + if err != nil { + t.Fatal(err) + } + c, err := cid.Prefix{ + Version: 1, + Codec: cid.DagCBOR, + MhType: mh.SHA2_256, + MhLength: -1, + }.Sum(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + blk, err := blocks.NewBlockWithCid(buf.Bytes(), c) + if err != nil { + t.Fatal(err) + } + return blk +} + +func TestReprovide(t *testing.T) { + testReprovide(t, func(r *Reprovider, ctx context.Context) error { + return r.Reprovide() + }) +} + +func TestTrigger(t *testing.T) { + testReprovide(t, func(r *Reprovider, ctx context.Context) error { + go r.Run() + time.Sleep(1 * time.Second) + defer r.Close() + err := r.Trigger(ctx) + return err + }) +} + +func testReprovide(t *testing.T, trigger func(r *Reprovider, ctx context.Context) error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clA, clB, idA, _ := setupRouting(t) + nodes, bstore := setupDag(t) + + keyProvider := NewBlockstoreProvider(bstore) + reprov := NewReprovider(ctx, time.Hour, clA, keyProvider) + reprov.Trigger(context.Background()) + err := trigger(reprov, ctx) + if err != nil { + t.Fatal(err) + } + + var providers []peer.AddrInfo + maxProvs := 100 + + for _, c := range nodes { + // We provide raw cids because of the multihash keying + // FIXME(@Jorropo): I think this change should be done in the DHT layer, probably an issue with our routing mock. + b := c.Bytes() + b[1] = 0x55 // rewrite the cid to raw + _, c, err := cid.CidFromBytes(b) + if err != nil { + t.Fatal(err) + } + provChan := clB.FindProvidersAsync(ctx, c, maxProvs) + for p := range provChan { + providers = append(providers, p) + } + + if len(providers) == 0 { + t.Fatal("Should have gotten a provider") + } + + if providers[0].ID != idA { + t.Fatal("Somehow got the wrong peer back as a provider.") + } + } +} + +func TestTriggerTwice(t *testing.T) { + // Ensure we can only trigger once at a time. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clA, _, _, _ := setupRouting(t) + + keyCh := make(chan cid.Cid) + startCh := make(chan struct{}) + keyFunc := func(ctx context.Context) (<-chan cid.Cid, error) { + <-startCh + return keyCh, nil + } + + reprov := NewReprovider(ctx, time.Hour, clA, keyFunc) + go reprov.Run() + defer reprov.Close() + + // Wait for the reprovider to start, otherwise, the reprovider will + // think a concurrent reprovide is running. + // + // We _could_ fix this race... but that would be complexity for nothing. + // 1. We start a reprovide 1 minute after startup anyways. + // 2. The window is really narrow. + time.Sleep(1 * time.Second) + + errCh := make(chan error, 2) + + // Trigger in the background + go func() { + errCh <- reprov.Trigger(ctx) + }() + + // Wait for the trigger to really start. + startCh <- struct{}{} + + start := time.Now() + // Try to trigger again, this should fail immediately. + if err := reprov.Trigger(ctx); err == nil { + t.Fatal("expected an error") + } + if time.Since(start) > 10*time.Millisecond { + t.Fatal("expected reprovide to fail instantly") + } + + // Let the trigger progress. + close(keyCh) + + // Check the result. + err := <-errCh + if err != nil { + t.Fatal(err) + } + + // Try to trigger again, this should work. + go func() { + errCh <- reprov.Trigger(ctx) + }() + startCh <- struct{}{} + err = <-errCh + if err != nil { + t.Fatal(err) + } +} + +type mockPinner struct { + recursive []cid.Cid + direct []cid.Cid +} + +func (mp *mockPinner) DirectKeys(ctx context.Context) ([]cid.Cid, error) { + return mp.direct, nil +} + +func (mp *mockPinner) RecursiveKeys(ctx context.Context) ([]cid.Cid, error) { + return mp.recursive, nil +} + +func TestReprovidePinned(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nodes, bstore := setupDag(t) + + fetchConfig := bsfetcher.NewFetcherConfig(bsrv.New(bstore, offline.Exchange(bstore))) + + for i := 0; i < 2; i++ { + clA, clB, idA, _ := setupRouting(t) + + onlyRoots := i == 0 + t.Logf("only roots: %v", onlyRoots) + + var provide, dont []cid.Cid + if onlyRoots { + provide = []cid.Cid{nodes[1], nodes[3]} + dont = []cid.Cid{nodes[0], nodes[2]} + } else { + provide = []cid.Cid{nodes[0], nodes[1], nodes[3]} + dont = []cid.Cid{nodes[2]} + } + + keyProvider := NewPinnedProvider(onlyRoots, &mockPinner{ + recursive: []cid.Cid{nodes[1]}, + direct: []cid.Cid{nodes[3]}, + }, fetchConfig) + + reprov := NewReprovider(ctx, time.Hour, clA, keyProvider) + err := reprov.Reprovide() + if err != nil { + t.Fatal(err) + } + + for i, c := range provide { + prov, ok := <-clB.FindProvidersAsync(ctx, c, 1) + if !ok { + t.Errorf("Should have gotten a provider for %d", i) + continue + } + + if prov.ID != idA { + t.Errorf("Somehow got the wrong peer back as a provider.") + continue + } + } + for i, c := range dont { + prov, ok := <-clB.FindProvidersAsync(ctx, c, 1) + if ok { + t.Fatalf("found provider %s for %d, expected none", prov.ID, i) + } + } + } +} diff --git a/provider/system.go b/provider/system.go new file mode 100644 index 0000000000..9fc3e8879c --- /dev/null +++ b/provider/system.go @@ -0,0 +1,60 @@ +package provider + +import ( + "context" + + "github.com/ipfs/go-cid" +) + +// System defines the interface for interacting with the value +// provider system +type System interface { + Run() + Close() error + Provide(cid.Cid) error + Reprovide(context.Context) error +} + +type system struct { + provider Provider + reprovider Reprovider +} + +// 
NewSystem constructs a new provider system from a provider and reprovider
+func NewSystem(provider Provider, reprovider Reprovider) System {
+	return &system{provider, reprovider}
+}
+
+// Run the provider system by running the provider and reprovider
+func (s *system) Run() {
+	go s.provider.Run()
+	go s.reprovider.Run()
+}
+
+// Close the provider and reprovider
+func (s *system) Close() error {
+	var errs []error
+
+	if err := s.provider.Close(); err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := s.reprovider.Close(); err != nil {
+		errs = append(errs, err)
+	}
+
+	if len(errs) > 0 {
+		return errs[0]
+	}
+	return nil
+}
+
+// Provide a value
+func (s *system) Provide(cid cid.Cid) error {
+	return s.provider.Provide(cid)
+}
+
+// Reprovide all the previously provided values
+func (s *system) Reprovide(ctx context.Context) error {
+	return s.reprovider.Trigger(ctx)
+}
diff --git a/routing/http/README.md b/routing/http/README.md
new file mode 100644
index 0000000000..65650ed509
--- /dev/null
+++ b/routing/http/README.md
@@ -0,0 +1,24 @@
+go-delegated-routing
+=======================
+
+> Delegated routing Client and Server over Reframe RPC
+
+This package provides a delegated routing implementation in Go:
+- Client (for IPFS nodes like [Kubo](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingrouters-parameters)),
+- Server (for public indexers such as https://cid.contact)
+
+## Documentation
+
+- Go docs: https://pkg.go.dev/github.com/ipfs/boxo/routing/http/
+
+## Lead Maintainer
+
+🦗🎶
+
+## Contributing
+
+Contributions are welcome! This repository is part of the IPFS project and therefore governed by our [contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
+
+## License
+
+[SPDX-License-Identifier: Apache-2.0 OR MIT](LICENSE.md)
\ No newline at end of file
diff --git a/routing/http/client/client.go b/routing/http/client/client.go
new file mode 100644
index 0000000000..4e5cfb8f47
--- /dev/null
+++ b/routing/http/client/client.go
@@ -0,0 +1,247 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	ipns "github.com/ipfs/boxo/ipns"
+	"github.com/ipfs/boxo/routing/http/contentrouter"
+	"github.com/ipfs/boxo/routing/http/internal/drjson"
+	"github.com/ipfs/boxo/routing/http/server"
+	"github.com/ipfs/boxo/routing/http/types"
+	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log/v2"
+	record "github.com/libp2p/go-libp2p-record"
+	"github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/multiformats/go-multiaddr"
+)
+
+var logger = logging.Logger("service/delegatedrouting")
+
+type client struct {
+	baseURL    string
+	httpClient httpClient
+	validator  record.Validator
+	clock      clock.Clock
+
+	peerID   peer.ID
+	addrs    []types.Multiaddr
+	identity crypto.PrivKey
+
+	// called immediately after signing a provide request;
+	// used for testing, e.g. testing the server with a mangled signature
+	afterSignCallback func(req *types.WriteBitswapProviderRecord)
+}
+
+// defaultUserAgent is used as a fallback to inform the HTTP server which library
+// version sent a request
+var defaultUserAgent = moduleVersion()
+
+var _ contentrouter.Client = &client{}
+
+type httpClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+type option func(*client)
+
+func WithIdentity(identity crypto.PrivKey) option {
+	return func(c *client) {
+		c.identity = identity
+	}
+}
+
+func WithHTTPClient(h httpClient) option {
+	return func(c *client) {
+		c.httpClient = h
+	}
+}
+
+func WithUserAgent(ua string) option {
+	return func(c *client) {
+		if ua == "" {
+			return
+		}
+		httpClient, ok := c.httpClient.(*http.Client)
+		if !ok {
+			return
+		}
+		transport, ok := httpClient.Transport.(*ResponseBodyLimitedTransport)
+		if !ok {
+			return
+		}
+		transport.UserAgent = ua
+	}
+}
+
+func WithProviderInfo(peerID peer.ID, addrs []multiaddr.Multiaddr) option {
+	return func(c *client) {
+		c.peerID = peerID
+		for _, a := range addrs {
+			c.addrs = append(c.addrs, types.Multiaddr{Multiaddr: a})
+		}
+	}
+}
+
+// New creates a content routing API client.
+// The Provider and identity parameters are optional. If they are nil, the `Provide` method will not function.
+func New(baseURL string, opts ...option) (*client, error) {
+	defaultHTTPClient := &http.Client{
+		Transport: &ResponseBodyLimitedTransport{
+			RoundTripper: http.DefaultTransport,
+			LimitBytes:   1 << 20,
+			UserAgent:    defaultUserAgent,
+		},
+	}
+	client := &client{
+		baseURL:    baseURL,
+		httpClient: defaultHTTPClient,
+		validator:  ipns.Validator{},
+		clock:      clock.New(),
+	}
+
+	for _, opt := range opts {
+		opt(client)
+	}
+
+	if client.identity != nil && client.peerID.Size() != 0 && !client.peerID.MatchesPublicKey(client.identity.GetPublic()) {
+		return nil, errors.New("identity does not match provider")
+	}
+
+	return client, nil
+}
+
+func (c *client) FindProviders(ctx context.Context, key cid.Cid) (provs []types.ProviderResponse, err error) {
+	measurement := newMeasurement("FindProviders")
+	defer func() {
+		measurement.length = len(provs)
+		measurement.record(ctx)
+	}()
+
+	url := c.baseURL + server.ProvidePath + key.String()
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	measurement.host = req.Host
+
+	start := c.clock.Now()
+	resp, err := c.httpClient.Do(req)
+
+	measurement.err = err
+	measurement.latency = c.clock.Since(start)
+
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	measurement.statusCode = resp.StatusCode
+	if resp.StatusCode == http.StatusNotFound {
+		return nil, nil
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, httpError(resp.StatusCode, resp.Body)
+	}
+
+	parsedResp := &types.ReadProvidersResponse{}
+	err = json.NewDecoder(resp.Body).Decode(parsedResp)
+	return parsedResp.Providers, err
+}
+
+func (c *client) ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Duration) (time.Duration, error) {
+	if c.identity == nil {
+		return 0, errors.New("cannot provide Bitswap records without an identity")
+	}
+	if c.peerID.Size() == 0 {
+		return 0, errors.New("cannot provide Bitswap records without a peer ID")
+	}
+
+	ks := make([]types.CID, len(keys))
+	for i, c := range keys {
+		ks[i] = types.CID{Cid: c}
+	}
+
+	now := c.clock.Now()
+
+	req := types.WriteBitswapProviderRecord{
+		Protocol: "transport-bitswap",
+		Schema:   types.SchemaBitswap,
+		Payload: types.BitswapPayload{
+			Keys: ks,
+			
AdvisoryTTL: &types.Duration{Duration: ttl}, + Timestamp: &types.Time{Time: now}, + ID: &c.peerID, + Addrs: c.addrs, + }, + } + err := req.Sign(c.peerID, c.identity) + if err != nil { + return 0, err + } + + if c.afterSignCallback != nil { + c.afterSignCallback(&req) + } + + advisoryTTL, err := c.provideSignedBitswapRecord(ctx, &req) + if err != nil { + return 0, err + } + + return advisoryTTL, err +} + +// ProvideAsync makes a provide request to a delegated router +func (c *client) provideSignedBitswapRecord(ctx context.Context, bswp *types.WriteBitswapProviderRecord) (time.Duration, error) { + req := types.WriteProvidersRequest{Providers: []types.WriteProviderRecord{bswp}} + + url := c.baseURL + server.ProvidePath + + b, err := drjson.MarshalJSONBytes(req) + if err != nil { + return 0, err + } + + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPut, url, bytes.NewBuffer(b)) + if err != nil { + return 0, err + } + + resp, err := c.httpClient.Do(httpReq) + if err != nil { + return 0, fmt.Errorf("making HTTP req to provide a signed record: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return 0, httpError(resp.StatusCode, resp.Body) + } + var provideResult types.WriteProvidersResponse + err = json.NewDecoder(resp.Body).Decode(&provideResult) + if err != nil { + return 0, err + } + if len(provideResult.ProvideResults) != 1 { + return 0, fmt.Errorf("expected 1 result but got %d", len(provideResult.ProvideResults)) + } + + v, ok := provideResult.ProvideResults[0].(*types.WriteBitswapProviderRecordResponse) + if !ok { + return 0, fmt.Errorf("expected AdvisoryTTL field") + } + + if v.AdvisoryTTL != nil { + return v.AdvisoryTTL.Duration, nil + } + + return 0, nil +} diff --git a/routing/http/client/client_test.go b/routing/http/client/client_test.go new file mode 100644 index 0000000000..d8fc4abac1 --- /dev/null +++ b/routing/http/client/client_test.go @@ -0,0 +1,342 @@ +package client + +import ( + "context" + "crypto/rand" + "net/http" + "net/http/httptest" + "runtime" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/ipfs/boxo/routing/http/server" + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multibase" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type mockContentRouter struct{ mock.Mock } + +func (m *mockContentRouter) FindProviders(ctx context.Context, key cid.Cid) ([]types.ProviderResponse, error) { + args := m.Called(ctx, key) + return args.Get(0).([]types.ProviderResponse), args.Error(1) +} +func (m *mockContentRouter) ProvideBitswap(ctx context.Context, req *server.BitswapWriteProvideRequest) (time.Duration, error) { + args := m.Called(ctx, req) + return args.Get(0).(time.Duration), args.Error(1) +} + +func (m *mockContentRouter) Provide(ctx context.Context, req *server.WriteProvideRequest) (types.ProviderResponse, error) { + args := m.Called(ctx, req) + return args.Get(0).(types.ProviderResponse), args.Error(1) +} + +type testDeps struct { + router *mockContentRouter + server *httptest.Server + peerID peer.ID + addrs []multiaddr.Multiaddr + client *client +} + +func makeTestDeps(t *testing.T) testDeps { + const testUserAgent = "testUserAgent" + peerID, addrs, identity := makeProviderAndIdentity() + router := 
&mockContentRouter{} + server := httptest.NewServer(server.Handler(router)) + t.Cleanup(server.Close) + serverAddr := "http://" + server.Listener.Addr().String() + c, err := New(serverAddr, WithProviderInfo(peerID, addrs), WithIdentity(identity), WithUserAgent(testUserAgent)) + if err != nil { + panic(err) + } + assertUserAgentOverride(t, c, testUserAgent) + return testDeps{ + router: router, + server: server, + peerID: peerID, + addrs: addrs, + client: c, + } +} + +func assertUserAgentOverride(t *testing.T, c *client, expected string) { + httpClient, ok := c.httpClient.(*http.Client) + if !ok { + t.Error("invalid c.httpClient") + } + transport, ok := httpClient.Transport.(*ResponseBodyLimitedTransport) + if !ok { + t.Error("invalid httpClient.Transport") + } + if transport.UserAgent != expected { + t.Error("invalid httpClient.Transport.UserAgent") + } +} + +func makeCID() cid.Cid { + buf := make([]byte, 63) + _, err := rand.Read(buf) + if err != nil { + panic(err) + } + mh, err := multihash.Encode(buf, multihash.SHA2_256) + if err != nil { + panic(err) + } + c := cid.NewCidV1(0, mh) + return c +} + +func addrsToDRAddrs(addrs []multiaddr.Multiaddr) (drmas []types.Multiaddr) { + for _, a := range addrs { + drmas = append(drmas, types.Multiaddr{Multiaddr: a}) + } + return +} + +func drAddrsToAddrs(drmas []types.Multiaddr) (addrs []multiaddr.Multiaddr) { + for _, a := range drmas { + addrs = append(addrs, a.Multiaddr) + } + return +} + +func makeBSReadProviderResp() types.ReadBitswapProviderRecord { + peerID, addrs, _ := makeProviderAndIdentity() + return types.ReadBitswapProviderRecord{ + Protocol: "transport-bitswap", + Schema: types.SchemaBitswap, + ID: &peerID, + Addrs: addrsToDRAddrs(addrs), + } +} + +func makeProviderAndIdentity() (peer.ID, []multiaddr.Multiaddr, crypto.PrivKey) { + priv, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + panic(err) + } + peerID, err := peer.IDFromPrivateKey(priv) + if err != nil { + panic(err) + } + ma1, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/4001") + if err != nil { + panic(err) + } + + ma2, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/4002") + if err != nil { + panic(err) + } + + return peerID, []multiaddr.Multiaddr{ma1, ma2}, priv +} + +func TestClient_FindProviders(t *testing.T) { + bsReadProvResp := makeBSReadProviderResp() + bitswapProvs := []types.ProviderResponse{&bsReadProvResp} + + cases := []struct { + name string + httpStatusCode int + stopServer bool + routerProvs []types.ProviderResponse + routerErr error + + expProvs []types.ProviderResponse + expErrContains []string + expWinErrContains []string + }{ + { + name: "happy case", + routerProvs: bitswapProvs, + expProvs: bitswapProvs, + }, + { + name: "returns an error if there's a non-200 response", + httpStatusCode: 500, + expErrContains: []string{"HTTP error with StatusCode=500: "}, + }, + { + name: "returns an error if the HTTP client returns a non-HTTP error", + stopServer: true, + expErrContains: []string{"connect: connection refused"}, + expWinErrContains: []string{"connectex: No connection could be made because the target machine actively refused it."}, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + deps := makeTestDeps(t) + client := deps.client + router := deps.router + + if c.httpStatusCode != 0 { + deps.server.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(c.httpStatusCode) + }) + } + + if c.stopServer { + deps.server.Close() + } + cid := makeCID() + + 
router.On("FindProviders", mock.Anything, cid). + Return(c.routerProvs, c.routerErr) + + provs, err := client.FindProviders(context.Background(), cid) + + var errList []string + if runtime.GOOS == "windows" && len(c.expWinErrContains) != 0 { + errList = c.expWinErrContains + } else { + errList = c.expErrContains + } + + for _, exp := range errList { + require.ErrorContains(t, err, exp) + } + if len(errList) == 0 { + require.NoError(t, err) + } + + assert.Equal(t, c.expProvs, provs) + }) + } +} + +func TestClient_Provide(t *testing.T) { + cases := []struct { + name string + manglePath bool + mangleSignature bool + stopServer bool + noProviderInfo bool + noIdentity bool + + cids []cid.Cid + ttl time.Duration + + routerAdvisoryTTL time.Duration + routerErr error + + expErrContains string + expWinErrContains string + + expAdvisoryTTL time.Duration + }{ + { + name: "happy case", + cids: []cid.Cid{makeCID()}, + ttl: 1 * time.Hour, + routerAdvisoryTTL: 1 * time.Minute, + + expAdvisoryTTL: 1 * time.Minute, + }, + { + name: "should return a 403 if the payload signature verification fails", + cids: []cid.Cid{}, + mangleSignature: true, + + expErrContains: "HTTP error with StatusCode=403", + }, + { + name: "should return error if identity is not provided", + noIdentity: true, + expErrContains: "cannot provide Bitswap records without an identity", + }, + { + name: "should return error if provider is not provided", + noProviderInfo: true, + expErrContains: "cannot provide Bitswap records without a peer ID", + }, + { + name: "returns an error if there's a non-200 response", + manglePath: true, + expErrContains: "HTTP error with StatusCode=404: 404 page not found", + }, + { + name: "returns an error if the HTTP client returns a non-HTTP error", + stopServer: true, + expErrContains: "connect: connection refused", + expWinErrContains: "connectex: No connection could be made because the target machine actively refused it.", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + // deps := makeTestDeps(t) + deps := makeTestDeps(t) + client := deps.client + router := deps.router + + if c.noIdentity { + client.identity = nil + } + if c.noProviderInfo { + client.peerID = "" + client.addrs = nil + } + + clock := clock.NewMock() + clock.Set(time.Now()) + client.clock = clock + + ctx := context.Background() + + if c.manglePath { + client.baseURL += "/foo" + } + if c.stopServer { + deps.server.Close() + } + if c.mangleSignature { + client.afterSignCallback = func(req *types.WriteBitswapProviderRecord) { + mh, err := multihash.Encode([]byte("boom"), multihash.SHA2_256) + require.NoError(t, err) + mb, err := multibase.Encode(multibase.Base64, mh) + require.NoError(t, err) + + req.Signature = mb + } + } + + expectedProvReq := &server.BitswapWriteProvideRequest{ + Keys: c.cids, + Timestamp: clock.Now().Truncate(time.Millisecond), + AdvisoryTTL: c.ttl, + Addrs: drAddrsToAddrs(client.addrs), + ID: client.peerID, + } + + router.On("ProvideBitswap", mock.Anything, expectedProvReq). 
+ Return(c.routerAdvisoryTTL, c.routerErr) + + advisoryTTL, err := client.ProvideBitswap(ctx, c.cids, c.ttl) + + var errorString string + if runtime.GOOS == "windows" && c.expWinErrContains != "" { + errorString = c.expWinErrContains + } else { + errorString = c.expErrContains + } + + if errorString != "" { + require.ErrorContains(t, err, errorString) + } else { + require.NoError(t, err) + } + + assert.Equal(t, c.expAdvisoryTTL, advisoryTTL) + }) + } +} diff --git a/routing/http/client/error.go b/routing/http/client/error.go new file mode 100644 index 0000000000..b3b7904d51 --- /dev/null +++ b/routing/http/client/error.go @@ -0,0 +1,27 @@ +package client + +import ( + "fmt" + "io" +) + +type HTTPError struct { + StatusCode int + Body string +} + +func (e *HTTPError) Error() string { + return fmt.Sprintf("HTTP error with StatusCode=%d: %s", e.StatusCode, e.Body) +} + +func httpError(statusCode int, body io.Reader) error { + bodyBytes, err := io.ReadAll(io.LimitReader(body, 1024)) + if err != nil { + logger.Warnw("could not read body bytes from error response", "Error", err) + bodyBytes = []byte("unable to read body") + } + return &HTTPError{ + StatusCode: statusCode, + Body: string(bodyBytes), + } +} diff --git a/routing/http/client/measures.go b/routing/http/client/measures.go new file mode 100644 index 0000000000..942460518e --- /dev/null +++ b/routing/http/client/measures.go @@ -0,0 +1,119 @@ +package client + +import ( + "context" + "errors" + "net" + "strconv" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + distMS = view.Distribution(0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000) + distLength = view.Distribution(0, 1, 2, 5, 10, 11, 12, 15, 20, 50, 100, 200, 500) + + measureLatency = stats.Int64("routing_http_client_latency", "the latency of operations by the routing HTTP client", stats.UnitMilliseconds) + measureLength = stats.Int64("routing_http_client_length", "the number of elements in a response collection", stats.UnitDimensionless) + + keyOperation = tag.MustNewKey("operation") + keyHost = tag.MustNewKey("host") + keyStatusCode = tag.MustNewKey("code") + keyError = tag.MustNewKey("error") + + ViewLatency = &view.View{ + Measure: measureLatency, + Aggregation: distMS, + TagKeys: []tag.Key{keyOperation, keyHost, keyStatusCode, keyError}, + } + ViewLength = &view.View{ + Measure: measureLength, + Aggregation: distLength, + TagKeys: []tag.Key{keyOperation, keyHost}, + } + + OpenCensusViews = []*view.View{ + ViewLatency, + ViewLength, + } +) + +type measurement struct { + operation string + err error + latency time.Duration + statusCode int + host string + length int +} + +func (m measurement) record(ctx context.Context) { + stats.RecordWithTags( + ctx, + []tag.Mutator{ + tag.Upsert(keyHost, m.host), + tag.Upsert(keyOperation, m.operation), + tag.Upsert(keyStatusCode, strconv.Itoa(m.statusCode)), + tag.Upsert(keyError, metricsErrStr(m.err)), + }, + measureLatency.M(m.latency.Milliseconds()), + ) + if m.err == nil { + stats.RecordWithTags( + ctx, + []tag.Mutator{ + tag.Upsert(keyHost, m.host), + tag.Upsert(keyOperation, m.operation), + }, + measureLength.M(int64(m.length)), + ) + } +} + +func newMeasurement(operation string) measurement { + return measurement{ + operation: operation, + host: "None", + } +} + +// metricsErrStr converts an error into a string that can be used as a metric label. +// Errs are mapped to strings explicitly to avoid accidental high dimensionality. 
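+// For example, an *HTTPError maps to "HTTP", context.DeadlineExceeded maps to
+// "DeadlineExceeded", and a *net.DNSError maps to one of "DNSNotFound",
+// "DNSTimeout" or "DNS"; anything unrecognized maps to "Other".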
+func metricsErrStr(err error) string {
+	if err == nil {
+		return "None"
+	}
+	var httpErr *HTTPError
+	if errors.As(err, &httpErr) {
+		return "HTTP"
+	}
+	if errors.Is(err, context.DeadlineExceeded) {
+		return "DeadlineExceeded"
+	}
+	if errors.Is(err, context.Canceled) {
+		return "Canceled"
+	}
+	var dnsErr *net.DNSError
+	if errors.As(err, &dnsErr) {
+		if dnsErr.IsNotFound {
+			return "DNSNotFound"
+		}
+		if dnsErr.IsTimeout {
+			return "DNSTimeout"
+		}
+		return "DNS"
+	}
+
+	var netErr net.Error
+	if errors.As(err, &netErr) {
+		if netErr.Timeout() {
+			return "NetTimeout"
+		}
+		return "Net"
+	}
+
+	return "Other"
+}
diff --git a/routing/http/client/transport.go b/routing/http/client/transport.go
new file mode 100644
index 0000000000..357d25cb2e
--- /dev/null
+++ b/routing/http/client/transport.go
@@ -0,0 +1,79 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"runtime/debug"
+	"strings"
+)
+
+type ResponseBodyLimitedTransport struct {
+	http.RoundTripper
+	LimitBytes int64
+	UserAgent  string
+}
+
+func (r *ResponseBodyLimitedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if r.UserAgent != "" {
+		req.Header.Set("User-Agent", r.UserAgent)
+	}
+	resp, err := r.RoundTripper.RoundTrip(req)
+	if resp != nil && resp.Body != nil {
+		resp.Body = &limitReadCloser{
+			limit:      r.LimitBytes,
+			ReadCloser: resp.Body,
+		}
+	}
+	return resp, err
+}
+
+type limitReadCloser struct {
+	limit     int64
+	bytesRead int64
+	io.ReadCloser
+}
+
+func (l *limitReadCloser) Read(p []byte) (int, error) {
+	n, err := l.ReadCloser.Read(p)
+	l.bytesRead += int64(n)
+	if l.bytesRead > l.limit {
+		return 0, fmt.Errorf("reached read limit of %d bytes after reading %d bytes", l.limit, l.bytesRead)
+	}
+	return n, err
+}
+
+// ImportPath is the canonical import path that allows us to identify
+// official client builds vs modified forks, and use that info in the User-Agent header.
+var ImportPath = importPath()
+
+// importPath returns the path that library consumers would have in go.mod
+func importPath() string {
+	p := reflect.ValueOf(ResponseBodyLimitedTransport{}).Type().PkgPath()
+	// we have a monorepo, so stripping the remainder
+	return strings.TrimSuffix(p, "/routing/http/client")
+}
+
+// moduleVersion returns a useful user agent version string allowing us to
+// identify requests coming from official releases of this module vs forks.
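+// If this module cannot be found in the embedded build info (as is the case
+// when running this package's own tests), the version suffix falls back to
+// "@unknown".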
+func moduleVersion() (ua string) { + ua = ImportPath + var module *debug.Module + if bi, ok := debug.ReadBuildInfo(); ok { + // If debug.ReadBuildInfo was successful, we can read Version by finding + // this client in the dependency list of the app that has it in go.mod + for _, dep := range bi.Deps { + if dep.Path == ImportPath { + module = dep + break + } + } + if module != nil { + ua += "@" + module.Version + return + } + ua += "@unknown" + } + return +} diff --git a/routing/http/client/transport_test.go b/routing/http/client/transport_test.go new file mode 100644 index 0000000000..7da545062c --- /dev/null +++ b/routing/http/client/transport_test.go @@ -0,0 +1,84 @@ +package client + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type testServer struct { + bytesToWrite int +} + +func (s *testServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + bytes := make([]byte, s.bytesToWrite) + for i := 0; i < s.bytesToWrite; i++ { + bytes[i] = 'a' + } + _, err := w.Write(bytes) + if err != nil { + panic(err) + } +} + +func TestResponseBodyLimitedTransport(t *testing.T) { + for _, c := range []struct { + name string + limit int64 + serverSend int + + expErr string + }{ + { + name: "under the limit should succeed", + limit: 1 << 20, + serverSend: 1 << 19, + }, + { + name: "over the limit should fail", + limit: 1 << 20, + serverSend: 1 << 21, + expErr: "reached read limit of 1048576 bytes after reading", + }, + { + name: "exactly on the limit should succeed", + limit: 1 << 20, + serverSend: 1 << 20, + }, + } { + t.Run(c.name, func(t *testing.T) { + server := httptest.NewServer(&testServer{bytesToWrite: c.serverSend}) + t.Cleanup(server.Close) + + client := server.Client() + client.Transport = &ResponseBodyLimitedTransport{ + LimitBytes: c.limit, + RoundTripper: client.Transport, + } + + resp, err := client.Get(server.URL) + require.NoError(t, err) + defer resp.Body.Close() + + _, err = io.ReadAll(resp.Body) + + if c.expErr == "" { + assert.NoError(t, err) + } else { + assert.Contains(t, err.Error(), c.expErr) + } + + }) + } +} + +func TestUserAgentVersionString(t *testing.T) { + // forks will have to update below lines to pass test + assert.Equal(t, importPath(), "github.com/ipfs/boxo") + // @unknown because we run in tests + assert.Equal(t, moduleVersion(), "github.com/ipfs/boxo@unknown") +} diff --git a/routing/http/contentrouter/contentrouter.go b/routing/http/contentrouter/contentrouter.go new file mode 100644 index 0000000000..572ac26559 --- /dev/null +++ b/routing/http/contentrouter/contentrouter.go @@ -0,0 +1,140 @@ +package contentrouter + +import ( + "context" + "reflect" + "time" + + "github.com/ipfs/boxo/routing/http/internal" + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multihash" +) + +var logger = logging.Logger("service/contentrouting") + +const ttl = 24 * time.Hour + +type Client interface { + ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Duration) (time.Duration, error) + FindProviders(ctx context.Context, key cid.Cid) ([]types.ProviderResponse, error) +} + +type contentRouter struct { + client Client + maxProvideConcurrency int + maxProvideBatchSize int +} + +var _ routing.ContentRouting = (*contentRouter)(nil) + +type option func(c 
*contentRouter) + +func WithMaxProvideConcurrency(max int) option { + return func(c *contentRouter) { + c.maxProvideConcurrency = max + } +} + +func WithMaxProvideBatchSize(max int) option { + return func(c *contentRouter) { + c.maxProvideBatchSize = max + } +} + +func NewContentRoutingClient(c Client, opts ...option) *contentRouter { + cr := &contentRouter{ + client: c, + maxProvideConcurrency: 5, + maxProvideBatchSize: 100, + } + for _, opt := range opts { + opt(cr) + } + return cr +} + +func (c *contentRouter) Provide(ctx context.Context, key cid.Cid, announce bool) error { + // If 'true' is + // passed, it also announces it, otherwise it is just kept in the local + // accounting of which objects are being provided. + if !announce { + return nil + } + + _, err := c.client.ProvideBitswap(ctx, []cid.Cid{key}, ttl) + return err +} + +// ProvideMany provides a set of keys to the remote delegate. +// Large sets of keys are chunked into multiple requests and sent concurrently, according to the concurrency configuration. +// TODO: implement retries through transient errors +func (c *contentRouter) ProvideMany(ctx context.Context, mhKeys []multihash.Multihash) error { + keys := make([]cid.Cid, 0, len(mhKeys)) + for _, m := range mhKeys { + keys = append(keys, cid.NewCidV1(cid.Raw, m)) + } + + if len(keys) <= c.maxProvideBatchSize { + _, err := c.client.ProvideBitswap(ctx, keys, ttl) + return err + } + + return internal.DoBatch( + ctx, + c.maxProvideBatchSize, + c.maxProvideConcurrency, + keys, + func(ctx context.Context, batch []cid.Cid) error { + _, err := c.client.ProvideBitswap(ctx, batch, ttl) + return err + }, + ) +} + +// Ready is part of the existing `ProvideMany` interface. +func (c *contentRouter) Ready() bool { + return true +} + +func (c *contentRouter) FindProvidersAsync(ctx context.Context, key cid.Cid, numResults int) <-chan peer.AddrInfo { + results, err := c.client.FindProviders(ctx, key) + if err != nil { + logger.Warnw("error finding providers", "CID", key, "Error", err) + ch := make(chan peer.AddrInfo) + close(ch) + return ch + } + + ch := make(chan peer.AddrInfo, len(results)) + for _, r := range results { + if r.GetSchema() == types.SchemaBitswap { + result, ok := r.(*types.ReadBitswapProviderRecord) + if !ok { + logger.Errorw( + "problem casting find providers result", + "Schema", r.GetSchema(), + "Type", reflect.TypeOf(r).String(), + ) + continue + } + + var addrs []multiaddr.Multiaddr + for _, a := range result.Addrs { + addrs = append(addrs, a.Multiaddr) + } + + ch <- peer.AddrInfo{ + ID: *result.ID, + Addrs: addrs, + } + } + + } + close(ch) + return ch +} diff --git a/routing/http/contentrouter/contentrouter_test.go b/routing/http/contentrouter/contentrouter_test.go new file mode 100644 index 0000000000..643ad301ff --- /dev/null +++ b/routing/http/contentrouter/contentrouter_test.go @@ -0,0 +1,139 @@ +package contentrouter + +import ( + "context" + "crypto/rand" + "testing" + "time" + + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type mockClient struct{ mock.Mock } + +func (m *mockClient) ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Duration) (time.Duration, error) { + args := m.Called(ctx, keys, ttl) + return args.Get(0).(time.Duration), args.Error(1) +} +func (m *mockClient) FindProviders(ctx context.Context, key cid.Cid) 
([]types.ProviderResponse, error) { + args := m.Called(ctx, key) + return args.Get(0).([]types.ProviderResponse), args.Error(1) +} +func (m *mockClient) Ready(ctx context.Context) (bool, error) { + args := m.Called(ctx) + return args.Bool(0), args.Error(1) +} +func makeCID() cid.Cid { + buf := make([]byte, 63) + _, err := rand.Read(buf) + if err != nil { + panic(err) + } + mh, err := multihash.Encode(buf, multihash.SHA2_256) + if err != nil { + panic(err) + } + c := cid.NewCidV1(cid.Raw, mh) + return c +} + +func TestProvide(t *testing.T) { + for _, c := range []struct { + name string + announce bool + + expNotProvided bool + }{ + { + name: "announce=false results in no client request", + announce: false, + expNotProvided: true, + }, + { + name: "announce=true results in a client req", + announce: true, + }, + } { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + key := makeCID() + client := &mockClient{} + crc := NewContentRoutingClient(client) + + if !c.expNotProvided { + client.On("ProvideBitswap", ctx, []cid.Cid{key}, ttl).Return(time.Minute, nil) + } + + err := crc.Provide(ctx, key, c.announce) + assert.NoError(t, err) + + if c.expNotProvided { + client.AssertNumberOfCalls(t, "ProvideBitswap", 0) + } + + }) + } +} + +func TestProvideMany(t *testing.T) { + cids := []cid.Cid{makeCID(), makeCID()} + var mhs []multihash.Multihash + for _, c := range cids { + mhs = append(mhs, c.Hash()) + } + ctx := context.Background() + client := &mockClient{} + crc := NewContentRoutingClient(client) + + client.On("ProvideBitswap", ctx, cids, ttl).Return(time.Minute, nil) + + err := crc.ProvideMany(ctx, mhs) + require.NoError(t, err) +} + +func TestFindProvidersAsync(t *testing.T) { + key := makeCID() + ctx := context.Background() + client := &mockClient{} + crc := NewContentRoutingClient(client) + + p1 := peer.ID("peer1") + p2 := peer.ID("peer2") + ais := []types.ProviderResponse{ + &types.ReadBitswapProviderRecord{ + Protocol: "transport-bitswap", + Schema: types.SchemaBitswap, + ID: &p1, + }, + &types.ReadBitswapProviderRecord{ + Protocol: "transport-bitswap", + Schema: types.SchemaBitswap, + ID: &p2, + }, + &types.UnknownProviderRecord{ + Protocol: "UNKNOWN", + }, + } + + client.On("FindProviders", ctx, key).Return(ais, nil) + + aiChan := crc.FindProvidersAsync(ctx, key, 2) + + var actualAIs []peer.AddrInfo + for ai := range aiChan { + actualAIs = append(actualAIs, ai) + } + + expected := []peer.AddrInfo{ + {ID: p1}, + {ID: p2}, + } + + require.Equal(t, expected, actualAIs) +} diff --git a/routing/http/internal/drjson/json.go b/routing/http/internal/drjson/json.go new file mode 100644 index 0000000000..3bc3ab9420 --- /dev/null +++ b/routing/http/internal/drjson/json.go @@ -0,0 +1,26 @@ +package drjson + +import ( + "bytes" + "encoding/json" +) + +func marshalJSON(val any) (*bytes.Buffer, error) { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(val) + return buf, err +} + +// MarshalJSONBytes is needed to avoid changes +// on the original bytes due to HTML escapes. 
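+// A minimal usage sketch (the input map is a placeholder):
+//
+//	b, err := MarshalJSONBytes(map[string]string{"k": "<v>"})
+//	// on success, b is {"k":"<v>"}: "<" and ">" stay unescaped and the
+//	// trailing newline added by Encode is trimmed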
+func MarshalJSONBytes(val any) ([]byte, error) { + buf, err := marshalJSON(val) + if err != nil { + return nil, err + } + + // remove last \n added by Encode + return buf.Bytes()[:buf.Len()-1], nil +} diff --git a/routing/http/internal/drjson/json_test.go b/routing/http/internal/drjson/json_test.go new file mode 100644 index 0000000000..0e3bae81b2 --- /dev/null +++ b/routing/http/internal/drjson/json_test.go @@ -0,0 +1,16 @@ +package drjson + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMarshalJSON(t *testing.T) { + // ensure that < is not escaped, which is the default Go behavior + bytes, err := MarshalJSONBytes(map[string]string{"<": "<"}) + if err != nil { + panic(err) + } + require.Equal(t, "{\"<\":\"<\"}", string(bytes)) +} diff --git a/routing/http/internal/goroutines.go b/routing/http/internal/goroutines.go new file mode 100644 index 0000000000..4c44428ab4 --- /dev/null +++ b/routing/http/internal/goroutines.go @@ -0,0 +1,71 @@ +package internal + +import ( + "context" + "sync" + + "github.com/samber/lo" +) + +// DoBatch processes a slice of items with concurrency no higher than maxConcurrency by splitting it into batches no larger than maxBatchSize. +// If an error is returned for any batch, the process is short-circuited and the error is immediately returned. +func DoBatch[A any](ctx context.Context, maxBatchSize, maxConcurrency int, items []A, f func(context.Context, []A) error) error { + if len(items) == 0 { + return nil + } + batches := lo.Chunk(items, maxBatchSize) + workerCtx, cancel := context.WithCancel(ctx) + defer cancel() + batchChan := make(chan []A) + errChan := make(chan error) + wg := sync.WaitGroup{} + for i := 0; i < maxConcurrency && i < len(batches); i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case batch := <-batchChan: + err := f(workerCtx, batch) + if err != nil { + select { + case errChan <- err: + case <-workerCtx.Done(): + return + } + } + case <-workerCtx.Done(): + return + } + } + }() + } + + // work sender + go func() { + defer close(errChan) + defer wg.Wait() + for _, batch := range batches { + select { + case batchChan <- batch: + case <-workerCtx.Done(): + return + } + } + cancel() + }() + + // receive any errors + select { + case err, ok := <-errChan: + if !ok { + // we finished without any errors, congratulations + return nil + } + // short circuit on the first error we get + // canceling the worker ctx and thus all workers, + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/routing/http/internal/goroutines_test.go b/routing/http/internal/goroutines_test.go new file mode 100644 index 0000000000..a2ee487301 --- /dev/null +++ b/routing/http/internal/goroutines_test.go @@ -0,0 +1,107 @@ +package internal + +import ( + "context" + "errors" + "reflect" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func sequence(n int) (items []int) { + for i := 0; i < n; i++ { + items = append(items, i+1) + } + return +} + +func singleItemBatches(items []int) (batches [][]int) { + for _, item := range items { + batches = append(batches, []int{item}) + } + return +} + +func TestDoBatch(t *testing.T) { + cases := []struct { + name string + items []int + maxBatchSize int + maxConcurrency int + shouldErrOnce bool + + expBatches [][]int + expErrContains string + }{ + { + name: "no items", + }, + { + name: "batch size = 1", + items: sequence(3), + maxBatchSize: 1, + maxConcurrency: 1, + expBatches: [][]int{{1}, {2}, {3}}, + }, + { + name: "batch size > 1", + 
items: sequence(6), + maxBatchSize: 2, + maxConcurrency: 2, + expBatches: [][]int{{1, 2}, {3, 4}, {5, 6}}, + }, + { + name: "a lot of items and concurrency", + items: sequence(1000), + maxBatchSize: 1, + maxConcurrency: 100, + expBatches: singleItemBatches(sequence(1000)), + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + var mut sync.Mutex + var batches [][]int + + var onceMut sync.Mutex + var errored bool + + DoBatch(ctx, c.maxBatchSize, c.maxConcurrency, c.items, func(ctx context.Context, batch []int) error { + if c.shouldErrOnce { + onceMut.Lock() + if !errored { + errored = true + defer onceMut.Unlock() + return errors.New("boom") + } + onceMut.Unlock() + } + + mut.Lock() + batches = append(batches, batch) + mut.Unlock() + return nil + }) + + require.Equal(t, len(c.expBatches), len(batches), "expected equal len %v %v", c.expBatches, batches) + for _, expBatch := range c.expBatches { + requireContainsBatch(t, batches, expBatch) + } + }) + } +} + +func requireContainsBatch(t *testing.T, batches [][]int, batch []int) { + for _, b := range batches { + if reflect.DeepEqual(batch, b) { + return + } + } + t.Fatalf("expected batch %v, but not found in batches %v", batch, batches) +} diff --git a/routing/http/server/server.go b/routing/http/server/server.go new file mode 100644 index 0000000000..ca650ff285 --- /dev/null +++ b/routing/http/server/server.go @@ -0,0 +1,186 @@ +package server + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/gorilla/mux" + "github.com/ipfs/boxo/routing/http/internal/drjson" + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + + logging "github.com/ipfs/go-log/v2" +) + +var logger = logging.Logger("service/server/delegatedrouting") + +const ProvidePath = "/routing/v1/providers/" +const FindProvidersPath = "/routing/v1/providers/{cid}" + +type ContentRouter interface { + FindProviders(ctx context.Context, key cid.Cid) ([]types.ProviderResponse, error) + ProvideBitswap(ctx context.Context, req *BitswapWriteProvideRequest) (time.Duration, error) + Provide(ctx context.Context, req *WriteProvideRequest) (types.ProviderResponse, error) +} + +type BitswapWriteProvideRequest struct { + Keys []cid.Cid + Timestamp time.Time + AdvisoryTTL time.Duration + ID peer.ID + Addrs []multiaddr.Multiaddr +} + +type WriteProvideRequest struct { + Protocol string + Schema string + Bytes []byte +} + +type serverOption func(s *server) + +func Handler(svc ContentRouter, opts ...serverOption) http.Handler { + server := &server{ + svc: svc, + } + + for _, opt := range opts { + opt(server) + } + + r := mux.NewRouter() + r.HandleFunc(ProvidePath, server.provide).Methods(http.MethodPut) + r.HandleFunc(FindProvidersPath, server.findProviders).Methods(http.MethodGet) + + return r +} + +type server struct { + svc ContentRouter +} + +func (s *server) provide(w http.ResponseWriter, httpReq *http.Request) { + req := types.WriteProvidersRequest{} + err := json.NewDecoder(httpReq.Body).Decode(&req) + _ = httpReq.Body.Close() + if err != nil { + writeErr(w, "Provide", http.StatusBadRequest, fmt.Errorf("invalid request: %w", err)) + return + } + + resp := types.WriteProvidersResponse{} + + for i, prov := range req.Providers { + switch v := prov.(type) { + case *types.WriteBitswapProviderRecord: + err := v.Verify() + if err 
!= nil { + logErr("Provide", "signature verification failed", err) + writeErr(w, "Provide", http.StatusForbidden, errors.New("signature verification failed")) + return + } + + keys := make([]cid.Cid, len(v.Payload.Keys)) + for i, k := range v.Payload.Keys { + keys[i] = k.Cid + + } + addrs := make([]multiaddr.Multiaddr, len(v.Payload.Addrs)) + for i, a := range v.Payload.Addrs { + addrs[i] = a.Multiaddr + } + advisoryTTL, err := s.svc.ProvideBitswap(httpReq.Context(), &BitswapWriteProvideRequest{ + Keys: keys, + Timestamp: v.Payload.Timestamp.Time, + AdvisoryTTL: v.Payload.AdvisoryTTL.Duration, + ID: *v.Payload.ID, + Addrs: addrs, + }) + if err != nil { + writeErr(w, "Provide", http.StatusInternalServerError, fmt.Errorf("delegate error: %w", err)) + return + } + resp.ProvideResults = append(resp.ProvideResults, + &types.WriteBitswapProviderRecordResponse{ + Protocol: v.Protocol, + Schema: v.Schema, + AdvisoryTTL: &types.Duration{Duration: advisoryTTL}, + }, + ) + case *types.UnknownProviderRecord: + provResp, err := s.svc.Provide(httpReq.Context(), &WriteProvideRequest{ + Protocol: v.Protocol, + Schema: v.Schema, + Bytes: v.Bytes, + }) + if err != nil { + writeErr(w, "Provide", http.StatusInternalServerError, fmt.Errorf("delegate error: %w", err)) + return + } + resp.ProvideResults = append(resp.ProvideResults, provResp) + default: + writeErr(w, "Provide", http.StatusBadRequest, fmt.Errorf("provider record %d does not contain a protocol", i)) + return + } + } + writeResult(w, "Provide", resp) +} + +func (s *server) findProviders(w http.ResponseWriter, httpReq *http.Request) { + vars := mux.Vars(httpReq) + cidStr := vars["cid"] + cid, err := cid.Decode(cidStr) + if err != nil { + writeErr(w, "FindProviders", http.StatusBadRequest, fmt.Errorf("unable to parse CID: %w", err)) + return + } + providers, err := s.svc.FindProviders(httpReq.Context(), cid) + if err != nil { + writeErr(w, "FindProviders", http.StatusInternalServerError, fmt.Errorf("delegate error: %w", err)) + return + } + response := types.ReadProvidersResponse{Providers: providers} + writeResult(w, "FindProviders", response) +} + +func writeResult(w http.ResponseWriter, method string, val any) { + w.Header().Add("Content-Type", "application/json") + + // keep the marshaling separate from the writing, so we can distinguish bugs (which surface as 500) + // from transient network issues (which surface as transport errors) + b, err := drjson.MarshalJSONBytes(val) + if err != nil { + writeErr(w, method, http.StatusInternalServerError, fmt.Errorf("marshaling response: %w", err)) + return + } + + _, err = io.Copy(w, bytes.NewBuffer(b)) + if err != nil { + logErr("Provide", "writing response body", err) + } +} + +func writeErr(w http.ResponseWriter, method string, statusCode int, cause error) { + w.WriteHeader(statusCode) + causeStr := cause.Error() + if len(causeStr) > 1024 { + causeStr = causeStr[:1024] + } + _, err := w.Write([]byte(causeStr)) + if err != nil { + logErr(method, "error writing error cause", err) + return + } +} + +func logErr(method, msg string, err error) { + logger.Infow(msg, "Method", method, "Error", err) +} diff --git a/routing/http/server/server_test.go b/routing/http/server/server_test.go new file mode 100644 index 0000000000..5609b2ed38 --- /dev/null +++ b/routing/http/server/server_test.go @@ -0,0 +1,63 @@ +package server + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/mock" + 
"github.com/stretchr/testify/require" +) + +func TestHeaders(t *testing.T) { + router := &mockContentRouter{} + server := httptest.NewServer(Handler(router)) + t.Cleanup(server.Close) + serverAddr := "http://" + server.Listener.Addr().String() + + result := []types.ProviderResponse{ + &types.ReadBitswapProviderRecord{ + Protocol: "transport-bitswap", + Schema: types.SchemaBitswap, + }, + } + + c := "baeabep4vu3ceru7nerjjbk37sxb7wmftteve4hcosmyolsbsiubw2vr6pqzj6mw7kv6tbn6nqkkldnklbjgm5tzbi4hkpkled4xlcr7xz4bq" + cb, err := cid.Decode(c) + require.NoError(t, err) + + router.On("FindProviders", mock.Anything, cb). + Return(result, nil) + + resp, err := http.Get(serverAddr + ProvidePath + c) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + header := resp.Header.Get("Content-Type") + require.Equal(t, "application/json", header) + + resp, err = http.Get(serverAddr + ProvidePath + "BAD_CID") + require.NoError(t, err) + require.Equal(t, 400, resp.StatusCode) + header = resp.Header.Get("Content-Type") + require.Equal(t, "text/plain; charset=utf-8", header) +} + +type mockContentRouter struct{ mock.Mock } + +func (m *mockContentRouter) FindProviders(ctx context.Context, key cid.Cid) ([]types.ProviderResponse, error) { + args := m.Called(ctx, key) + return args.Get(0).([]types.ProviderResponse), args.Error(1) +} +func (m *mockContentRouter) ProvideBitswap(ctx context.Context, req *BitswapWriteProvideRequest) (time.Duration, error) { + args := m.Called(ctx, req) + return args.Get(0).(time.Duration), args.Error(1) +} + +func (m *mockContentRouter) Provide(ctx context.Context, req *WriteProvideRequest) (types.ProviderResponse, error) { + args := m.Called(ctx, req) + return args.Get(0).(types.ProviderResponse), args.Error(1) +} diff --git a/routing/http/types/ipfs.go b/routing/http/types/ipfs.go new file mode 100644 index 0000000000..1e139fa226 --- /dev/null +++ b/routing/http/types/ipfs.go @@ -0,0 +1,42 @@ +package types + +import ( + "encoding/json" + + "github.com/ipfs/boxo/routing/http/internal/drjson" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multiaddr" +) + +type CID struct{ cid.Cid } + +func (c *CID) MarshalJSON() ([]byte, error) { return drjson.MarshalJSONBytes(c.String()) } +func (c *CID) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + decodedCID, err := cid.Decode(s) + if err != nil { + return err + } + c.Cid = decodedCID + return nil +} + +type Multiaddr struct{ multiaddr.Multiaddr } + +func (m *Multiaddr) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + ma, err := multiaddr.NewMultiaddr(s) + if err != nil { + return err + } + m.Multiaddr = ma + return nil +} diff --git a/routing/http/types/provider.go b/routing/http/types/provider.go new file mode 100644 index 0000000000..ef9e95ada7 --- /dev/null +++ b/routing/http/types/provider.go @@ -0,0 +1,142 @@ +package types + +import ( + "encoding/json" +) + +// WriteProviderRecord is a type that enforces structs to imlement it to avoid confusion +type WriteProviderRecord interface { + IsWriteProviderRecord() +} + +// ReadProviderRecord is a type that enforces structs to imlement it to avoid confusion +type ReadProviderRecord interface { + IsReadProviderRecord() +} + +type WriteProvidersRequest struct { + Providers []WriteProviderRecord +} + +func (r *WriteProvidersRequest) UnmarshalJSON(b []byte) error { + type wpr struct { + Providers []json.RawMessage + } + var tempWPR wpr + err := 
json.Unmarshal(b, &tempWPR) + if err != nil { + return err + } + + for _, provBytes := range tempWPR.Providers { + var rawProv UnknownProviderRecord + err := json.Unmarshal(provBytes, &rawProv) + if err != nil { + return err + } + + switch rawProv.Schema { + case SchemaBitswap: + var prov WriteBitswapProviderRecord + err := json.Unmarshal(rawProv.Bytes, &prov) + if err != nil { + return err + } + r.Providers = append(r.Providers, &prov) + default: + var prov UnknownProviderRecord + err := json.Unmarshal(b, &prov) + if err != nil { + return err + } + r.Providers = append(r.Providers, &prov) + } + } + return nil +} + +// ProviderResponse is implemented for any ProviderResponse. It needs to have a Protocol field. +type ProviderResponse interface { + GetProtocol() string + GetSchema() string +} + +// WriteProvidersResponse is the result of a Provide operation +type WriteProvidersResponse struct { + ProvideResults []ProviderResponse +} + +// rawWriteProvidersResponse is a helper struct to make possible to parse WriteProvidersResponse's +type rawWriteProvidersResponse struct { + ProvideResults []json.RawMessage +} + +func (r *WriteProvidersResponse) UnmarshalJSON(b []byte) error { + var tempWPR rawWriteProvidersResponse + err := json.Unmarshal(b, &tempWPR) + if err != nil { + return err + } + + for _, provBytes := range tempWPR.ProvideResults { + var rawProv UnknownProviderRecord + err := json.Unmarshal(provBytes, &rawProv) + if err != nil { + return err + } + + switch rawProv.Schema { + case SchemaBitswap: + var prov WriteBitswapProviderRecordResponse + err := json.Unmarshal(rawProv.Bytes, &prov) + if err != nil { + return err + } + r.ProvideResults = append(r.ProvideResults, &prov) + default: + r.ProvideResults = append(r.ProvideResults, &rawProv) + } + } + + return nil +} + +// ReadProvidersResponse is the result of a Provide request +type ReadProvidersResponse struct { + Providers []ProviderResponse +} + +// rawReadProvidersResponse is a helper struct to make possible to parse ReadProvidersResponse's +type rawReadProvidersResponse struct { + Providers []json.RawMessage +} + +func (r *ReadProvidersResponse) UnmarshalJSON(b []byte) error { + var tempFPR rawReadProvidersResponse + err := json.Unmarshal(b, &tempFPR) + if err != nil { + return err + } + + for _, provBytes := range tempFPR.Providers { + var readProv UnknownProviderRecord + err := json.Unmarshal(provBytes, &readProv) + if err != nil { + return err + } + + switch readProv.Schema { + case SchemaBitswap: + var prov ReadBitswapProviderRecord + err := json.Unmarshal(readProv.Bytes, &prov) + if err != nil { + return err + } + r.Providers = append(r.Providers, &prov) + default: + r.Providers = append(r.Providers, &readProv) + } + + } + return nil +} diff --git a/routing/http/types/provider_bitswap.go b/routing/http/types/provider_bitswap.go new file mode 100644 index 0000000000..66243dd5dc --- /dev/null +++ b/routing/http/types/provider_bitswap.go @@ -0,0 +1,184 @@ +package types + +import ( + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + + "github.com/ipfs/boxo/routing/http/internal/drjson" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multibase" +) + +const SchemaBitswap = "bitswap" + +var _ WriteProviderRecord = &WriteBitswapProviderRecord{} + +// WriteBitswapProviderRecord is used when we want to add a new provider record that is using bitswap. 
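+//
+// A rough usage sketch (peerID, privKey, and someCid are placeholders, and the
+// peer ID must be derivable from privKey's public key for Verify to succeed):
+//
+//	rec := &WriteBitswapProviderRecord{Protocol: "transport-bitswap", Schema: SchemaBitswap}
+//	rec.Payload.ID = &peerID
+//	rec.Payload.Keys = []CID{{Cid: someCid}}
+//	if err := rec.Sign(peerID, privKey); err != nil {
+//		// handle error
+//	}
+//	if err := rec.Verify(); err != nil {
+//		// handle error
+//	}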
+type WriteBitswapProviderRecord struct { + Protocol string + Schema string + Signature string + + // this content must be untouched because it is signed and we need to verify it + RawPayload json.RawMessage `json:"Payload"` + Payload BitswapPayload `json:"-"` +} + +type BitswapPayload struct { + Keys []CID + Timestamp *Time + AdvisoryTTL *Duration + ID *peer.ID + Addrs []Multiaddr +} + +func (*WriteBitswapProviderRecord) IsWriteProviderRecord() {} + +type tmpBWPR WriteBitswapProviderRecord + +func (p *WriteBitswapProviderRecord) UnmarshalJSON(b []byte) error { + var bwp tmpBWPR + err := json.Unmarshal(b, &bwp) + if err != nil { + return err + } + + p.Protocol = bwp.Protocol + p.Schema = bwp.Schema + p.Signature = bwp.Signature + p.RawPayload = bwp.RawPayload + + return json.Unmarshal(bwp.RawPayload, &p.Payload) +} + +func (p *WriteBitswapProviderRecord) IsSigned() bool { + return p.Signature != "" +} + +func (p *WriteBitswapProviderRecord) setRawPayload() error { + payloadBytes, err := drjson.MarshalJSONBytes(p.Payload) + if err != nil { + return fmt.Errorf("marshaling bitswap write provider payload: %w", err) + } + + p.RawPayload = payloadBytes + + return nil +} + +func (p *WriteBitswapProviderRecord) Sign(peerID peer.ID, key crypto.PrivKey) error { + if p.IsSigned() { + return errors.New("already signed") + } + + if key == nil { + return errors.New("no key provided") + } + + sid, err := peer.IDFromPrivateKey(key) + if err != nil { + return err + } + if sid != peerID { + return errors.New("not the correct signing key") + } + + err = p.setRawPayload() + if err != nil { + return err + } + hash := sha256.Sum256([]byte(p.RawPayload)) + sig, err := key.Sign(hash[:]) + if err != nil { + return err + } + + sigStr, err := multibase.Encode(multibase.Base64, sig) + if err != nil { + return fmt.Errorf("multibase-encoding signature: %w", err) + } + + p.Signature = sigStr + return nil +} + +func (p *WriteBitswapProviderRecord) Verify() error { + if !p.IsSigned() { + return errors.New("not signed") + } + + if p.Payload.ID == nil { + return errors.New("peer ID must be specified") + } + + // note that we only generate and set the payload if it hasn't already been set + // to allow for passing through the payload untouched if it is already provided + if p.RawPayload == nil { + err := p.setRawPayload() + if err != nil { + return err + } + } + + pk, err := p.Payload.ID.ExtractPublicKey() + if err != nil { + return fmt.Errorf("extracing public key from peer ID: %w", err) + } + + _, sigBytes, err := multibase.Decode(p.Signature) + if err != nil { + return fmt.Errorf("multibase-decoding signature to verify: %w", err) + } + + hash := sha256.Sum256([]byte(p.RawPayload)) + ok, err := pk.Verify(hash[:], sigBytes) + if err != nil { + return fmt.Errorf("verifying hash with signature: %w", err) + } + if !ok { + return errors.New("signature failed to verify") + } + + return nil +} + +var _ ProviderResponse = &WriteBitswapProviderRecordResponse{} + +// WriteBitswapProviderRecordResponse will be returned as a result of WriteBitswapProviderRecord +type WriteBitswapProviderRecordResponse struct { + Protocol string + Schema string + AdvisoryTTL *Duration +} + +func (wbprr *WriteBitswapProviderRecordResponse) GetProtocol() string { + return wbprr.Protocol +} + +func (wbprr *WriteBitswapProviderRecordResponse) GetSchema() string { + return wbprr.Schema +} + +var _ ReadProviderRecord = &ReadBitswapProviderRecord{} +var _ ProviderResponse = &ReadBitswapProviderRecord{} + +// ReadBitswapProviderRecord is a provider result with 
parameters for bitswap providers +type ReadBitswapProviderRecord struct { + Protocol string + Schema string + ID *peer.ID + Addrs []Multiaddr +} + +func (rbpr *ReadBitswapProviderRecord) GetProtocol() string { + return rbpr.Protocol +} + +func (rbpr *ReadBitswapProviderRecord) GetSchema() string { + return rbpr.Schema +} + +func (*ReadBitswapProviderRecord) IsReadProviderRecord() {} diff --git a/routing/http/types/provider_unknown.go b/routing/http/types/provider_unknown.go new file mode 100644 index 0000000000..3dadc0e9b9 --- /dev/null +++ b/routing/http/types/provider_unknown.go @@ -0,0 +1,61 @@ +package types + +import ( + "encoding/json" + + "github.com/ipfs/boxo/routing/http/internal/drjson" +) + +var _ ReadProviderRecord = &UnknownProviderRecord{} +var _ WriteProviderRecord = &UnknownProviderRecord{} +var _ ProviderResponse = &UnknownProviderRecord{} + +// UnknownProviderRecord is used when we cannot parse the provider record using `GetProtocol` +type UnknownProviderRecord struct { + Protocol string + Schema string + Bytes []byte +} + +func (u *UnknownProviderRecord) GetProtocol() string { + return u.Protocol +} + +func (u *UnknownProviderRecord) GetSchema() string { + return u.Schema +} + +func (u *UnknownProviderRecord) IsReadProviderRecord() {} +func (u UnknownProviderRecord) IsWriteProviderRecord() {} + +func (u *UnknownProviderRecord) UnmarshalJSON(b []byte) error { + m := map[string]interface{}{} + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + ps, ok := m["Protocol"].(string) + if ok { + u.Protocol = ps + } + schema, ok := m["Schema"].(string) + if ok { + u.Schema = schema + } + + u.Bytes = b + + return nil +} + +func (u UnknownProviderRecord) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{} + err := json.Unmarshal(u.Bytes, &m) + if err != nil { + return nil, err + } + m["Protocol"] = u.Protocol + m["Schema"] = u.Schema + + return drjson.MarshalJSONBytes(m) +} diff --git a/routing/http/types/time.go b/routing/http/types/time.go new file mode 100644 index 0000000000..1d938807ed --- /dev/null +++ b/routing/http/types/time.go @@ -0,0 +1,36 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/ipfs/boxo/routing/http/internal/drjson" +) + +type Time struct{ time.Time } + +func (t *Time) MarshalJSON() ([]byte, error) { + return drjson.MarshalJSONBytes(t.Time.UnixMilli()) +} +func (t *Time) UnmarshalJSON(b []byte) error { + var timestamp int64 + err := json.Unmarshal(b, ×tamp) + if err != nil { + return err + } + t.Time = time.UnixMilli(timestamp) + return nil +} + +type Duration struct{ time.Duration } + +func (d *Duration) MarshalJSON() ([]byte, error) { return drjson.MarshalJSONBytes(d.Duration) } +func (d *Duration) UnmarshalJSON(b []byte) error { + var dur int64 + err := json.Unmarshal(b, &dur) + if err != nil { + return err + } + d.Duration = time.Duration(dur) + return nil +} diff --git a/routing/mock/centralized_client.go b/routing/mock/centralized_client.go new file mode 100644 index 0000000000..ac3f938ccd --- /dev/null +++ b/routing/mock/centralized_client.go @@ -0,0 +1,88 @@ +package mockrouting + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + ma "github.com/multiformats/go-multiaddr" +) + +var log = logging.Logger("mockrouter") + +type client struct { + vs routing.ValueStore + server server + peer tnet.Identity +} + +// PutValue 
FIXME(brian): is this method meant to simulate putting a value into the network? +func (c *client) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) error { + log.Debugf("PutValue: %s", key) + return c.vs.PutValue(ctx, key, val, opts...) +} + +// GetValue FIXME(brian): is this method meant to simulate getting a value from the network? +func (c *client) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { + log.Debugf("GetValue: %s", key) + return c.vs.GetValue(ctx, key, opts...) +} + +func (c *client) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { + log.Debugf("SearchValue: %s", key) + return c.vs.SearchValue(ctx, key, opts...) +} + +func (c *client) FindProviders(ctx context.Context, key cid.Cid) ([]peer.AddrInfo, error) { + return c.server.Providers(key), nil +} + +func (c *client) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInfo, error) { + log.Debugf("FindPeer: %s", pid) + return peer.AddrInfo{}, nil +} + +func (c *client) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo { + out := make(chan peer.AddrInfo) + go func() { + defer close(out) + for i, p := range c.server.Providers(k) { + if max <= i { + return + } + select { + case out <- p: + case <-ctx.Done(): + return + } + } + }() + return out +} + +// Provide returns once the message is on the network. Value is not necessarily +// visible yet. +func (c *client) Provide(_ context.Context, key cid.Cid, brd bool) error { + if !brd { + return nil + } + info := peer.AddrInfo{ + ID: c.peer.ID(), + Addrs: []ma.Multiaddr{c.peer.Address()}, + } + return c.server.Announce(info, key) +} + +func (c *client) Ping(ctx context.Context, p peer.ID) (time.Duration, error) { + return 0, nil +} + +func (c *client) Bootstrap(context.Context) error { + return nil +} + +var _ routing.Routing = &client{} diff --git a/routing/mock/centralized_server.go b/routing/mock/centralized_server.go new file mode 100644 index 0000000000..d55de70814 --- /dev/null +++ b/routing/mock/centralized_server.go @@ -0,0 +1,91 @@ +package mockrouting + +import ( + "context" + "math/rand" + "sync" + "time" + + "github.com/ipfs/boxo/routing/offline" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" +) + +// server is the mockrouting.Client's private interface to the routing server +type server interface { + Announce(peer.AddrInfo, cid.Cid) error + Providers(cid.Cid) []peer.AddrInfo + + Server +} + +// s is an implementation of the private server interface +type s struct { + delayConf DelayConfig + + lock sync.RWMutex + providers map[string]map[peer.ID]providerRecord +} + +type providerRecord struct { + Peer peer.AddrInfo + Created time.Time +} + +func (rs *s) Announce(p peer.AddrInfo, c cid.Cid) error { + rs.lock.Lock() + defer rs.lock.Unlock() + + k := c.KeyString() + + _, ok := rs.providers[k] + if !ok { + rs.providers[k] = make(map[peer.ID]providerRecord) + } + rs.providers[k][p.ID] = providerRecord{ + Created: time.Now(), + Peer: p, + } + return nil +} + +func (rs *s) Providers(c cid.Cid) []peer.AddrInfo { + rs.delayConf.Query.Wait() // before locking + + rs.lock.RLock() + defer rs.lock.RUnlock() + k := c.KeyString() + + var ret []peer.AddrInfo + records, ok := rs.providers[k] + if !ok { + return ret + } + for _, r := range records { + if time.Since(r.Created) > 
rs.delayConf.ValueVisibility.Get() { + ret = append(ret, r.Peer) + } + } + + for i := range ret { + j := rand.Intn(i + 1) + ret[i], ret[j] = ret[j], ret[i] + } + + return ret +} + +func (rs *s) Client(p tnet.Identity) Client { + return rs.ClientWithDatastore(context.Background(), p, dssync.MutexWrap(ds.NewMapDatastore())) +} + +func (rs *s) ClientWithDatastore(_ context.Context, p tnet.Identity, datastore ds.Datastore) Client { + return &client{ + peer: p, + vs: offline.NewOfflineRouter(datastore, MockValidator{}), + server: rs, + } +} diff --git a/routing/mock/centralized_test.go b/routing/mock/centralized_test.go new file mode 100644 index 0000000000..403e21350f --- /dev/null +++ b/routing/mock/centralized_test.go @@ -0,0 +1,176 @@ +package mockrouting + +import ( + "context" + "testing" + "time" + + u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-cid" + delay "github.com/ipfs/go-ipfs-delay" + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestKeyNotFound(t *testing.T) { + + var pi = tnet.RandIdentityOrFatal(t) + var key = cid.NewCidV0(u.Hash([]byte("mock key"))) + var ctx = context.Background() + + rs := NewServer() + providers := rs.Client(pi).FindProvidersAsync(ctx, key, 10) + _, ok := <-providers + if ok { + t.Fatal("should be closed") + } +} + +func TestClientFindProviders(t *testing.T) { + pi := tnet.RandIdentityOrFatal(t) + rs := NewServer() + client := rs.Client(pi) + + k := cid.NewCidV0(u.Hash([]byte("hello"))) + err := client.Provide(context.Background(), k, true) + if err != nil { + t.Fatal(err) + } + + // This is bad... but simulating networks is hard + time.Sleep(time.Millisecond * 300) + max := 100 + + providersFromClient := client.FindProvidersAsync(context.Background(), k, max) + isInClient := false + for p := range providersFromClient { + if p.ID == pi.ID() { + isInClient = true + } + } + if !isInClient { + t.Fatal("Despite client providing key, client didn't receive peer when finding providers") + } +} + +func TestClientOverMax(t *testing.T) { + rs := NewServer() + k := cid.NewCidV0(u.Hash([]byte("hello"))) + numProvidersForHelloKey := 100 + for i := 0; i < numProvidersForHelloKey; i++ { + pi := tnet.RandIdentityOrFatal(t) + err := rs.Client(pi).Provide(context.Background(), k, true) + if err != nil { + t.Fatal(err) + } + } + + max := 10 + pi := tnet.RandIdentityOrFatal(t) + client := rs.Client(pi) + + providersFromClient := client.FindProvidersAsync(context.Background(), k, max) + i := 0 + for range providersFromClient { + i++ + } + if i != max { + t.Fatal("Too many providers returned") + } +} + +// TODO does dht ensure won't receive self as a provider? probably not. +func TestCanceledContext(t *testing.T) { + rs := NewServer() + k := cid.NewCidV0(u.Hash([]byte("hello"))) + + // avoid leaking goroutine, without using the context to signal + // (we want the goroutine to keep trying to publish on a + // cancelled context until we've tested it doesnt do anything.) 
+ done := make(chan struct{}) + defer func() { done <- struct{}{} }() + + t.Log("async'ly announce infinite stream of providers for key") + i := 0 + go func() { // infinite stream + for { + select { + case <-done: + t.Log("exiting async worker") + return + default: + } + + pi, err := tnet.RandIdentity() + if err != nil { + t.Error(err) + } + err = rs.Client(pi).Provide(context.Background(), k, true) + if err != nil { + t.Error(err) + } + i++ + } + }() + + local := tnet.RandIdentityOrFatal(t) + client := rs.Client(local) + + t.Log("warning: max is finite so this test is non-deterministic") + t.Log("context cancellation could simply take lower priority") + t.Log("and result in receiving the max number of results") + max := 1000 + + t.Log("cancel the context before consuming") + ctx, cancelFunc := context.WithCancel(context.Background()) + cancelFunc() + providers := client.FindProvidersAsync(ctx, k, max) + + numProvidersReturned := 0 + for range providers { + numProvidersReturned++ + } + t.Log(numProvidersReturned) + + if numProvidersReturned == max { + t.Fatal("Context cancel had no effect") + } +} + +func TestValidAfter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pi := tnet.RandIdentityOrFatal(t) + key := cid.NewCidV0(u.Hash([]byte("mock key"))) + conf := DelayConfig{ + ValueVisibility: delay.Fixed(1 * time.Hour), + Query: delay.Fixed(0), + } + + rs := NewServerWithDelay(conf) + + rs.Client(pi).Provide(ctx, key, true) + + var providers []peer.AddrInfo + max := 100 + providersChan := rs.Client(pi).FindProvidersAsync(ctx, key, max) + for p := range providersChan { + providers = append(providers, p) + } + if len(providers) > 0 { + t.Fail() + } + + conf.ValueVisibility.Set(0) + time.Sleep(100 * time.Millisecond) + + providersChan = rs.Client(pi).FindProvidersAsync(ctx, key, max) + t.Log("providers", providers) + for p := range providersChan { + providers = append(providers, p) + } + if len(providers) != 1 { + t.Fail() + } +} diff --git a/routing/mock/interface.go b/routing/mock/interface.go new file mode 100644 index 0000000000..35430a72c5 --- /dev/null +++ b/routing/mock/interface.go @@ -0,0 +1,59 @@ +// Package mockrouting provides a virtual routing server. To use it, +// create a virtual routing server and use the Client() method to get a +// routing client (Routing). The server quacks like a DHT but is +// really a local in-memory hash table. +package mockrouting + +import ( + "context" + + ds "github.com/ipfs/go-datastore" + delay "github.com/ipfs/go-ipfs-delay" + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" +) + +// MockValidator is a record validator that always returns success. +type MockValidator struct{} + +func (MockValidator) Validate(_ string, _ []byte) error { return nil } +func (MockValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } + +// Server provides mockrouting Clients +type Server interface { + Client(p tnet.Identity) Client + ClientWithDatastore(context.Context, tnet.Identity, ds.Datastore) Client +} + +// Client implements Routing +type Client interface { + routing.Routing +} + +// NewServer returns a mockrouting Server +func NewServer() Server { + return NewServerWithDelay(DelayConfig{ + ValueVisibility: delay.Fixed(0), + Query: delay.Fixed(0), + }) +} + +// NewServerWithDelay returns a mockrouting Server with a delay! 
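+//
+// For example (a sketch; the delay values are arbitrary and someIdentity is a
+// placeholder tnet.Identity):
+//
+//	rs := NewServerWithDelay(DelayConfig{
+//		ValueVisibility: delay.Fixed(time.Second),
+//		Query:           delay.Fixed(0),
+//	})
+//	client := rs.Client(someIdentity)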
+func NewServerWithDelay(conf DelayConfig) Server {
+	return &s{
+		providers: make(map[string]map[peer.ID]providerRecord),
+		delayConf: conf,
+	}
+}
+
+// DelayConfig can be used to configure the fake delays of a mock server.
+// Use with NewServerWithDelay().
+type DelayConfig struct {
+	// ValueVisibility is the time it takes for a value to be visible in the network
+	// FIXME there _must_ be a better term for this
+	ValueVisibility delay.D
+
+	// Query is the time it takes to receive a response from a routing query
+	Query delay.D
+}
diff --git a/routing/none/none_client.go b/routing/none/none_client.go
new file mode 100644
index 0000000000..6f400b54a1
--- /dev/null
+++ b/routing/none/none_client.go
@@ -0,0 +1,55 @@
+// Package nilrouting implements a routing client that does nothing.
+package nilrouting
+
+import (
+	"context"
+	"errors"
+
+	"github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	record "github.com/libp2p/go-libp2p-record"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/routing"
+)
+
+type nilclient struct {
+}
+
+func (c *nilclient) PutValue(_ context.Context, _ string, _ []byte, _ ...routing.Option) error {
+	return nil
+}
+
+func (c *nilclient) GetValue(_ context.Context, _ string, _ ...routing.Option) ([]byte, error) {
+	return nil, errors.New("tried GetValue from nil routing")
+}
+
+func (c *nilclient) SearchValue(_ context.Context, _ string, _ ...routing.Option) (<-chan []byte, error) {
+	return nil, errors.New("tried SearchValue from nil routing")
+}
+
+func (c *nilclient) FindPeer(_ context.Context, _ peer.ID) (peer.AddrInfo, error) {
+	return peer.AddrInfo{}, nil
+}
+
+func (c *nilclient) FindProvidersAsync(_ context.Context, _ cid.Cid, _ int) <-chan peer.AddrInfo {
+	out := make(chan peer.AddrInfo)
+	defer close(out)
+	return out
+}
+
+func (c *nilclient) Provide(_ context.Context, _ cid.Cid, _ bool) error {
+	return nil
+}
+
+func (c *nilclient) Bootstrap(_ context.Context) error {
+	return nil
+}
+
+// ConstructNilRouting creates a Routing client which does nothing.
+func ConstructNilRouting(_ context.Context, _ host.Host, _ ds.Batching, _ record.Validator) (routing.Routing, error) {
+	return &nilclient{}, nil
+}
+
+// ensure nilclient satisfies interface
+var _ routing.Routing = &nilclient{}
diff --git a/routing/offline/offline.go b/routing/offline/offline.go
new file mode 100644
index 0000000000..b9d5281ff0
--- /dev/null
+++ b/routing/offline/offline.go
@@ -0,0 +1,126 @@
+// Package offline implements Routing with a client which
+// is only able to perform offline operations.
+package offline
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+	dshelp "github.com/ipfs/boxo/datastore/dshelp"
+	"github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	record "github.com/libp2p/go-libp2p-record"
+	pb "github.com/libp2p/go-libp2p-record/pb"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/routing"
+)
+
+// ErrOffline is returned when trying to perform operations that
+// require connectivity.
+var ErrOffline = errors.New("routing system in offline mode")
+
+// NewOfflineRouter returns a Routing implementation which only performs
+// offline operations. It allows putting and getting signed DHT
+// records to and from the local datastore.
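+//
+// A minimal sketch (myValidator is a placeholder record.Validator):
+//
+//	r := NewOfflineRouter(ds.NewMapDatastore(), myValidator)
+//	err := r.PutValue(ctx, "/myns/mykey", []byte("value"))
+//	// GetValue(ctx, "/myns/mykey") then returns the stored record, while
+//	// network operations such as FindPeer fail with ErrOffline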
+func NewOfflineRouter(dstore ds.Datastore, validator record.Validator) routing.Routing { + return &offlineRouting{ + datastore: dstore, + validator: validator, + } +} + +// offlineRouting implements the Routing interface, +// but only provides the capability to Put and Get signed dht +// records to and from the local datastore. +type offlineRouting struct { + datastore ds.Datastore + validator record.Validator +} + +func (c *offlineRouting) PutValue(ctx context.Context, key string, val []byte, _ ...routing.Option) error { + if err := c.validator.Validate(key, val); err != nil { + return err + } + if old, err := c.GetValue(ctx, key); err == nil { + // be idempotent to be nice. + if bytes.Equal(old, val) { + return nil + } + // check to see if the older record is better + i, err := c.validator.Select(key, [][]byte{val, old}) + if err != nil { + // this shouldn't happen for validated records. + return err + } + if i != 0 { + return errors.New("can't replace a newer record with an older one") + } + } + rec := record.MakePutRecord(key, val) + data, err := proto.Marshal(rec) + if err != nil { + return err + } + + return c.datastore.Put(ctx, dshelp.NewKeyFromBinary([]byte(key)), data) +} + +func (c *offlineRouting) GetValue(ctx context.Context, key string, _ ...routing.Option) ([]byte, error) { + buf, err := c.datastore.Get(ctx, dshelp.NewKeyFromBinary([]byte(key))) + if err != nil { + return nil, err + } + + rec := new(pb.Record) + err = proto.Unmarshal(buf, rec) + if err != nil { + return nil, err + } + val := rec.GetValue() + + err = c.validator.Validate(key, val) + if err != nil { + return nil, err + } + return val, nil +} + +func (c *offlineRouting) SearchValue(ctx context.Context, key string, _ ...routing.Option) (<-chan []byte, error) { + out := make(chan []byte, 1) + go func() { + defer close(out) + v, err := c.GetValue(ctx, key) + if err == nil { + out <- v + } + }() + return out, nil +} + +func (c *offlineRouting) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInfo, error) { + return peer.AddrInfo{}, ErrOffline +} + +func (c *offlineRouting) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo { + out := make(chan peer.AddrInfo) + close(out) + return out +} + +func (c *offlineRouting) Provide(_ context.Context, k cid.Cid, _ bool) error { + return ErrOffline +} + +func (c *offlineRouting) Ping(ctx context.Context, p peer.ID) (time.Duration, error) { + return 0, ErrOffline +} + +func (c *offlineRouting) Bootstrap(context.Context) error { + return nil +} + +// ensure offlineRouting matches the Routing interface +var _ routing.Routing = &offlineRouting{} diff --git a/routing/offline/offline_test.go b/routing/offline/offline_test.go new file mode 100644 index 0000000000..9a17e86898 --- /dev/null +++ b/routing/offline/offline_test.go @@ -0,0 +1,91 @@ +package offline + +import ( + "bytes" + "context" + "testing" + + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + + "github.com/libp2p/go-libp2p/core/routing" + "github.com/libp2p/go-libp2p/core/test" + + mh "github.com/multiformats/go-multihash" +) + +type blankValidator struct{} + +func (blankValidator) Validate(_ string, _ []byte) error { return nil } +func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } + +func TestOfflineRouterStorage(t *testing.T) { + ctx := context.Background() + + nds := ds.NewMapDatastore() + offline := NewOfflineRouter(nds, blankValidator{}) + + if err := offline.PutValue(ctx, "key", []byte("testing 1 2 3")); err != nil { + t.Fatal(err) 
+	}
+
+	val, err := offline.GetValue(ctx, "key")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal([]byte("testing 1 2 3"), val) {
+		t.Fatal("OfflineRouter does not properly store")
+	}
+
+	_, err = offline.GetValue(ctx, "notHere")
+	if err == nil {
+		t.Fatal("Router should throw errors for unfound records")
+	}
+
+	local, err := offline.GetValue(ctx, "key", routing.Offline)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = offline.GetValue(ctx, "notHere", routing.Offline)
+	if err == nil {
+		t.Fatal("Router should throw errors for unfound records")
+	}
+
+	if !bytes.Equal([]byte("testing 1 2 3"), local) {
+		t.Fatal("OfflineRouter does not properly store")
+	}
+}
+
+func TestOfflineRouterLocal(t *testing.T) {
+	ctx := context.Background()
+
+	nds := ds.NewMapDatastore()
+	offline := NewOfflineRouter(nds, blankValidator{})
+
+	id, _ := test.RandPeerID()
+	_, err := offline.FindPeer(ctx, id)
+	if err != ErrOffline {
+		t.Fatal("OfflineRouting should alert that it's offline")
+	}
+
+	h, _ := mh.Sum([]byte("test data1"), mh.SHA2_256, -1)
+	c1 := cid.NewCidV0(h)
+	pChan := offline.FindProvidersAsync(ctx, c1, 1)
+	p, ok := <-pChan
+	if ok {
+		t.Fatalf("FindProvidersAsync did not return a closed channel. Instead we got %+v!", p)
+	}
+
+	h2, _ := mh.Sum([]byte("test data1"), mh.SHA2_256, -1)
+	c2 := cid.NewCidV0(h2)
+	err = offline.Provide(ctx, c2, true)
+	if err != ErrOffline {
+		t.Fatal("OfflineRouting should alert that it's offline")
+	}
+
+	err = offline.Bootstrap(ctx)
+	if err != nil {
+		t.Fatal("Bootstrap should be a no-op for offline routing, not an error.")
+	}
+}
diff --git a/tar/extractor.go b/tar/extractor.go
new file mode 100644
index 0000000000..b5377ddca5
--- /dev/null
+++ b/tar/extractor.go
@@ -0,0 +1,327 @@
+package tar
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	fp "path/filepath"
+	"strings"
+)
+
+var errTraverseSymlink = errors.New("cannot traverse symlinks")
+var errInvalidRoot = errors.New("tar has invalid root")
+var errInvalidRootMultipleRoots = fmt.Errorf("contains more than one root or the root directory is not the first entry : %w", errInvalidRoot)
+
+// Extractor is used for extracting tar files to a filesystem.
+//
+// The Extractor can only extract tar files containing files, directories and symlinks. Additionally, a tar file must
+// either contain a single file or symlink, or have all of its objects inside a single root directory
+// object.
+//
+// If the tar file contains a single file/symlink then it will try to extract it with semantics similar to Linux's
+// `cp`. In particular, the name of the extracted file/symlink will match the extraction path. If the extraction path
+// is a directory then it will extract into the directory using its original name.
+//
+// Overwriting: Extraction of files and symlinks will result in overwriting the existing objects with the same name
+// when possible (i.e. other files, symlinks, and empty directories).
+type Extractor struct {
+	Path     string
+	Progress func(int64) int64
+}
+
+// Extract extracts a tar file to the file system. See the Extractor for more information on the limitations on the
+// tar files that can be extracted.
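+//
+// Typical use (a sketch; paths are placeholders and Progress is optional):
+//
+//	f, err := os.Open("archive.tar")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()
+//	if err := (&Extractor{Path: "/tmp/extract-here"}).Extract(f); err != nil {
+//		// handle error
+//	}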
+func (te *Extractor) Extract(reader io.Reader) error { + if isNullDevice(te.Path) { + return nil + } + + tarReader := tar.NewReader(reader) + + var firstObjectWasDir bool + + header, err := tarReader.Next() + if err != nil && err != io.EOF { + return err + } + if header == nil || err == io.EOF { + return fmt.Errorf("empty tar file") + } + + // Specially handle the first entry assuming it is a single root object (e.g. root directory, single file, + // or single symlink) + + // track what the root tar path is so we can ensure that all other entries are below the root + if strings.Contains(header.Name, "/") { + return fmt.Errorf("root name contains multiple components : %q : %w", header.Name, errInvalidRoot) + } + switch header.Name { + case "", ".", "..": + return fmt.Errorf("invalid root path: %q : %w", header.Name, errInvalidRoot) + } + rootName := header.Name + + // Get the platform-specific output path + rootOutputPath := fp.Clean(te.Path) + if err := validatePlatformPath(rootOutputPath); err != nil { + return err + } + + // If the last element in the rootOutputPath (which is passed by the user) is a symlink do not follow it + // this makes it easier for users to reason about where files are getting extracted to even when the tar is not + // from a trusted source + // + // For example, if the user extracts a mutable link to a tar file (http://sometimesbad.tld/t.tar) and situationally + // it contains a folder, file, or symlink the outputs could hop around the user's file system. This is especially + // annoying since we allow symlinks to point anywhere a user might want them to. + switch header.Typeflag { + case tar.TypeDir: + // if this is the root directory, use it as the output path for remaining files + firstObjectWasDir = true + if err := te.extractDir(rootOutputPath); err != nil { + return err + } + case tar.TypeReg, tar.TypeSymlink: + // Check if the output path already exists, so we know whether we should + // create our output with that name, or if we should put the output inside + // a preexisting directory + + rootIsExistingDirectory := false + // We do not follow links here + if stat, err := os.Lstat(rootOutputPath); err != nil { + if !os.IsNotExist(err) { + return err + } + } else if stat.IsDir() { + rootIsExistingDirectory = true + } + + outputPath := rootOutputPath + // If the root is a directory which already exists then put the file/symlink in the directory + if rootIsExistingDirectory { + // make sure the root has a valid name + if err := validatePathComponent(rootName); err != nil { + return err + } + + // If the output path directory exists then put the file/symlink into the directory. 
+ outputPath = fp.Join(rootOutputPath, rootName) + } + + // If an object with the target name already exists overwrite it + if header.Typeflag == tar.TypeReg { + if err := te.extractFile(outputPath, tarReader); err != nil { + return err + } + } else if err := te.extractSymlink(outputPath, header); err != nil { + return err + } + default: + return fmt.Errorf("unrecognized tar header type: %d", header.Typeflag) + } + + // files come recursively in order + for { + header, err := tarReader.Next() + if err != nil && err != io.EOF { + return err + } + if header == nil || err == io.EOF { + break + } + + // Make sure that we only have a single root element + if !firstObjectWasDir { + return fmt.Errorf("the root was not a directory and the tar has multiple entries: %w", errInvalidRoot) + } + + // validate the path to remove paths we refuse to work with and make it easier to reason about + if err := validateTarPath(header.Name); err != nil { + return err + } + cleanedPath := header.Name + + relPath, err := getRelativePath(rootName, cleanedPath) + if err != nil { + return err + } + + outputPath, err := te.outputPath(rootOutputPath, relPath) + if err != nil { + return err + } + + // This check should already be covered by previous validation, but may catch bugs that slip through. + // Checks if the relative path matches or exceeds the root + // We check for matching because the outputPath function strips the original root + rel, err := fp.Rel(rootOutputPath, outputPath) + if err != nil || rel == "." { + return errInvalidRootMultipleRoots + } + for _, e := range strings.Split(fp.ToSlash(rel), "/") { + if e == ".." { + return fmt.Errorf("relative path contains '..'") + } + } + + switch header.Typeflag { + case tar.TypeDir: + if err := te.extractDir(outputPath); err != nil { + return err + } + case tar.TypeReg: + if err := te.extractFile(outputPath, tarReader); err != nil { + return err + } + case tar.TypeSymlink: + if err := te.extractSymlink(outputPath, header); err != nil { + return err + } + default: + return fmt.Errorf("unrecognized tar header type: %d", header.Typeflag) + } + } + return nil +} + +// validateTarPath returns an error if the path has problematic characters +func validateTarPath(tarPath string) error { + if len(tarPath) == 0 { + return fmt.Errorf("path is empty") + } + + if tarPath[0] == '/' { + return fmt.Errorf("%q : path starts with '/'", tarPath) + } + + elems := strings.Split(tarPath, "/") // break into elems + for _, e := range elems { + switch e { + case "", ".", "..": + return fmt.Errorf("%q : path contains %q", tarPath, e) + } + } + return nil +} + +// getRelativePath returns the relative path between rootTarPath and tarPath. Assumes both paths have been cleaned. +// Will error if the tarPath is not below the rootTarPath. +func getRelativePath(rootName, tarPath string) (string, error) { + if !strings.HasPrefix(tarPath, rootName+"/") { + return "", errInvalidRootMultipleRoots + } + return tarPath[len(rootName)+1:], nil +} + +// outputPath returns the directory path at which to place the file relativeTarPath. Assumes relativeTarPath is cleaned. +func (te *Extractor) outputPath(basePlatformPath, relativeTarPath string) (string, error) { + elems := strings.Split(relativeTarPath, "/") + + platformPath := basePlatformPath + for i, e := range elems { + if err := validatePathComponent(e); err != nil { + return "", err + } + platformPath = fp.Join(platformPath, e) + + // Last element is not checked since it will be removed (if it exists) by any of the extraction functions. 
+ // For more details see: + // https://github.com/libarchive/libarchive/blob/0fd2ed25d78e9f4505de5dcb6208c6c0ff8d2edb/libarchive/archive_write_disk_posix.c#L2810 + if i == len(elems)-1 { + break + } + + fi, err := os.Lstat(platformPath) + if err != nil { + return "", err + } + + if fi.Mode()&os.ModeSymlink != 0 { + return "", errTraverseSymlink + } + if !fi.Mode().IsDir() { + return "", errors.New("cannot traverse non-directory objects") + } + } + + return platformPath, nil +} + +var errExtractedDirToSymlink = errors.New("cannot extract to symlink") + +func (te *Extractor) extractDir(path string) error { + err := os.MkdirAll(path, 0755) + if err != nil { + return err + } + + if stat, err := os.Lstat(path); err != nil { + return err + } else if !stat.IsDir() { + return errExtractedDirToSymlink + } + return nil +} + +func (te *Extractor) extractSymlink(path string, h *tar.Header) error { + if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + + return os.Symlink(h.Linkname, path) +} + +func (te *Extractor) extractFile(path string, r *tar.Reader) error { + // Attempt removing the target so we can overwrite files, symlinks and empty directories + if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + + // Create a temporary file in the target directory and then rename the temporary file to the target to better deal + // with races on the file system. + base := fp.Dir(path) + tmpfile, err := os.CreateTemp(base, "") + if err != nil { + return err + } + if err := copyWithProgress(tmpfile, r, te.Progress); err != nil { + _ = tmpfile.Close() + _ = os.Remove(tmpfile.Name()) + return err + } + if err := tmpfile.Close(); err != nil { + _ = os.Remove(tmpfile.Name()) + return err + } + + if err := os.Rename(tmpfile.Name(), path); err != nil { + _ = os.Remove(tmpfile.Name()) + return err + } + + return nil +} + +func copyWithProgress(to io.Writer, from io.Reader, cb func(int64) int64) error { + buf := make([]byte, 4096) + for { + n, err := from.Read(buf) + if n != 0 { + if cb != nil { + cb(int64(n)) + } + _, err2 := to.Write(buf[:n]) + if err2 != nil { + return err2 + } + } + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} diff --git a/tar/extractor_test.go b/tar/extractor_test.go new file mode 100644 index 0000000000..717c65d196 --- /dev/null +++ b/tar/extractor_test.go @@ -0,0 +1,437 @@ +package tar + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + fp "path/filepath" + "runtime" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var symlinksEnabled bool +var symlinksEnabledErr error + +func init() { + // check if the platform supports symlinks + // inspired by https://github.com/golang/go/blob/770f1de8c54256d5b17447028e47b201ba8e62c8/src/internal/testenv/testenv_windows.go#L17 + + tmpdir, err := os.MkdirTemp("", "platformsymtest") + if err != nil { + panic("failed to create temp directory: " + err.Error()) + } + defer os.RemoveAll(tmpdir) + + symlinksEnabledErr = os.Symlink("target", fp.Join(tmpdir, "symlink")) + symlinksEnabled = symlinksEnabledErr == nil + + if !symlinksEnabled { + // for now assume symlinks only fail on Windows, Android and Plan9 + // taken from https://github.com/golang/go/blob/770f1de8c54256d5b17447028e47b201ba8e62c8/src/internal/testenv/testenv_notwin.go#L14 + // and https://github.com/golang/go/blob/770f1de8c54256d5b17447028e47b201ba8e62c8/src/internal/testenv/testenv_windows.go#L34 + switch runtime.GOOS { + case "windows", 
"android", "plan9": + default: + panic(fmt.Errorf("attempted symlink creation failed: %w", symlinksEnabledErr)) + } + } +} + +func TestSingleFile(t *testing.T) { + fileName := "file..ext" + fileData := "file data" + + testTarExtraction(t, nil, []tarEntry{ + &fileTarEntry{fileName, []byte(fileData)}, + }, + func(t *testing.T, extractDir string) { + f, err := os.Open(fp.Join(extractDir, fileName)) + assert.NoError(t, err) + data, err := io.ReadAll(f) + assert.NoError(t, err) + assert.Equal(t, fileData, string(data)) + assert.NoError(t, f.Close()) + }, + nil, + ) +} + +func TestSingleDirectory(t *testing.T) { + dirName := "dir..sfx" + + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{dirName}, + }, + func(t *testing.T, extractDir string) { + f, err := os.Open(extractDir) + if err != nil { + t.Fatal(err) + } + objs, err := f.Readdir(1) + if err == io.EOF && len(objs) == 0 { + return + } + t.Fatalf("expected an empty directory") + }, + nil, + ) +} + +func TestDirectoryFollowSymlinkToNothing(t *testing.T) { + dirName := "dir" + childName := "child" + + entries := []tarEntry{ + &dirTarEntry{dirName}, + &dirTarEntry{dirName + "/" + childName}, + } + + testTarExtraction(t, func(t *testing.T, rootDir string) { + target := fp.Join(rootDir, tarOutRoot) + if err := os.Symlink(fp.Join(target, "foo"), fp.Join(target, childName)); err != nil { + t.Fatal(err) + } + }, entries, nil, + os.ErrExist, + ) +} + +func TestDirectoryFollowSymlinkToFile(t *testing.T) { + dirName := "dir" + childName := "child" + + entries := []tarEntry{ + &dirTarEntry{dirName}, + &dirTarEntry{dirName + "/" + childName}, + } + + testTarExtraction(t, func(t *testing.T, rootDir string) { + target := fp.Join(rootDir, tarOutRoot) + symlinkTarget := fp.Join(target, "foo") + if err := os.WriteFile(symlinkTarget, []byte("original data"), os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.Symlink(symlinkTarget, fp.Join(target, childName)); err != nil { + t.Fatal(err) + } + }, entries, nil, + syscall.ENOTDIR, + ) +} + +func TestDirectoryFollowSymlinkToDirectory(t *testing.T) { + dirName := "dir" + childName := "child" + + entries := []tarEntry{ + &dirTarEntry{dirName}, + &dirTarEntry{dirName + "/" + childName}, + } + + testTarExtraction(t, func(t *testing.T, rootDir string) { + target := fp.Join(rootDir, tarOutRoot) + symlinkTarget := fp.Join(target, "foo") + if err := os.Mkdir(symlinkTarget, os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.Symlink(symlinkTarget, fp.Join(target, childName)); err != nil { + t.Fatal(err) + } + }, entries, nil, + errExtractedDirToSymlink, + ) +} + +func TestSingleSymlink(t *testing.T) { + if !symlinksEnabled { + t.Skip("symlinks disabled on this platform", symlinksEnabledErr) + } + + targetName := "file" + symlinkName := "symlink" + + testTarExtraction(t, nil, []tarEntry{ + &symlinkTarEntry{targetName, symlinkName}, + }, func(t *testing.T, extractDir string) { + symlinkPath := fp.Join(extractDir, symlinkName) + fi, err := os.Lstat(symlinkPath) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, fi.Mode()&os.ModeSymlink != 0, true, "expected to be a symlink") + targetPath, err := os.Readlink(symlinkPath) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, targetName, targetPath) + }, nil) +} + +func TestMultipleRoots(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root"}, + &dirTarEntry{"sibling"}, + }, nil, errInvalidRoot) +} + +func TestMultipleRootsNested(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root/child1"}, + 
&dirTarEntry{"root/child2"}, + }, nil, errInvalidRoot) +} + +func TestOutOfOrderRoot(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root/child"}, + &dirTarEntry{"root"}, + }, nil, errInvalidRoot) +} + +func TestOutOfOrder(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root/child/grandchild"}, + &dirTarEntry{"root/child"}, + }, nil, errInvalidRoot) +} + +func TestNestedDirectories(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root"}, + &dirTarEntry{"root/child"}, + &dirTarEntry{"root/child/grandchild"}, + }, func(t *testing.T, extractDir string) { + walkIndex := 0 + err := fp.Walk(extractDir, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + switch walkIndex { + case 0: + assert.Equal(t, info.Name(), tarOutRoot) + case 1: + assert.Equal(t, info.Name(), "child") + case 2: + assert.Equal(t, info.Name(), "grandchild") + default: + assert.Fail(t, "has more than 3 entries", path) + } + walkIndex++ + return nil + }) + assert.NoError(t, err) + }, nil) +} + +func TestRootDirectoryHasSubpath(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root/child"}, + &dirTarEntry{"root/child/grandchild"}, + }, nil, errInvalidRoot) +} + +func TestFilesAndFolders(t *testing.T) { + testTarExtraction(t, nil, []tarEntry{ + &dirTarEntry{"root"}, + &dirTarEntry{"root/childdir"}, + &fileTarEntry{"root/childdir/file1", []byte("some data")}, + }, nil, nil) +} + +func TestInternalSymlinkTraverse(t *testing.T) { + if !symlinksEnabled { + t.Skip("symlinks disabled on this platform", symlinksEnabledErr) + } + testTarExtraction(t, nil, []tarEntry{ + // FIXME: We are ignoring the first element in the path check so + // we add a directory at the start to bypass this. + &dirTarEntry{"root"}, + &dirTarEntry{"root/child"}, + &symlinkTarEntry{"child", "root/symlink-dir"}, + &fileTarEntry{"root/symlink-dir/file", []byte("file")}, + }, + nil, + errTraverseSymlink, + ) +} + +func TestExternalSymlinkTraverse(t *testing.T) { + if !symlinksEnabled { + t.Skip("symlinks disabled on this platform", symlinksEnabledErr) + } + testTarExtraction(t, nil, []tarEntry{ + // FIXME: We are ignoring the first element in the path check so + // we add a directory at the start to bypass this. + &dirTarEntry{"inner"}, + &symlinkTarEntry{"..", "inner/symlink-dir"}, + &fileTarEntry{"inner/symlink-dir/file", []byte("overwrite content")}, + }, + nil, + errTraverseSymlink, + ) +} + +func TestLastElementOverwrite(t *testing.T) { + if !symlinksEnabled { + t.Skip("symlinks disabled on this platform", symlinksEnabledErr) + } + const originalData = "original" + testTarExtraction(t, func(t *testing.T, rootDir string) { + // Create an outside target that will try to be overwritten. + // This file will reside outside of the extraction directory root. + f, err := os.Create(fp.Join(rootDir, "outside-ref")) + assert.NoError(t, err) + n, err := f.WriteString(originalData) + assert.NoError(t, err) + assert.Equal(t, len(originalData), n) + }, + []tarEntry{ + &dirTarEntry{"root"}, + &symlinkTarEntry{"../outside-ref", "root/symlink"}, + &fileTarEntry{"root/symlink", []byte("overwrite content")}, + }, + func(t *testing.T, extractDir string) { + // Check that outside-ref still exists but has not been + // overwritten or truncated (still size the same). 
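+			// (This holds because extractFile first removes the symlink at
+			// the target path and then writes a fresh file inside the
+			// extraction root, so the data never follows the link outside.)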
+ info, err := os.Stat(fp.Join(extractDir, "..", "outside-ref")) + assert.NoError(t, err) + + assert.Equal(t, len(originalData), int(info.Size()), "outside reference has been overwritten") + }, + nil, + ) +} + +const tarOutRoot = "tar-out-root" + +func testTarExtraction(t *testing.T, setup func(t *testing.T, rootDir string), tarEntries []tarEntry, check func(t *testing.T, extractDir string), extractError error) { + var err error + + // Directory structure. + // FIXME: We can't easily work on a MemFS since we would need to replace + // all the `os` calls in the extractor so using a temporary dir. + rootDir, err := os.MkdirTemp("", "tar-extraction-test") + assert.NoError(t, err) + extractDir := fp.Join(rootDir, tarOutRoot) + err = os.MkdirAll(extractDir, 0755) + assert.NoError(t, err) + + // Generated TAR file. + tarFilename := fp.Join(rootDir, "generated.tar") + tarFile, err := os.Create(tarFilename) + assert.NoError(t, err) + defer tarFile.Close() + tw := tar.NewWriter(tarFile) + defer tw.Close() + + if setup != nil { + setup(t, rootDir) + } + + writeTarFile(t, tarFilename, tarEntries) + + testExtract(t, tarFilename, extractDir, extractError) + + if check != nil { + check(t, extractDir) + } +} + +func testExtract(t *testing.T, tarFile string, extractDir string, expectedError error) { + var err error + + tarReader, err := os.Open(tarFile) + assert.NoError(t, err) + + extractor := &Extractor{Path: extractDir} + err = extractor.Extract(tarReader) + + assert.ErrorIs(t, err, expectedError) +} + +// Based on the `writeXXXHeader` family of functions in +// github.com/ipfs/go-ipfs-files@v0.0.8/tarwriter.go. +func writeTarFile(t *testing.T, path string, entries []tarEntry) { + tarFile, err := os.Create(path) + assert.NoError(t, err) + defer tarFile.Close() + + tw := tar.NewWriter(tarFile) + defer tw.Close() + + for _, e := range entries { + err = e.write(tw) + assert.NoError(t, err) + } +} + +type tarEntry interface { + write(tw *tar.Writer) error +} + +var _ tarEntry = (*fileTarEntry)(nil) +var _ tarEntry = (*dirTarEntry)(nil) +var _ tarEntry = (*symlinkTarEntry)(nil) + +type fileTarEntry struct { + path string + buf []byte +} + +func (e *fileTarEntry) write(tw *tar.Writer) error { + if err := writeFileHeader(tw, e.path, uint64(len(e.buf))); err != nil { + return err + } + + if _, err := io.Copy(tw, bytes.NewReader(e.buf)); err != nil { + return err + } + + tw.Flush() + return nil +} +func writeFileHeader(w *tar.Writer, fpath string, size uint64) error { + return w.WriteHeader(&tar.Header{ + Name: fpath, + Size: int64(size), + Typeflag: tar.TypeReg, + Mode: 0644, + ModTime: time.Now(), + // TODO: set mode, dates, etc. when added to unixFS + }) +} + +type dirTarEntry struct { + path string +} + +func (e *dirTarEntry) write(tw *tar.Writer) error { + return tw.WriteHeader(&tar.Header{ + Name: e.path, + Typeflag: tar.TypeDir, + Mode: 0777, + ModTime: time.Now(), + // TODO: set mode, dates, etc. 
when added to unixFS + }) +} + +type symlinkTarEntry struct { + target string + path string +} + +func (e *symlinkTarEntry) write(w *tar.Writer) error { + return w.WriteHeader(&tar.Header{ + Name: e.path, + Linkname: e.target, + Mode: 0777, + Typeflag: tar.TypeSymlink, + }) +} diff --git a/tar/sanitize.go b/tar/sanitize.go new file mode 100644 index 0000000000..86fba1991b --- /dev/null +++ b/tar/sanitize.go @@ -0,0 +1,30 @@ +//go:build !windows + +package tar + +import ( + "fmt" + "os" + "strings" +) + +func isNullDevice(path string) bool { + return path == os.DevNull +} + +func validatePlatformPath(platformPath string) error { + if strings.Contains(platformPath, "\x00") { + return fmt.Errorf("invalid platform path: path components cannot contain null: %q", platformPath) + } + return nil +} + +func validatePathComponent(c string) error { + if c == ".." { + return fmt.Errorf("invalid platform path: path component cannot be '..'") + } + if strings.Contains(c, "\x00") { + return fmt.Errorf("invalid platform path: path components cannot contain null: %q", c) + } + return nil +} diff --git a/tar/sanitize_windows.go b/tar/sanitize_windows.go new file mode 100644 index 0000000000..4a788a4844 --- /dev/null +++ b/tar/sanitize_windows.go @@ -0,0 +1,74 @@ +package tar + +import ( + "fmt" + "path/filepath" + "strings" +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx +var reservedNames = [...]string{"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"} + +const reservedCharsStr = `[<>:"\|?*]` + "\x00" //NOTE: `/` is not included as it is our standard path separator + +func isNullDevice(path string) bool { + // This is a case insensitive comparison to NUL + if len(path) != 3 { + return false + } + if path[0]|0x20 != 'n' { + return false + } + if path[1]|0x20 != 'u' { + return false + } + if path[2]|0x20 != 'l' { + return false + } + return true +} + +// validatePathComponent returns an error if the given path component is not allowed on the platform +func validatePathComponent(c string) error { + //MSDN: Do not end a file or directory name with a space or a period + if strings.HasSuffix(c, ".") { + return fmt.Errorf("invalid platform path: path components cannot end with '.' : %q", c) + } + if strings.HasSuffix(c, " ") { + return fmt.Errorf("invalid platform path: path components cannot end with ' ' : %q", c) + } + + if c == ".." 
{
+		return fmt.Errorf("invalid platform path: path component cannot be '..'")
+	}
+	// error on reserved characters
+	if strings.ContainsAny(c, reservedCharsStr) {
+		return fmt.Errorf("invalid platform path: path components cannot contain any of %s : %q", reservedCharsStr, c)
+	}
+
+	// error on reserved names
+	for _, rn := range reservedNames {
+		if c == rn {
+			return fmt.Errorf("invalid platform path: path component is a reserved name: %s", c)
+		}
+	}
+
+	return nil
+}
+
+func validatePlatformPath(platformPath string) error {
+	// remove the volume name
+	p := platformPath[len(filepath.VolumeName(platformPath)):]
+
+	// convert to cleaned slash-path
+	p = filepath.ToSlash(p)
+	p = strings.Trim(p, "/")
+
+	// make sure all components of the path are valid
+	for _, e := range strings.Split(p, "/") {
+		if err := validatePathComponent(e); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/unixfs/README.md b/unixfs/README.md
new file mode 100644
index 0000000000..6c80d07c8c
--- /dev/null
+++ b/unixfs/README.md
@@ -0,0 +1,28 @@
+## Package Directory
+This package contains many subpackages, each of which can be very large on its own.
+
+### Top Level
+The top level unixfs package defines the unixfs format data structures and some helper methods around them.
+
+### importers
+The `importer` subpackage is what you'll use when you want to turn a normal file into a unixfs file.
+
+### io
+The `io` subpackage provides helpers for reading files and manipulating directories. The `DagReader` takes a
+reference to a unixfs file and returns a file handle that can be read from and seeked through. The `Directory`
+interface allows you to easily read items in a directory, add items to a directory, and do lookups.
+
+### mod
+The `mod` subpackage implements a `DagModifier` type that can be used to write to an existing unixfs file, or
+create a new one. The logic for this is significantly more complicated than for the `DagReader`, so it's a separate
+type. (TODO: maybe it still belongs in the `io` subpackage though?)
+
+### hamt
+The `hamt` subpackage implements a CHAMP HAMT that is used in unixfs directory sharding.
+
+### archive
+The `archive` subpackage implements a `tar` importer and exporter. The objects created here are not officially unixfs,
+but in the future, this may be integrated more directly.
+
+### test
+The `test` subpackage provides several utilities to make testing unixfs-related things easier.
diff --git a/unixfs/file/unixfile.go b/unixfs/file/unixfile.go
new file mode 100644
index 0000000000..75e2cb218e
--- /dev/null
+++ b/unixfs/file/unixfile.go
@@ -0,0 +1,183 @@
+package unixfile
+
+import (
+	"context"
+	"errors"
+
+	ft "github.com/ipfs/boxo/unixfs"
+	uio "github.com/ipfs/boxo/unixfs/io"
+
+	"github.com/ipfs/boxo/files"
+	ipld "github.com/ipfs/go-ipld-format"
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+// Number of files to prefetch in directories
+// TODO: should we allow setting this via context hint?
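+//
+// A context-based hint could look roughly like the sketch below; the key and
+// helper are hypothetical, not part of this package:
+//
+//	type prefetchKey struct{}
+//
+//	func withPrefetch(ctx context.Context, n int) context.Context {
+//		return context.WithValue(ctx, prefetchKey{}, n)
+//	}
+//
+//	// Entries() would then read the hint with:
+//	// n, ok := d.ctx.Value(prefetchKey{}).(int)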
+const prefetchFiles = 4 + +type ufsDirectory struct { + ctx context.Context + dserv ipld.DAGService + dir uio.Directory + size int64 +} + +type ufsIterator struct { + ctx context.Context + files chan *ipld.Link + dserv ipld.DAGService + + curName string + curFile files.Node + + err error + errCh chan error +} + +func (it *ufsIterator) Name() string { + return it.curName +} + +func (it *ufsIterator) Node() files.Node { + return it.curFile +} + +func (it *ufsIterator) Next() bool { + if it.err != nil { + return false + } + + var l *ipld.Link + var ok bool + for !ok { + if it.files == nil && it.errCh == nil { + return false + } + select { + case l, ok = <-it.files: + if !ok { + it.files = nil + } + case err := <-it.errCh: + it.errCh = nil + it.err = err + + if err != nil { + return false + } + } + } + + it.curFile = nil + + nd, err := l.GetNode(it.ctx, it.dserv) + if err != nil { + it.err = err + return false + } + + it.curName = l.Name + it.curFile, it.err = NewUnixfsFile(it.ctx, it.dserv, nd) + return it.err == nil +} + +func (it *ufsIterator) Err() error { + return it.err +} + +func (d *ufsDirectory) Close() error { + return nil +} + +func (d *ufsDirectory) Entries() files.DirIterator { + fileCh := make(chan *ipld.Link, prefetchFiles) + errCh := make(chan error, 1) + go func() { + errCh <- d.dir.ForEachLink(d.ctx, func(link *ipld.Link) error { + if d.ctx.Err() != nil { + return d.ctx.Err() + } + select { + case fileCh <- link: + case <-d.ctx.Done(): + return d.ctx.Err() + } + return nil + }) + + close(errCh) + close(fileCh) + }() + + return &ufsIterator{ + ctx: d.ctx, + files: fileCh, + errCh: errCh, + dserv: d.dserv, + } +} + +func (d *ufsDirectory) Size() (int64, error) { + return d.size, nil +} + +type ufsFile struct { + uio.DagReader +} + +func (f *ufsFile) Size() (int64, error) { + return int64(f.DagReader.Size()), nil +} + +func newUnixfsDir(ctx context.Context, dserv ipld.DAGService, nd *dag.ProtoNode) (files.Directory, error) { + dir, err := uio.NewDirectoryFromNode(dserv, nd) + if err != nil { + return nil, err + } + + size, err := nd.Size() + if err != nil { + return nil, err + } + + return &ufsDirectory{ + ctx: ctx, + dserv: dserv, + + dir: dir, + size: int64(size), + }, nil +} + +func NewUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (files.Node, error) { + switch dn := nd.(type) { + case *dag.ProtoNode: + fsn, err := ft.FSNodeFromBytes(dn.Data()) + if err != nil { + return nil, err + } + if fsn.IsDir() { + return newUnixfsDir(ctx, dserv, dn) + } + if fsn.Type() == ft.TSymlink { + return files.NewLinkFile(string(fsn.Data()), nil), nil + } + + case *dag.RawNode: + default: + return nil, errors.New("unknown node type") + } + + dr, err := uio.NewDagReader(ctx, nd, dserv) + if err != nil { + return nil, err + } + + return &ufsFile{ + DagReader: dr, + }, nil +} + +var _ files.Directory = &ufsDirectory{} +var _ files.File = &ufsFile{} diff --git a/unixfs/hamt/hamt.go b/unixfs/hamt/hamt.go new file mode 100644 index 0000000000..06d284eb4c --- /dev/null +++ b/unixfs/hamt/hamt.go @@ -0,0 +1,946 @@ +// Package hamt implements a Hash Array Mapped Trie over ipfs merkledag nodes. +// It is implemented mostly as described in the wikipedia article on HAMTs, +// however the table size is variable (usually 256 in our usages) as opposed to +// 32 as suggested in the article. The hash function used is currently +// Murmur3, but this value is configurable (the datastructure reports which +// hash function its using). 
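+//
+// For example, with a table size of 256 each level of the trie consumes
+// log2(256) = 8 bits of the hash digest, so each successive byte of
+// murmur3(key) picks the child index one level further down the trie.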
+//
+// The one algorithmic change we implement that is not mentioned in the
+// wikipedia article is the collapsing of empty shards.
+// Given the following tree: ( '[' = shards, '{' = values )
+// [ 'A' ] -> [ 'B' ] -> { "ABC" }
+//
+// | L-> { "ABD" }
+// L-> { "ASDF" }
+//
+// If we simply removed "ABC", we would end up with a tree where shard 'B' only
+// has a single child. This causes two issues. The first is that we now need
+// an extra lookup to get to "ABD". The second is that we now have a tree that
+// contains only "ABD" but is not the same tree that we would get by simply
+// inserting "ABD" into a new tree. To address this, we always
+// check for empty shard nodes upon deletion and prune them to maintain a
+// consistent tree, independent of insertion order.
+package hamt
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+
+	"golang.org/x/sync/errgroup"
+
+	format "github.com/ipfs/boxo/unixfs"
+	"github.com/ipfs/boxo/unixfs/internal"
+
+	bitfield "github.com/ipfs/go-bitfield"
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+const (
+	// HashMurmur3 is the multiformats identifier for Murmur3
+	HashMurmur3 uint64 = 0x22
+)
+
+func init() {
+	internal.HAMTHashFunction = murmur3Hash
+}
+
+func (ds *Shard) isValueNode() bool {
+	return ds.key != "" && ds.val != nil
+}
+
+// A Shard represents the HAMT. It should be initialized with NewShard().
+type Shard struct {
+	childer *childer
+
+	// Entries per node (number of possible children indexed by the partial key).
+	tableSize int
+	// Bits needed to encode child indexes (log2 of number of entries). This is
+	// the number of bits taken from the hash key on each level of the tree.
+	tableSizeLg2 int
+
+	builder  cid.Builder
+	hashFunc uint64
+
+	// String format with number of zeros that will be present in the hexadecimal
+	// encoding of the child index to always reach the fixed maxpadlen chars.
+	// Example: maxpadlen = 4 => prefixPadStr: "%04X" (print number in hexadecimal
+	// format padding with zeros to always reach 4 characters).
+	prefixPadStr string
+	// Length in chars of the string that encodes child indexes. We encode
+	// indexes as hexadecimal strings, so this is the hex-digit length (log16)
+	// of the number of entries.
+	maxpadlen int
+
+	dserv ipld.DAGService
+
+	// FIXME: Remove. We don't actually store "value nodes". This confusing
+	// abstraction just removes the maxpadlen from the link names to extract
+	// the actual value link the trie is storing.
+	// leaf node
+	key string
+	val *ipld.Link
+}
+
+// NewShard creates a new, empty HAMT shard with the given size.
+func NewShard(dserv ipld.DAGService, size int) (*Shard, error) {
+	return NewShardValue(dserv, size, "", nil)
+}
+
+// NewShardValue creates a new, empty HAMT shard with the given key, value and size.
+func NewShardValue(dserv ipld.DAGService, size int, key string, value *ipld.Link) (*Shard, error) {
+	ds, err := makeShard(dserv, size, key, value)
+	if err != nil {
+		return nil, err
+	}
+
+	// FIXME: Make this at least a static configuration for testing.
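+	// Murmur3 is currently the only hash function accepted when reloading a
+	// shard from the DAG (see NewHamtFromDag), so it is hard-coded here
+	// rather than taken as a parameter.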
+	ds.hashFunc = HashMurmur3
+	return ds, nil
+}
+
+func makeShard(ds ipld.DAGService, size int, key string, val *ipld.Link) (*Shard, error) {
+	lg2s, err := Logtwo(size)
+	if err != nil {
+		return nil, err
+	}
+	childer, err := newChilder(ds, size)
+	if err != nil {
+		return nil, err
+	}
+	maxpadding := fmt.Sprintf("%X", size-1)
+	s := &Shard{
+		tableSizeLg2: lg2s,
+		prefixPadStr: fmt.Sprintf("%%0%dX", len(maxpadding)),
+		maxpadlen:    len(maxpadding),
+		childer:      childer,
+		tableSize:    size,
+		dserv:        ds,
+
+		key: key,
+		val: val,
+	}
+
+	s.childer.sd = s
+
+	return s, nil
+}
+
+// NewHamtFromDag creates a new HAMT shard from the given DAG.
+func NewHamtFromDag(dserv ipld.DAGService, nd ipld.Node) (*Shard, error) {
+	pbnd, ok := nd.(*dag.ProtoNode)
+	if !ok {
+		return nil, dag.ErrNotProtobuf
+	}
+
+	fsn, err := format.FSNodeFromBytes(pbnd.Data())
+	if err != nil {
+		return nil, err
+	}
+
+	if fsn.Type() != format.THAMTShard {
+		return nil, fmt.Errorf("node was not a dir shard")
+	}
+
+	if fsn.HashType() != HashMurmur3 {
+		return nil, fmt.Errorf("only murmur3 supported as hash function")
+	}
+
+	size := int(fsn.Fanout())
+
+	ds, err := makeShard(dserv, size, "", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	ds.childer.makeChilder(fsn.Data(), pbnd.Links())
+
+	ds.hashFunc = fsn.HashType()
+	ds.builder = pbnd.CidBuilder()
+
+	return ds, nil
+}
+
+// SetCidBuilder sets the CID Builder
+func (ds *Shard) SetCidBuilder(builder cid.Builder) {
+	ds.builder = builder
+}
+
+// CidBuilder gets the CID Builder, may be nil if unset
+func (ds *Shard) CidBuilder() cid.Builder {
+	return ds.builder
+}
+
+// Node serializes the HAMT structure into a merkledag node with unixfs formatting
+func (ds *Shard) Node() (ipld.Node, error) {
+	out := new(dag.ProtoNode)
+	out.SetCidBuilder(ds.builder)
+
+	sliceIndex := 0
+	// TODO: optimized 'for each set bit'
+	for childIndex := 0; childIndex < ds.tableSize; childIndex++ {
+		if !ds.childer.has(childIndex) {
+			continue
+		}
+
+		ch := ds.childer.child(sliceIndex)
+		if ch != nil {
+			clnk, err := ch.Link()
+			if err != nil {
+				return nil, err
+			}
+
+			err = out.AddRawLink(ds.linkNamePrefix(childIndex)+ch.key, clnk)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			// child unloaded, just copy in link with updated name
+			lnk := ds.childer.link(sliceIndex)
+			label := lnk.Name[ds.maxpadlen:]
+
+			err := out.AddRawLink(ds.linkNamePrefix(childIndex)+label, lnk)
+			if err != nil {
+				return nil, err
+			}
+		}
+		sliceIndex++
+	}
+
+	data, err := format.HAMTShardData(ds.childer.bitfield.Bytes(), uint64(ds.tableSize), HashMurmur3)
+	if err != nil {
+		return nil, err
+	}
+
+	out.SetData(data)
+
+	err = ds.dserv.Add(context.TODO(), out)
+	if err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+func (ds *Shard) makeShardValue(lnk *ipld.Link) (*Shard, error) {
+	lnk2 := *lnk
+	s, err := makeShard(ds.dserv, ds.tableSize, "", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	s.key = lnk.Name[ds.maxpadlen:]
+	s.val = &lnk2
+
+	return s, nil
+}
+
+// Set sets 'name' = nd in the HAMT
+func (ds *Shard) Set(ctx context.Context, name string, nd ipld.Node) error {
+	_, err := ds.Swap(ctx, name, nd)
+	return err
+}
+
+// SetLink sets 'name' to point at the given link in the HAMT, using the
+// information in the link directly. This avoids writing the given node and
+// then reading it back just to make a link out of it.
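+//
+// A minimal usage sketch (names are illustrative):
+//
+//	lnk, err := ipld.MakeLink(nd)
+//	if err != nil {
+//		return err
+//	}
+//	if err := shard.SetLink(ctx, "file.txt", lnk); err != nil {
+//		return err
+//	}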
+func (ds *Shard) SetLink(ctx context.Context, name string, lnk *ipld.Link) error { + hv := newHashBits(name) + + newLink := ipld.Link{ + Name: lnk.Name, + Size: lnk.Size, + Cid: lnk.Cid, + } + + // FIXME: We don't need to set the name here, it will get overwritten. + // This is confusing, confirm and remove this line. + newLink.Name = ds.linkNamePrefix(0) + name + + _, err := ds.swapValue(ctx, hv, name, &newLink) + return err +} + +// Swap sets a link pointing to the passed node as the value under the +// name key in this Shard or its children. It also returns the previous link +// under that name key (if any). +func (ds *Shard) Swap(ctx context.Context, name string, node ipld.Node) (*ipld.Link, error) { + hv := newHashBits(name) + err := ds.dserv.Add(ctx, node) + if err != nil { + return nil, err + } + + lnk, err := ipld.MakeLink(node) + if err != nil { + return nil, err + } + + // FIXME: We don't need to set the name here, it will get overwritten. + // This is confusing, confirm and remove this line. + lnk.Name = ds.linkNamePrefix(0) + name + + return ds.swapValue(ctx, hv, name, lnk) +} + +// Remove deletes the named entry if it exists. Otherwise, it returns +// os.ErrNotExist. +func (ds *Shard) Remove(ctx context.Context, name string) error { + _, err := ds.Take(ctx, name) + return err +} + +// Take is similar to the public Remove but also returns the +// old removed link (if it exists). +func (ds *Shard) Take(ctx context.Context, name string) (*ipld.Link, error) { + hv := newHashBits(name) + return ds.swapValue(ctx, hv, name, nil) +} + +// Find searches for a child node by 'name' within this hamt +func (ds *Shard) Find(ctx context.Context, name string) (*ipld.Link, error) { + hv := newHashBits(name) + + var out *ipld.Link + err := ds.getValue(ctx, hv, name, func(sv *Shard) error { + out = sv.val + return nil + }) + if err != nil { + return nil, err + } + + return out, nil +} + +type linkType int + +const ( + invalidLink linkType = iota + shardLink + shardValueLink +) + +func (ds *Shard) childLinkType(lnk *ipld.Link) (linkType, error) { + if len(lnk.Name) < ds.maxpadlen { + return invalidLink, fmt.Errorf("invalid link name '%s'", lnk.Name) + } + if len(lnk.Name) == ds.maxpadlen { + return shardLink, nil + } + return shardValueLink, nil +} + +// Link returns a merklelink to this shard node +func (ds *Shard) Link() (*ipld.Link, error) { + if ds.isValueNode() { + return ds.val, nil + } + + nd, err := ds.Node() + if err != nil { + return nil, err + } + + err = ds.dserv.Add(context.TODO(), nd) + if err != nil { + return nil, err + } + + return ipld.MakeLink(nd) +} + +func (ds *Shard) getValue(ctx context.Context, hv *hashBits, key string, cb func(*Shard) error) error { + childIndex, err := hv.Next(ds.tableSizeLg2) + if err != nil { + return err + } + + if ds.childer.has(childIndex) { + child, err := ds.childer.get(ctx, ds.childer.sliceIndex(childIndex)) + if err != nil { + return err + } + + if child.isValueNode() { + if child.key == key { + return cb(child) + } + } else { + return child.getValue(ctx, hv, key, cb) + } + } + + return os.ErrNotExist +} + +// EnumLinks collects all links in the Shard. +func (ds *Shard) EnumLinks(ctx context.Context) ([]*ipld.Link, error) { + var links []*ipld.Link + + linkResults := ds.EnumLinksAsync(ctx) + + for linkResult := range linkResults { + if linkResult.Err != nil { + return links, linkResult.Err + } + links = append(links, linkResult.Link) + } + return links, nil +} + +// ForEachLink walks the Shard and calls the given function. 
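+//
+// For example, counting the entries of a sharded directory (sketch):
+//
+//	var count int
+//	err := shard.ForEachLink(ctx, func(l *ipld.Link) error {
+//		count++
+//		return nil
+//	})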
+func (ds *Shard) ForEachLink(ctx context.Context, f func(*ipld.Link) error) error { + return ds.walkTrie(ctx, func(sv *Shard) error { + lnk := sv.val + lnk.Name = sv.key + + return f(lnk) + }) +} + +// EnumLinksAsync returns a channel which will receive Links in the directory +// as they are enumerated, where order is not guaranteed +func (ds *Shard) EnumLinksAsync(ctx context.Context) <-chan format.LinkResult { + linkResults := make(chan format.LinkResult) + ctx, cancel := context.WithCancel(ctx) + go func() { + defer close(linkResults) + defer cancel() + + err := parallelShardWalk(ctx, ds, ds.dserv, func(formattedLink *ipld.Link) error { + emitResult(ctx, linkResults, format.LinkResult{Link: formattedLink, Err: nil}) + return nil + }) + if err != nil { + emitResult(ctx, linkResults, format.LinkResult{Link: nil, Err: err}) + } + }() + return linkResults +} + +type listCidsAndShards struct { + cids []cid.Cid + shards []*Shard +} + +func (ds *Shard) walkChildren(processLinkValues func(formattedLink *ipld.Link) error) (*listCidsAndShards, error) { + res := &listCidsAndShards{} + + for idx, lnk := range ds.childer.links { + if nextShard := ds.childer.children[idx]; nextShard == nil { + lnkLinkType, err := ds.childLinkType(lnk) + if err != nil { + return nil, err + } + + switch lnkLinkType { + case shardValueLink: + sv, err := ds.makeShardValue(lnk) + if err != nil { + return nil, err + } + formattedLink := sv.val + formattedLink.Name = sv.key + + if err := processLinkValues(formattedLink); err != nil { + return nil, err + } + case shardLink: + res.cids = append(res.cids, lnk.Cid) + default: + return nil, fmt.Errorf("unsupported shard link type") + } + + } else { + if nextShard.val != nil { + formattedLink := &ipld.Link{ + Name: nextShard.key, + Size: nextShard.val.Size, + Cid: nextShard.val.Cid, + } + if err := processLinkValues(formattedLink); err != nil { + return nil, err + } + } else { + res.shards = append(res.shards, nextShard) + } + } + } + return res, nil +} + +// parallelShardWalk is quite similar to the DAG walking algorithm from https://github.com/ipfs/boxo/ipld/merkledag/blob/594e515f162e764183243b72c2ba84f743424c8c/merkledag.go#L464 +// However, there are a few notable differences: +// 1. Some children are actualized Shard structs and some are in the blockstore, this will leverage walking over the in memory Shards as well as the stored blocks +// 2. Instead of just passing each child into the worker pool by itself we group them so that we can leverage optimizations from GetMany. +// This optimization also makes the walk a little more biased towards depth (as opposed to BFS) in the earlier part of the DAG. +// This is particularly helpful for operations like estimating the directory size which should complete quickly when possible. +// 3. None of the extra options from that package are needed +func parallelShardWalk(ctx context.Context, root *Shard, dserv ipld.DAGService, processShardValues func(formattedLink *ipld.Link) error) error { + const concurrency = 32 + + var visitlk sync.Mutex + visitSet := cid.NewSet() + visit := visitSet.Visit + + // Setup synchronization + grp, errGrpCtx := errgroup.WithContext(ctx) + + // Input and output queues for workers. 
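+	// `feed` carries batches of work into the pool, `out` carries newly
+	// discovered children back to the dispatcher, and `done` lets the
+	// dispatcher track how many batches are still in flight.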
+ feed := make(chan *listCidsAndShards) + out := make(chan *listCidsAndShards) + done := make(chan struct{}) + + for i := 0; i < concurrency; i++ { + grp.Go(func() error { + for feedChildren := range feed { + for _, nextShard := range feedChildren.shards { + nextChildren, err := nextShard.walkChildren(processShardValues) + if err != nil { + return err + } + + select { + case out <- nextChildren: + case <-errGrpCtx.Done(): + return nil + } + } + + var linksToVisit []cid.Cid + for _, nextCid := range feedChildren.cids { + var shouldVisit bool + + visitlk.Lock() + shouldVisit = visit(nextCid) + visitlk.Unlock() + + if shouldVisit { + linksToVisit = append(linksToVisit, nextCid) + } + } + + chNodes := dserv.GetMany(errGrpCtx, linksToVisit) + for optNode := range chNodes { + if optNode.Err != nil { + return optNode.Err + } + + nextShard, err := NewHamtFromDag(dserv, optNode.Node) + if err != nil { + return err + } + + nextChildren, err := nextShard.walkChildren(processShardValues) + if err != nil { + return err + } + + select { + case out <- nextChildren: + case <-errGrpCtx.Done(): + return nil + } + } + + select { + case done <- struct{}{}: + case <-errGrpCtx.Done(): + } + } + return nil + }) + } + + send := feed + var todoQueue []*listCidsAndShards + var inProgress int + + next := &listCidsAndShards{ + shards: []*Shard{root}, + } + +dispatcherLoop: + for { + select { + case send <- next: + inProgress++ + if len(todoQueue) > 0 { + next = todoQueue[0] + todoQueue = todoQueue[1:] + } else { + next = nil + send = nil + } + case <-done: + inProgress-- + if inProgress == 0 && next == nil { + break dispatcherLoop + } + case nextNodes := <-out: + if next == nil { + next = nextNodes + send = feed + } else { + todoQueue = append(todoQueue, nextNodes) + } + case <-errGrpCtx.Done(): + break dispatcherLoop + } + } + close(feed) + return grp.Wait() +} + +func emitResult(ctx context.Context, linkResults chan<- format.LinkResult, r format.LinkResult) { + // make sure that context cancel is processed first + // the reason is due to the concurrency of EnumerateChildrenAsync + // it's possible for EnumLinksAsync to complete and close the linkResults + // channel before this code runs + select { + case <-ctx.Done(): + return + default: + } + select { + case linkResults <- r: + case <-ctx.Done(): + } +} + +func (ds *Shard) walkTrie(ctx context.Context, cb func(*Shard) error) error { + return ds.childer.each(ctx, func(s *Shard) error { + if s.isValueNode() { + if err := cb(s); err != nil { + return err + } + } else { + if err := s.walkTrie(ctx, cb); err != nil { + return err + } + } + return nil + }) +} + +// swapValue sets the link `value` in the given key, either creating the entry +// if it didn't exist or overwriting the old one. It returns the old entry (if any). +func (ds *Shard) swapValue(ctx context.Context, hv *hashBits, key string, value *ipld.Link) (*ipld.Link, error) { + idx, err := hv.Next(ds.tableSizeLg2) + if err != nil { + return nil, err + } + + if !ds.childer.has(idx) { + // Entry does not exist, create a new one. + return nil, ds.childer.insert(key, value, idx) + } + + i := ds.childer.sliceIndex(idx) + child, err := ds.childer.get(ctx, i) + if err != nil { + return nil, err + } + + if child.isValueNode() { + // Leaf node. This is the base case of this recursive function. + if child.key == key { + // We are in the correct shard (tree level) so we modify this child + // and return. + oldValue := child.val + + if value == nil { // Remove old entry. 
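+				// rm drops the child and its link and unsets the bitfield
+				// bit, so the slot reads as empty on the next lookup.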
+				return oldValue, ds.childer.rm(idx)
+			}
+
+			child.val = value // Overwrite entry.
+			return oldValue, nil
+		}
+
+		if value == nil {
+			return nil, os.ErrNotExist
+		}
+
+		// We are in the same slot with another entry with a different key
+		// so we need to fork this leaf node into a shard with two children:
+		// the old entry and the new one being inserted here.
+		// We don't overwrite anything here so we keep:
+		// `oldValue = nil`
+
+		// The child of this shard will now be a new shard. The old child value
+		// will be a child of this new shard (along with the new value being
+		// inserted).
+		grandChild := child
+		child, err = NewShard(ds.dserv, ds.tableSize)
+		if err != nil {
+			return nil, err
+		}
+		child.builder = ds.builder
+		chhv := newConsumedHashBits(grandChild.key, hv.consumed)
+
+		// We explicitly ignore the oldValue returned by the next two insertions
+		// (which will be nil) to highlight there is no overwrite here: they are
+		// done with different keys to a new (empty) shard. (At worst this shard
+		// will create new ones until we find different slots for both.)
+		_, err = child.swapValue(ctx, hv, key, value)
+		if err != nil {
+			return nil, err
+		}
+		_, err = child.swapValue(ctx, chhv, grandChild.key, grandChild.val)
+		if err != nil {
+			return nil, err
+		}
+
+		// Replace this leaf node with the new Shard node.
+		ds.childer.set(child, i)
+		return nil, nil
+	} else {
+		// We are in a Shard (internal node). We will recursively call this
+		// function until finding the leaf (the logic of the `if` case above).
+		oldValue, err := child.swapValue(ctx, hv, key, value)
+		if err != nil {
+			return nil, err
+		}
+
+		if value == nil {
+			// We have removed an entry, check if we should remove shards
+			// as well.
+			switch child.childer.length() {
+			case 0:
+				// Empty sub-shard, prune it. This shouldn't normally ever
+				// happen, but in the event another implementation creates
+				// flawed structures, this will help to normalize them.
+				return oldValue, ds.childer.rm(idx)
+			case 1:
+				// The single child _should_ be a value by
+				// induction. However, we allow for it to be a
+				// shard in case an implementation is broken.
+
+				// Have we loaded the child? Prefer that.
+				schild := child.childer.child(0)
+				if schild != nil {
+					if schild.isValueNode() {
+						ds.childer.set(schild, i)
+					}
+					return oldValue, nil
+				}
+
+				// Otherwise, work with the link.
+				slnk := child.childer.link(0)
+				var lnkType linkType
+				lnkType, err = child.childer.sd.childLinkType(slnk)
+				if err != nil {
+					return nil, err
+				}
+				if lnkType == shardValueLink {
+					// sub-shard with a single value element, collapse it
+					ds.childer.setLink(slnk, i)
+				}
+				return oldValue, nil
+			}
+		}
+
+		return oldValue, nil
+	}
+}
+
+// linkNamePrefix takes in the bitfield index of an entry and returns its hex prefix
+func (ds *Shard) linkNamePrefix(idx int) string {
+	return fmt.Sprintf(ds.prefixPadStr, idx)
+}
+
+// childer wraps the links, children and bitfield
+// and provides basic operations (get, rm, insert and set) for manipulating children.
+// The slices `links` and `children` are always coordinated to have the entries
+// in the same index. A `childIndex` belonging to one of the original `Shard.size`
+// entries corresponds to a `sliceIndex` in `links` and `children` (the conversion
+// is done through `bitfield`).
+type childer struct {
+	sd       *Shard
+	dserv    ipld.DAGService
+	bitfield bitfield.Bitfield
+
+	// Only one of links/children will be non-nil for every child/link.
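+	// A loaded child lives in `children`; an unloaded one is reachable only
+	// through its `links` entry and is swapped into `children` on first
+	// access (see loadChild).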
+	links    []*ipld.Link
+	children []*Shard
+}
+
+const maximumHamtWidth = 1 << 10 // FIXME: Spec this and decide on a correct value
+
+func newChilder(ds ipld.DAGService, size int) (*childer, error) {
+	if size > maximumHamtWidth {
+		return nil, fmt.Errorf("hamt width (%d) exceeds maximum allowed (%d)", size, maximumHamtWidth)
+	}
+	bf, err := bitfield.NewBitfield(size)
+	if err != nil {
+		return nil, err
+	}
+
+	return &childer{
+		dserv:    ds,
+		bitfield: bf,
+	}, nil
+}
+
+func (s *childer) makeChilder(data []byte, links []*ipld.Link) *childer {
+	s.children = make([]*Shard, len(links))
+	s.bitfield.SetBytes(data)
+	if len(links) > 0 {
+		s.links = make([]*ipld.Link, len(links))
+		copy(s.links, links)
+	}
+
+	return s
+}
+
+// Return the `sliceIndex` associated with a child.
+func (s *childer) sliceIndex(childIndex int) (sliceIndex int) {
+	return s.bitfield.OnesBefore(childIndex)
+}
+
+func (s *childer) child(sliceIndex int) *Shard {
+	return s.children[sliceIndex]
+}
+
+func (s *childer) link(sliceIndex int) *ipld.Link {
+	return s.links[sliceIndex]
+}
+
+func (s *childer) insert(key string, lnk *ipld.Link, idx int) error {
+	if lnk == nil {
+		return os.ErrNotExist
+	}
+
+	lnk.Name = s.sd.linkNamePrefix(idx) + key
+	i := s.sliceIndex(idx)
+
+	sd, err := NewShardValue(s.dserv, 256, key, lnk)
+	if err != nil {
+		return err
+	}
+
+	s.children = append(s.children[:i], append([]*Shard{sd}, s.children[i:]...)...)
+	s.links = append(s.links[:i], append([]*ipld.Link{nil}, s.links[i:]...)...)
+	// Add a `nil` placeholder in `links` so the rest of the entries keep the same
+	// index as `children`.
+	s.bitfield.SetBit(idx)
+
+	return nil
+}
+
+func (s *childer) set(sd *Shard, i int) {
+	s.children[i] = sd
+	s.links[i] = nil
+}
+
+func (s *childer) setLink(lnk *ipld.Link, i int) {
+	s.children[i] = nil
+	s.links[i] = lnk
+}
+
+func (s *childer) rm(childIndex int) error {
+	i := s.sliceIndex(childIndex)
+
+	if err := s.check(i); err != nil {
+		return err
+	}
+
+	copy(s.children[i:], s.children[i+1:])
+	s.children = s.children[:len(s.children)-1]
+
+	copy(s.links[i:], s.links[i+1:])
+	s.links = s.links[:len(s.links)-1]
+
+	s.bitfield.UnsetBit(childIndex)
+
+	return nil
+}
+
+// get returns the i'th child of this shard. If it is cached in the
+// children array, it will return it from there. Otherwise, it loads the child
+// node from disk.
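+//
+// Callers first translate a `childIndex` into the packed `sliceIndex`, e.g.:
+//
+//	i := s.sliceIndex(childIndex)
+//	child, err := s.get(ctx, i)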
+func (s *childer) get(ctx context.Context, sliceIndex int) (*Shard, error) { + if err := s.check(sliceIndex); err != nil { + return nil, err + } + + c := s.child(sliceIndex) + if c != nil { + return c, nil + } + + return s.loadChild(ctx, sliceIndex) +} + +// loadChild reads the i'th child node of this shard from disk and returns it +// as a 'child' interface +func (s *childer) loadChild(ctx context.Context, sliceIndex int) (*Shard, error) { + lnk := s.link(sliceIndex) + lnkLinkType, err := s.sd.childLinkType(lnk) + if err != nil { + return nil, err + } + + var c *Shard + if lnkLinkType == shardLink { + nd, err := lnk.GetNode(ctx, s.dserv) + if err != nil { + return nil, err + } + cds, err := NewHamtFromDag(s.dserv, nd) + if err != nil { + return nil, err + } + + c = cds + } else { + s, err := s.sd.makeShardValue(lnk) + if err != nil { + return nil, err + } + c = s + } + + s.set(c, sliceIndex) + + return c, nil +} + +func (s *childer) has(childIndex int) bool { + return s.bitfield.Bit(childIndex) +} + +func (s *childer) length() int { + return len(s.children) +} + +func (s *childer) each(ctx context.Context, cb func(*Shard) error) error { + for i := range s.children { + c, err := s.get(ctx, i) + if err != nil { + return err + } + + if err := cb(c); err != nil { + return err + } + } + + return nil +} + +func (s *childer) check(sliceIndex int) error { + if sliceIndex >= len(s.children) || sliceIndex < 0 { + return fmt.Errorf("invalid index passed to operate children (likely corrupt bitfield)") + } + + if len(s.children) != len(s.links) { + return fmt.Errorf("inconsistent lengths between children array and Links array") + } + + return nil +} diff --git a/unixfs/hamt/hamt_stress_test.go b/unixfs/hamt/hamt_stress_test.go new file mode 100644 index 0000000000..07c38fbb42 --- /dev/null +++ b/unixfs/hamt/hamt_stress_test.go @@ -0,0 +1,291 @@ +package hamt + +import ( + "context" + "fmt" + "math/rand" + "os" + "testing" + "time" + + ft "github.com/ipfs/boxo/unixfs" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" + + ipld "github.com/ipfs/go-ipld-format" +) + +func getNames(prefix string, count int) []string { + out := make([]string, count) + for i := 0; i < count; i++ { + out[i] = fmt.Sprintf("%s%d", prefix, i) + } + return out +} + +const ( + opAdd = iota + opDel + opFind +) + +type testOp struct { + Op int + Val string +} + +func stringArrToSet(arr []string) map[string]bool { + out := make(map[string]bool) + for _, s := range arr { + out[s] = true + } + return out +} + +// generate two different random sets of operations to result in the same +// ending directory (same set of entries at the end) and execute each of them +// in turn, then compare to ensure the output is the same on each. 
+func TestOrderConsistency(t *testing.T) { + seed := time.Now().UnixNano() + t.Logf("using seed = %d", seed) + ds := mdtest.Mock() + + shardWidth := 1024 + + keep := getNames("good", 4000) + temp := getNames("tempo", 6000) + + ops := genOpSet(seed, keep, temp) + s, err := executeOpSet(t, ds, shardWidth, ops) + if err != nil { + t.Fatal(err) + } + + err = validateOpSetCompletion(t, s, keep, temp) + if err != nil { + t.Fatal(err) + } + + ops2 := genOpSet(seed+1000, keep, temp) + s2, err := executeOpSet(t, ds, shardWidth, ops2) + if err != nil { + t.Fatal(err) + } + + err = validateOpSetCompletion(t, s2, keep, temp) + if err != nil { + t.Fatal(err) + } + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + nd2, err := s2.Node() + if err != nil { + t.Fatal(err) + } + + k := nd.Cid() + k2 := nd2.Cid() + + if !k.Equals(k2) { + t.Fatal("got different results: ", k, k2) + } +} + +func validateOpSetCompletion(t *testing.T, s *Shard, keep, temp []string) error { + ctx := context.TODO() + for _, n := range keep { + _, err := s.Find(ctx, n) + if err != nil { + return fmt.Errorf("couldnt find %s: %s", n, err) + } + } + + for _, n := range temp { + _, err := s.Find(ctx, n) + if err != os.ErrNotExist { + return fmt.Errorf("expected not to find: %s", err) + } + } + + return nil +} + +func executeOpSet(t *testing.T, ds ipld.DAGService, width int, ops []testOp) (*Shard, error) { + ctx := context.TODO() + s, err := NewShard(ds, width) + if err != nil { + return nil, err + } + + e := ft.EmptyDirNode() + ds.Add(ctx, e) + + for _, o := range ops { + switch o.Op { + case opAdd: + err := s.Set(ctx, o.Val, e) + if err != nil { + return nil, fmt.Errorf("inserting %s: %s", o.Val, err) + } + case opDel: + err := s.Remove(ctx, o.Val) + if err != nil { + return nil, fmt.Errorf("deleting %s: %s", o.Val, err) + } + case opFind: + _, err := s.Find(ctx, o.Val) + if err != nil { + return nil, fmt.Errorf("finding %s: %s", o.Val, err) + } + } + } + + return s, nil +} + +func genOpSet(seed int64, keep, temp []string) []testOp { + tempset := stringArrToSet(temp) + + allnames := append(keep, temp...) 
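+	// The loop below drains `allnames` (adds) and `todel` (deletes of the
+	// temporary names) in random order until both are empty.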
+ shuffle(seed, allnames) + + var todel []string + + var ops []testOp + + for { + n := len(allnames) + len(todel) + if n == 0 { + return ops + } + + rn := rand.Intn(n) + + if rn < len(allnames) { + next := allnames[0] + allnames = allnames[1:] + ops = append(ops, testOp{ + Op: opAdd, + Val: next, + }) + + if tempset[next] { + todel = append(todel, next) + } + } else { + shuffle(seed+100, todel) + next := todel[0] + todel = todel[1:] + + ops = append(ops, testOp{ + Op: opDel, + Val: next, + }) + } + } +} + +// executes the given op set with a repl to allow easier debugging +/*func debugExecuteOpSet(ds node.DAGService, width int, ops []testOp) (*Shard, error) { + + s, err := NewShard(ds, width) + if err != nil { + return nil, err + } + + e := ft.EmptyDirNode() + ds.Add(e) + ctx := context.TODO() + + run := 0 + + opnames := map[int]string{ + opAdd: "add", + opDel: "del", + } + +mainloop: + for i := 0; i < len(ops); i++ { + o := ops[i] + + fmt.Printf("Op %d: %s %s\n", i, opnames[o.Op], o.Val) + for run == 0 { + cmd := readCommand() + parts := strings.Split(cmd, " ") + switch parts[0] { + case "": + run = 1 + case "find": + _, err := s.Find(ctx, parts[1]) + if err == nil { + fmt.Println("success") + } else { + fmt.Println(err) + } + case "run": + if len(parts) > 1 { + n, err := strconv.Atoi(parts[1]) + if err != nil { + panic(err) + } + + run = n + } else { + run = -1 + } + case "lookop": + for k = 0; k < len(ops); k++ { + if ops[k].Val == parts[1] { + fmt.Printf(" Op %d: %s %s\n", k, opnames[ops[k].Op], parts[1]) + } + } + case "restart": + var err error + s, err = NewShard(ds, width) + if err != nil { + panic(err) + } + i = -1 + continue mainloop + case "print": + nd, err := s.Node() + if err != nil { + panic(err) + } + printDag(ds, nd.(*dag.ProtoNode), 0) + } + } + run-- + + switch o.Op { + case opAdd: + err := s.Set(ctx, o.Val, e) + if err != nil { + return nil, fmt.Errorf("inserting %s: %s", o.Val, err) + } + case opDel: + fmt.Println("deleting: ", o.Val) + err := s.Remove(ctx, o.Val) + if err != nil { + return nil, fmt.Errorf("deleting %s: %s", o.Val, err) + } + case opFind: + _, err := s.Find(ctx, o.Val) + if err != nil { + return nil, fmt.Errorf("finding %s: %s", o.Val, err) + } + } + } + + return s, nil +} + +func readCommand() string { + fmt.Print("> ") + scan := bufio.NewScanner(os.Stdin) + scan.Scan() + return scan.Text() +}*/ diff --git a/unixfs/hamt/hamt_test.go b/unixfs/hamt/hamt_test.go new file mode 100644 index 0000000000..b4f7704759 --- /dev/null +++ b/unixfs/hamt/hamt_test.go @@ -0,0 +1,752 @@ +package hamt + +import ( + "context" + "fmt" + "math/rand" + "os" + "sort" + "testing" + "time" + + ft "github.com/ipfs/boxo/unixfs" + ipld "github.com/ipfs/go-ipld-format" + dag "github.com/ipfs/boxo/ipld/merkledag" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" +) + +func shuffle(seed int64, arr []string) { + r := rand.New(rand.NewSource(seed)) + for i := 0; i < len(arr); i++ { + a := r.Intn(len(arr)) + b := r.Intn(len(arr)) + arr[a], arr[b] = arr[b], arr[a] + } +} + +func makeDir(ds ipld.DAGService, size int) ([]string, *Shard, error) { + return makeDirWidth(ds, size, 256) +} + +func makeDirWidth(ds ipld.DAGService, size, width int) ([]string, *Shard, error) { + ctx := context.Background() + + s, err := NewShard(ds, width) + if err != nil { + return nil, nil, err + } + + var dirs []string + for i := 0; i < size; i++ { + dirs = append(dirs, fmt.Sprintf("DIRNAME%d", i)) + } + + shuffle(time.Now().UnixNano(), dirs) + + for i := 0; i < len(dirs); i++ { + nd := ft.EmptyDirNode() + 
err := ds.Add(ctx, nd) + if err != nil { + return nil, nil, err + } + err = s.Set(ctx, dirs[i], nd) + if err != nil { + return nil, nil, err + } + } + + return dirs, s, nil +} + +func assertLink(s *Shard, name string, found bool) error { + _, err := s.Find(context.Background(), name) + switch err { + case os.ErrNotExist: + if found { + return err + } + + return nil + case nil: + if found { + return nil + } + + return fmt.Errorf("expected not to find link named %s", name) + default: + return err + } +} + +func assertLinksEqual(linksA []*ipld.Link, linksB []*ipld.Link) error { + + if len(linksA) != len(linksB) { + return fmt.Errorf("links arrays are different sizes") + } + + sort.Stable(dag.LinkSlice(linksA)) + sort.Stable(dag.LinkSlice(linksB)) + for i, a := range linksA { + b := linksB[i] + if a.Name != b.Name { + return fmt.Errorf("links names mismatch") + } + + if a.Cid.String() != b.Cid.String() { + return fmt.Errorf("link hashes dont match") + } + + if a.Size != b.Size { + return fmt.Errorf("link sizes not the same") + } + } + + return nil +} + +func assertSerializationWorks(ds ipld.DAGService, s *Shard) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nd, err := s.Node() + if err != nil { + return err + } + + nds, err := NewHamtFromDag(ds, nd) + if err != nil { + return err + } + + linksA, err := s.EnumLinks(ctx) + if err != nil { + return err + } + + linksB, err := nds.EnumLinks(ctx) + if err != nil { + return err + } + + return assertLinksEqual(linksA, linksB) +} + +func TestBasicSet(t *testing.T) { + ds := mdtest.Mock() + for _, w := range []int{128, 256, 512, 1024} { + t.Run(fmt.Sprintf("BasicSet%d", w), func(t *testing.T) { + names, s, err := makeDirWidth(ds, 1000, w) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + for _, d := range names { + _, err := s.Find(ctx, d) + if err != nil { + t.Fatal(err) + } + } + }) + } +} + +func TestDirBuilding(t *testing.T) { + ds := mdtest.Mock() + _, _ = NewShard(ds, 256) + + _, s, err := makeDir(ds, 200) + if err != nil { + t.Fatal(err) + } + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + //printDag(ds, nd, 0) + + k := nd.Cid() + + if k.String() != "QmY89TkSEVHykWMHDmyejSWFj9CYNtvzw4UwnT9xbc4Zjc" { + t.Fatalf("output didnt match what we expected (got %s)", k.String()) + } +} + +func TestShardReload(t *testing.T) { + ds := mdtest.Mock() + _, _ = NewShard(ds, 256) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, s, err := makeDir(ds, 200) + if err != nil { + t.Fatal(err) + } + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + nds, err := NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + lnks, err := nds.EnumLinks(ctx) + if err != nil { + t.Fatal(err) + } + + if len(lnks) != 200 { + t.Fatal("not enough links back") + } + + _, err = nds.Find(ctx, "DIRNAME50") + if err != nil { + t.Fatal(err) + } + + // Now test roundtrip marshal with no operations + + nds, err = NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + ond, err := nds.Node() + if err != nil { + t.Fatal(err) + } + + outk := ond.Cid() + ndk := nd.Cid() + + if !outk.Equals(ndk) { + t.Fatal("roundtrip serialization failed") + } +} + +func TestRemoveElems(t *testing.T) { + ds := mdtest.Mock() + dirs, s, err := makeDir(ds, 500) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + for i := 0; i < 100; i++ { + err := s.Remove(ctx, fmt.Sprintf("NOTEXIST%d", rand.Int())) + if err != os.ErrNotExist { + t.Fatal("shouldnt be able to 
remove things that don't exist") + } + } + + for _, d := range dirs { + _, err := s.Find(ctx, d) + if err != nil { + t.Fatal(err) + } + } + + shuffle(time.Now().UnixNano(), dirs) + + for _, d := range dirs { + err := s.Remove(ctx, d) + if err != nil { + t.Fatal(err) + } + } + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + if len(nd.Links()) > 0 { + t.Fatal("shouldnt have any links here") + } + + err = s.Remove(ctx, "doesnt exist") + if err != os.ErrNotExist { + t.Fatal("expected error does not exist") + } +} + +func TestRemoveAfterMarshal(t *testing.T) { + ds := mdtest.Mock() + dirs, s, err := makeDir(ds, 500) + if err != nil { + t.Fatal(err) + } + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + s, err = NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + shuffle(time.Now().UnixNano(), dirs) + + for i, d := range dirs { + err := s.Remove(ctx, d) + if err != nil { + t.Fatalf("%d/%d: %s", i, len(dirs), err) + } + } + + nd, err = s.Node() + if err != nil { + t.Fatal(err) + } + + if len(nd.Links()) > 0 { + t.Fatal("shouldnt have any links here") + } + + err = s.Remove(ctx, "doesnt exist") + if err != os.ErrNotExist { + t.Fatal("expected error does not exist") + } +} + +func TestSetAfterMarshal(t *testing.T) { + ds := mdtest.Mock() + _, s, err := makeDir(ds, 300) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + nds, err := NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + empty := ft.EmptyDirNode() + err := nds.Set(ctx, fmt.Sprintf("moredirs%d", i), empty) + if err != nil { + t.Fatal(err) + } + } + + nd, err = nds.Node() + if err != nil { + t.Fatal(err) + } + nds, err = NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + links, err := nds.EnumLinks(ctx) + if err != nil { + t.Fatal(err) + } + + if len(links) != 400 { + t.Fatal("expected 400 links") + } + + err = assertSerializationWorks(ds, nds) + if err != nil { + t.Fatal(err) + } +} + +func TestEnumLinksAsync(t *testing.T) { + ds := mdtest.Mock() + _, s, err := makeDir(ds, 300) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + nds, err := NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + linksA, err := nds.EnumLinks(ctx) + if err != nil { + t.Fatal(err) + } + + linkResults := nds.EnumLinksAsync(ctx) + + var linksB []*ipld.Link + + for linkResult := range linkResults { + if linkResult.Err != nil { + t.Fatal(linkResult.Err) + } + linksB = append(linksB, linkResult.Link) + } + + err = assertLinksEqual(linksA, linksB) + if err != nil { + t.Fatal(err) + } +} + +func TestDuplicateAddShard(t *testing.T) { + ds := mdtest.Mock() + dir, _ := NewShard(ds, 256) + nd := new(dag.ProtoNode) + ctx := context.Background() + + err := dir.Set(ctx, "test", nd) + if err != nil { + t.Fatal(err) + } + + err = dir.Set(ctx, "test", nd) + if err != nil { + t.Fatal(err) + } + + node, err := dir.Node() + if err != nil { + t.Fatal(err) + } + dir, err = NewHamtFromDag(ds, node) + if err != nil { + t.Fatal(err) + } + + lnks, err := dir.EnumLinks(ctx) + if err != nil { + t.Fatal(err) + } + + if len(lnks) != 1 { + t.Fatal("expected only one link") + } +} + +// fix https://github.com/ipfs/kubo/issues/9063 +func TestSetLink(t *testing.T) { + ds := mdtest.Mock() + dir, _ := NewShard(ds, 256) + _, s, err := makeDir(ds, 300) + if err != nil { + t.Fatal(err) + } + + lnk, err := 
s.Link() + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + err = dir.SetLink(ctx, "test", lnk) + if err != nil { + t.Fatal(err) + } + + if len(dir.childer.children) != 1 { + t.Fatal("no child") + } + + for _, sh := range dir.childer.children { + if sh.childer == nil { + t.Fatal("no childer on shard") + } + } +} + +func TestLoadFailsFromNonShard(t *testing.T) { + ds := mdtest.Mock() + nd := ft.EmptyDirNode() + + _, err := NewHamtFromDag(ds, nd) + if err == nil { + t.Fatal("expected dir shard creation to fail when given normal directory") + } + + nd = new(dag.ProtoNode) + + _, err = NewHamtFromDag(ds, nd) + if err == nil { + t.Fatal("expected dir shard creation to fail when given normal directory") + } +} + +func TestFindNonExisting(t *testing.T) { + ds := mdtest.Mock() + _, s, err := makeDir(ds, 100) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + for i := 0; i < 200; i++ { + _, err := s.Find(ctx, fmt.Sprintf("notfound%d", i)) + if err != os.ErrNotExist { + t.Fatal("expected ErrNotExist") + } + } +} + +func TestRemoveElemsAfterMarshal(t *testing.T) { + ds := mdtest.Mock() + dirs, s, err := makeDir(ds, 30) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + sort.Strings(dirs) + + err = s.Remove(ctx, dirs[0]) + if err != nil { + t.Fatal(err) + } + + out, err := s.Find(ctx, dirs[0]) + if err == nil { + t.Fatal("expected error, got: ", out) + } + + nd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + nds, err := NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + _, err = nds.Find(ctx, dirs[0]) + if err == nil { + t.Fatal("expected not to find ", dirs[0]) + } + + for _, d := range dirs[1:] { + _, err := nds.Find(ctx, d) + if err != nil { + t.Fatal("could not find expected link after unmarshaling") + } + } + + for _, d := range dirs[1:] { + err := nds.Remove(ctx, d) + if err != nil { + t.Fatal(err) + } + } + + nd, err = nds.Node() + if err != nil { + t.Fatal(err) + } + nds, err = NewHamtFromDag(ds, nd) + if err != nil { + t.Fatal(err) + } + + links, err := nds.EnumLinks(ctx) + if err != nil { + t.Fatal(err) + } + + if len(links) != 0 { + t.Fatal("expected all links to be removed") + } + + err = assertSerializationWorks(ds, nds) + if err != nil { + t.Fatal(err) + } +} + +func TestBitfieldIndexing(t *testing.T) { + ds := mdtest.Mock() + s, _ := NewShard(ds, 256) + + set := func(i int) { + s.childer.bitfield.SetBit(i) + } + + assert := func(i int, val int) { + if s.childer.sliceIndex(i) != val { + t.Fatalf("expected index %d to be %d", i, val) + } + } + + assert(50, 0) + set(4) + set(5) + set(60) + + assert(10, 2) + set(3) + assert(10, 3) + assert(1, 0) + + assert(100, 4) + set(50) + assert(45, 3) + set(100) + assert(100, 5) +} + +// test adding a sharded directory node as the child of another directory node. +// if improperly implemented, the parent hamt may assume the child is a part of +// itself. 
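These tests exercise the Shard API through the helpers above. As a compact reference before the next test, a minimal round trip through the same public API might look like the following editorial sketch (not part of the patch; it assumes the in-memory mdtest DAGService used throughout this file):

```go
// Editorial sketch: create a shard, add an entry, serialize it,
// and reload it from the DAG. All names come from the API under test.
func exampleShardRoundTrip() error {
	ctx := context.Background()
	ds := mdtest.Mock()

	s, err := NewShard(ds, 256) // the width must be a power of two
	if err != nil {
		return err
	}
	if err := s.Set(ctx, "file1", ft.EmptyDirNode()); err != nil {
		return err
	}

	nd, err := s.Node() // serialize the shard root to a DAG node
	if err != nil {
		return err
	}
	reloaded, err := NewHamtFromDag(ds, nd) // load it back
	if err != nil {
		return err
	}
	_, err = reloaded.Find(ctx, "file1") // the entry survives the round trip
	return err
}
```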
+func TestSetHamtChild(t *testing.T) { + ctx := context.Background() + + ds := mdtest.Mock() + s, _ := NewShard(ds, 256) + + e := ft.EmptyDirNode() + ds.Add(ctx, e) + + err := s.Set(ctx, "bar", e) + if err != nil { + t.Fatal(err) + } + + snd, err := s.Node() + if err != nil { + t.Fatal(err) + } + + _, ns, err := makeDir(ds, 50) + if err != nil { + t.Fatal(err) + } + + err = ns.Set(ctx, "foo", snd) + if err != nil { + t.Fatal(err) + } + + nsnd, err := ns.Node() + if err != nil { + t.Fatal(err) + } + + hs, err := NewHamtFromDag(ds, nsnd) + if err != nil { + t.Fatal(err) + } + + err = assertLink(hs, "bar", false) + if err != nil { + t.Fatal(err) + } + + err = assertLink(hs, "foo", true) + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkHAMTWalk(b *testing.B) { + ctx := context.Background() + + ds := mdtest.Mock() + sh, _ := NewShard(ds, 256) + nd, err := sh.Node() + if err != nil { + b.Fatal(err) + } + + err = ds.Add(ctx, nd) + if err != nil { + b.Fatal(err) + } + ds.Add(ctx, ft.EmptyDirNode()) + + s, err := NewHamtFromDag(ds, nd) + if err != nil { + b.Fatal(err) + } + + for j := 0; j < 1000; j++ { + err = s.Set(ctx, fmt.Sprintf("%d", j), ft.EmptyDirNode()) + if err != nil { + b.Fatal(err) + } + } + + for i := 0; i < b.N; i++ { + cnt := 0 + err = s.ForEachLink(ctx, func(l *ipld.Link) error { + cnt++ + return nil + }) + if err != nil { + b.Fatal(err) + } + if cnt < 1000 { + b.Fatal("expected 100 children") + } + } +} + +func BenchmarkHAMTSet(b *testing.B) { + ctx := context.Background() + + ds := mdtest.Mock() + sh, _ := NewShard(ds, 256) + nd, err := sh.Node() + if err != nil { + b.Fatal(err) + } + + err = ds.Add(ctx, nd) + if err != nil { + b.Fatal(err) + } + ds.Add(ctx, ft.EmptyDirNode()) + + for i := 0; i < b.N; i++ { + s, err := NewHamtFromDag(ds, nd) + if err != nil { + b.Fatal(err) + } + + err = s.Set(context.TODO(), fmt.Sprint(i), ft.EmptyDirNode()) + if err != nil { + b.Fatal(err) + } + + out, err := s.Node() + if err != nil { + b.Fatal(err) + } + + nd = out + } +} + +func TestHamtBadSize(t *testing.T) { + for _, size := range [...]int{-8, 7, 2, 1337, 1024 + 8, -3} { + _, err := NewShard(nil, size) + if err == nil { + t.Errorf("should have failed to construct hamt with bad size: %d", size) + } + } +} diff --git a/unixfs/hamt/util.go b/unixfs/hamt/util.go new file mode 100644 index 0000000000..2216a217f6 --- /dev/null +++ b/unixfs/hamt/util.go @@ -0,0 +1,80 @@ +package hamt + +import ( + "fmt" + "math/bits" + + "github.com/ipfs/boxo/unixfs/internal" + + "github.com/spaolacci/murmur3" +) + +// hashBits is a helper that allows the reading of the 'next n bits' as an integer. +type hashBits struct { + b []byte + consumed int +} + +func newHashBits(val string) *hashBits { + return &hashBits{b: internal.HAMTHashFunction([]byte(val))} +} + +func newConsumedHashBits(val string, consumed int) *hashBits { + hv := &hashBits{b: internal.HAMTHashFunction([]byte(val))} + hv.consumed = consumed + return hv +} + +func mkmask(n int) byte { + return (1 << uint(n)) - 1 +} + +// Next returns the next 'i' bits of the hashBits value as an integer, or an +// error if there aren't enough bits. 
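To make the masking and shifting in the implementation below concrete, here is a small editorial example (not part of the patch) of how bits are consumed most-significant-bit first from the hash:

```go
// Editorial sketch: three successive 3-bit reads from the bytes
// 0b10110100, 0b11000000 yield 5, 5 and 1; the third read spans
// the byte boundary (two zero bits, then the top bit of byte two).
func exampleHashBits() {
	hb := &hashBits{b: []byte{0b10110100, 0b11000000}}
	v1, _ := hb.Next(3)     // 0b101 = 5
	v2, _ := hb.Next(3)     // 0b101 = 5
	v3, _ := hb.Next(3)     // 0b001 = 1
	fmt.Println(v1, v2, v3) // prints: 5 5 1
}
```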
+func (hb *hashBits) Next(i int) (int, error) {
+	if hb.consumed+i > len(hb.b)*8 {
+		return 0, fmt.Errorf("sharded directory too deep")
+	}
+	return hb.next(i), nil
+}
+
+func (hb *hashBits) next(i int) int {
+	curbi := hb.consumed / 8
+	leftb := 8 - (hb.consumed % 8)
+
+	curb := hb.b[curbi]
+	if i == leftb {
+		out := int(mkmask(i) & curb)
+		hb.consumed += i
+		return out
+	} else if i < leftb {
+		a := curb & mkmask(leftb)  // mask out the high bits we don't want
+		b := a & ^mkmask(leftb-i)  // mask out the low bits we don't want
+		c := b >> uint(leftb-i)    // shift what's left down
+		hb.consumed += i
+		return int(c)
+	} else {
+		out := int(mkmask(leftb) & curb)
+		out <<= uint(i - leftb)
+		hb.consumed += leftb
+		out += hb.next(i - leftb)
+		return out
+	}
+}
+
+func Logtwo(v int) (int, error) {
+	if v <= 0 {
+		return 0, fmt.Errorf("hamt size should be a power of two")
+	}
+	lg2 := bits.TrailingZeros(uint(v))
+	if 1<<uint(lg2) != v {
+		return 0, fmt.Errorf("hamt size should be a power of two")
+	}
+	return lg2, nil
+}
+	for coff := nbytes - 4096; coff >= 0; coff -= 4096 {
+		t.Log(coff)
+		n, err := rs.Seek(coff, io.SeekStart)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if n != coff {
+			t.Fatal("wasnt able to seek to the right position")
+		}
+		nread, err := rs.Read(out[coff : coff+4096])
+		if err != nil {
+			t.Fatal(err)
+		}
+		if nread != 4096 {
+			t.Fatal("didnt read the correct number of bytes")
+		}
+	}
+
+	err = arrComp(out, should)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/unixfs/importer/balanced/builder.go b/unixfs/importer/balanced/builder.go
new file mode 100644
index 0000000000..6b5a7e074d
--- /dev/null
+++ b/unixfs/importer/balanced/builder.go
@@ -0,0 +1,267 @@
+// Package balanced provides methods to build balanced DAGs, which are generic
+// DAGs in which all leaves (nodes representing chunks of data) are at the same
+// distance from the root. Nodes can only have a maximum number of children; to
+// store more leaf data nodes, balanced DAGs are extended by increasing their
+// depth (and adding more intermediary nodes).
+//
+// Internal nodes are always represented by UnixFS nodes (of type `File`) encoded
+// inside DAG nodes (see the `go-unixfs` package for details of UnixFS). In
+// contrast, leaf nodes with data have multiple possible representations: UnixFS
+// nodes as above, raw nodes with just the file data (no format) and Filestore
+// nodes (that directly link to the file on disk using a format stored on a raw
+// node, see the `go-ipfs/filestore` package for details of Filestore.)
+//
+// In the case the entire file fits into just one node it will be formatted as a
+// (single) leaf node (without parent) with the possible representations already
+// mentioned. This is the only scenario where the root can be of a type other
+// than the UnixFS node.
+//
+// Notes:
+//
+// 1. In the implementation, the `FSNodeOverDag` structure is used to represent
+// the UnixFS node encoded inside the DAG node.
+// (see https://github.com/ipfs/go-ipfs/pull/5118.)
+//
+// 2. `TFile` is used for backwards-compatibility: a bug caused the leaf
+// nodes to be generated with this type instead of `TRaw`. The latter
+// should be used (as the trickle builder does).
+// (See https://github.com/ipfs/go-ipfs/pull/5120.)
+// +// +-------------+ +// | Root 4 | +// +-------------+ +// | +// +--------------------------+----------------------------+ +// | | +// +-------------+ +-------------+ +// | Node 2 | | Node 5 | +// +-------------+ +-------------+ +// | | +// +-------------+-------------+ +-------------+ +// | | | +// +-------------+ +-------------+ +-------------+ +// | Node 1 | | Node 3 | | Node 6 | +// +-------------+ +-------------+ +-------------+ +// | | | +// +------+------+ +------+------+ +------+ +// | | | | | +// +=========+ +=========+ +=========+ +=========+ +=========+ +// | Chunk 1 | | Chunk 2 | | Chunk 3 | | Chunk 4 | | Chunk 5 | +// +=========+ +=========+ +=========+ +=========+ +=========+ +package balanced + +import ( + "errors" + + ft "github.com/ipfs/boxo/unixfs" + h "github.com/ipfs/boxo/unixfs/importer/helpers" + + ipld "github.com/ipfs/go-ipld-format" +) + +// Layout builds a balanced DAG layout. In a balanced DAG of depth 1, leaf nodes +// with data are added to a single `root` until the maximum number of links is +// reached. Then, to continue adding more data leaf nodes, a `newRoot` is created +// pointing to the old `root` (which will now become and intermediary node), +// increasing the depth of the DAG to 2. This will increase the maximum number of +// data leaf nodes the DAG can have (`Maxlinks() ^ depth`). The `fillNodeRec` +// function will add more intermediary child nodes to `newRoot` (which already has +// `root` as child) that in turn will have leaf nodes with data added to them. +// After that process is completed (the maximum number of links is reached), +// `fillNodeRec` will return and the loop will be repeated: the `newRoot` created +// will become the old `root` and a new root will be created again to increase the +// depth of the DAG. The process is repeated until there is no more data to add +// (i.e. the DagBuilderHelper’s Done() function returns true). +// +// The nodes are filled recursively, so the DAG is built from the bottom up. Leaf +// nodes are created first using the chunked file data and its size. The size is +// then bubbled up to the parent (internal) node, which aggregates all the sizes of +// its children and bubbles that combined size up to its parent, and so on up to +// the root. This way, a balanced DAG acts like a B-tree when seeking to a byte +// offset in the file the graph represents: each internal node uses the file size +// of its children as an index when seeking. +// +// `Layout` creates a root and hands it off to be filled: +// +// +-------------+ +// | Root 1 | +// +-------------+ +// | +// ( fillNodeRec fills in the ) +// ( chunks on the root. ) +// | +// +------+------+ +// | | +// + - - - - + + - - - - + +// | Chunk 1 | | Chunk 2 | +// + - - - - + + - - - - + +// +// ↓ +// When the root is full but there's more data... +// ↓ +// +// +-------------+ +// | Root 1 | +// +-------------+ +// | +// +------+------+ +// | | +// +=========+ +=========+ + - - - - + +// | Chunk 1 | | Chunk 2 | | Chunk 3 | +// +=========+ +=========+ + - - - - + +// +// ↓ +// ...Layout's job is to create a new root. +// ↓ +// +// +-------------+ +// | Root 2 | +// +-------------+ +// | +// +-------------+ - - - - - - - - + +// | | +// +-------------+ ( fillNodeRec creates the ) +// | Node 1 | ( branch that connects ) +// +-------------+ ( "Root 2" to "Chunk 3." 
) +// | | +// +------+------+ + - - - - -+ +// | | | +// +=========+ +=========+ + - - - - + +// | Chunk 1 | | Chunk 2 | | Chunk 3 | +// +=========+ +=========+ + - - - - + +func Layout(db *h.DagBuilderHelper) (ipld.Node, error) { + if db.Done() { + // No data, return just an empty node. + root, err := db.NewLeafNode(nil, ft.TFile) + if err != nil { + return nil, err + } + // This works without Filestore support (`ProcessFileStore`). + // TODO: Why? Is there a test case missing? + + return root, db.Add(root) + } + + // The first `root` will be a single leaf node with data + // (corner case), after that subsequent `root` nodes will + // always be internal nodes (with a depth > 0) that can + // be handled by the loop. + root, fileSize, err := db.NewLeafDataNode(ft.TFile) + if err != nil { + return nil, err + } + + // Each time a DAG of a certain `depth` is filled (because it + // has reached its maximum capacity of `db.Maxlinks()` per node) + // extend it by making it a sub-DAG of a bigger DAG with `depth+1`. + for depth := 1; !db.Done(); depth++ { + + // Add the old `root` as a child of the `newRoot`. + newRoot := db.NewFSNodeOverDag(ft.TFile) + err = newRoot.AddChild(root, fileSize, db) + if err != nil { + return nil, err + } + + // Fill the `newRoot` (that has the old `root` already as child) + // and make it the current `root` for the next iteration (when + // it will become "old"). + root, fileSize, err = fillNodeRec(db, newRoot, depth) + if err != nil { + return nil, err + } + } + + return root, db.Add(root) +} + +// fillNodeRec will "fill" the given internal (non-leaf) `node` with data by +// adding child nodes to it, either leaf data nodes (if `depth` is 1) or more +// internal nodes with higher depth (and calling itself recursively on them +// until *they* are filled with data). The data to fill the node with is +// provided by DagBuilderHelper. +// +// `node` represents a (sub-)DAG root that is being filled. If called recursively, +// it is `nil`, a new node is created. If it has been called from `Layout` (see +// diagram below) it points to the new root (that increases the depth of the DAG), +// it already has a child (the old root). New children will be added to this new +// root, and those children will in turn be filled (calling `fillNodeRec` +// recursively). +// +// +-------------+ +// | `node` | +// | (new root) | +// +-------------+ +// | +// +-------------+ - - - - - - + - - - - - - - - - - - + +// | | | +// +--------------+ + - - - - - + + - - - - - + +// | (old root) | | new child | | | +// +--------------+ + - - - - - + + - - - - - + +// | | | +// +------+------+ + - - + - - - + +// | | | | +// +=========+ +=========+ + - - - - + + - - - - + +// | Chunk 1 | | Chunk 2 | | Chunk 3 | | Chunk 4 | +// +=========+ +=========+ + - - - - + + - - - - + +// +// The `node` to be filled uses the `FSNodeOverDag` abstraction that allows adding +// child nodes without packing/unpacking the UnixFS layer node (having an internal +// `ft.FSNode` cache). +// +// It returns the `ipld.Node` representation of the passed `node` filled with +// children and the `nodeFileSize` with the total size of the file chunk (leaf) +// nodes stored under this node (parent nodes store this to enable efficient +// seeking through the DAG when reading data later). +// +// warning: **children** pinned indirectly, but input node IS NOT pinned. 
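Before the implementation, a quick sanity check on what this layout buys: every extra level multiplies the leaf capacity by `Maxlinks`. A hypothetical back-of-the-envelope helper (editorial; the 174-link figure comes from `DefaultLinksPerBlock` later in this patch, and the 256 KiB chunk size is an assumption about the default chunker):

```go
// Editorial sketch: a balanced DAG of depth d references up to
// maxlinks^d leaf chunks, so depth 1 holds about 43.5 MiB and
// depth 2 about 7.4 GiB of file data under these assumptions.
const (
	assumedMaxlinks  = 174        // see DefaultLinksPerBlock in helpers
	assumedChunkSize = 256 * 1024 // assumed default chunker block size
)

func balancedCapacity(depth int) uint64 {
	leaves := uint64(1)
	for i := 0; i < depth; i++ {
		leaves *= assumedMaxlinks // each level multiplies the leaf count
	}
	return leaves * assumedChunkSize
}
```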
+func fillNodeRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, depth int) (filledNode ipld.Node, nodeFileSize uint64, err error) { + if depth < 1 { + return nil, 0, errors.New("attempt to fillNode at depth < 1") + } + + if node == nil { + node = db.NewFSNodeOverDag(ft.TFile) + } + + // Child node created on every iteration to add to parent `node`. + // It can be a leaf node or another internal node. + var childNode ipld.Node + // File size from the child node needed to update the `FSNode` + // in `node` when adding the child. + var childFileSize uint64 + + // While we have room and there is data available to be added. + for node.NumChildren() < db.Maxlinks() && !db.Done() { + + if depth == 1 { + // Base case: add leaf node with data. + childNode, childFileSize, err = db.NewLeafDataNode(ft.TFile) + if err != nil { + return nil, 0, err + } + } else { + // Recursion case: create an internal node to in turn keep + // descending in the DAG and adding child nodes to it. + childNode, childFileSize, err = fillNodeRec(db, nil, depth-1) + if err != nil { + return nil, 0, err + } + } + + err = node.AddChild(childNode, childFileSize, db) + if err != nil { + return nil, 0, err + } + } + + nodeFileSize = node.FileSize() + + // Get the final `dag.ProtoNode` with the `FSNode` data encoded inside. + filledNode, err = node.Commit() + if err != nil { + return nil, 0, err + } + + return filledNode, nodeFileSize, nil +} diff --git a/unixfs/importer/helpers/dagbuilder.go b/unixfs/importer/helpers/dagbuilder.go new file mode 100644 index 0000000000..920c053daa --- /dev/null +++ b/unixfs/importer/helpers/dagbuilder.go @@ -0,0 +1,399 @@ +package helpers + +import ( + "context" + "errors" + "io" + "os" + + dag "github.com/ipfs/boxo/ipld/merkledag" + + ft "github.com/ipfs/boxo/unixfs" + pb "github.com/ipfs/boxo/unixfs/pb" + + chunker "github.com/ipfs/boxo/chunker" + "github.com/ipfs/boxo/files" + pi "github.com/ipfs/boxo/filestore/posinfo" + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +var ErrMissingFsRef = errors.New("missing file path or URL, can't create filestore reference") + +// DagBuilderHelper wraps together a bunch of objects needed to +// efficiently create unixfs dag trees +type DagBuilderHelper struct { + dserv ipld.DAGService + spl chunker.Splitter + recvdErr error + rawLeaves bool + nextData []byte // the next item to return. + maxlinks int + cidBuilder cid.Builder + + // Filestore support variables. + // ---------------------------- + // TODO: Encapsulate in `FilestoreNode` (which is basically what they are). + // + // Besides having the path this variable (if set) is used as a flag + // to indicate that Filestore should be used. + fullPath string + stat os.FileInfo + // Keeps track of the current file size added to the DAG (used in + // the balanced builder). It is assumed that the `DagBuilderHelper` + // is not reused to construct another DAG, but a new one (with a + // zero `offset`) is created. + offset uint64 +} + +// DagBuilderParams wraps configuration options to create a DagBuilderHelper +// from a chunker.Splitter. 
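From a caller's point of view, the flow is: fill in the params defined below, derive a helper from a splitter, and hand the helper to a layout. An editorial sketch of that flow (not part of the patch; it uses the package aliases and functions that appear elsewhere in this change):

```go
// Editorial sketch: configure the builder and run the balanced layout.
func buildBalancedDag(ds ipld.DAGService, r io.Reader) (ipld.Node, error) {
	dbp := h.DagBuilderParams{
		Dagserv:   ds,
		Maxlinks:  h.DefaultLinksPerBlock,
		RawLeaves: true, // raw leaves instead of UnixFS TFile leaves
	}
	db, err := dbp.New(chunker.NewSizeSplitter(r, 256*1024))
	if err != nil {
		return nil, err
	}
	return balanced.Layout(db) // or trickle.Layout for a trickle DAG
}
```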
+type DagBuilderParams struct { + // Maximum number of links per intermediate node + Maxlinks int + + // RawLeaves signifies that the importer should use raw ipld nodes as leaves + // instead of using the unixfs TRaw type + RawLeaves bool + + // CID Builder to use if set + CidBuilder cid.Builder + + // DAGService to write blocks to (required) + Dagserv ipld.DAGService + + // NoCopy signals to the chunker that it should track fileinfo for + // filestore adds + NoCopy bool +} + +// New generates a new DagBuilderHelper from the given params and a given +// chunker.Splitter as data source. +func (dbp *DagBuilderParams) New(spl chunker.Splitter) (*DagBuilderHelper, error) { + db := &DagBuilderHelper{ + dserv: dbp.Dagserv, + spl: spl, + rawLeaves: dbp.RawLeaves, + cidBuilder: dbp.CidBuilder, + maxlinks: dbp.Maxlinks, + } + if fi, ok := spl.Reader().(files.FileInfo); dbp.NoCopy && ok { + db.fullPath = fi.AbsPath() + db.stat = fi.Stat() + } + + if dbp.NoCopy && db.fullPath == "" { // Enforce NoCopy + return nil, ErrMissingFsRef + } + + return db, nil +} + +// prepareNext consumes the next item from the splitter and puts it +// in the nextData field. it is idempotent-- if nextData is full +// it will do nothing. +func (db *DagBuilderHelper) prepareNext() { + // if we already have data waiting to be consumed, we're ready + if db.nextData != nil || db.recvdErr != nil { + return + } + + db.nextData, db.recvdErr = db.spl.NextBytes() + if db.recvdErr == io.EOF { + db.recvdErr = nil + } +} + +// Done returns whether or not we're done consuming the incoming data. +func (db *DagBuilderHelper) Done() bool { + // ensure we have an accurate perspective on data + // as `done` this may be called before `next`. + db.prepareNext() // idempotent + if db.recvdErr != nil { + return false + } + return db.nextData == nil +} + +// Next returns the next chunk of data to be inserted into the dag +// if it returns nil, that signifies that the stream is at an end, and +// that the current building operation should finish. +func (db *DagBuilderHelper) Next() ([]byte, error) { + db.prepareNext() // idempotent + d := db.nextData + db.nextData = nil // signal we've consumed it + if db.recvdErr != nil { + return nil, db.recvdErr + } + return d, nil +} + +// GetDagServ returns the dagservice object this Helper is using +func (db *DagBuilderHelper) GetDagServ() ipld.DAGService { + return db.dserv +} + +// GetCidBuilder returns the internal `cid.CidBuilder` set in the builder. +func (db *DagBuilderHelper) GetCidBuilder() cid.Builder { + return db.cidBuilder +} + +// NewLeafNode creates a leaf node filled with data. If rawLeaves is +// defined then a raw leaf will be returned. Otherwise, it will create +// and return `FSNodeOverDag` with `fsNodeType`. +func (db *DagBuilderHelper) NewLeafNode(data []byte, fsNodeType pb.Data_DataType) (ipld.Node, error) { + if len(data) > BlockSizeLimit { + return nil, ErrSizeLimitExceeded + } + + if db.rawLeaves { + // Encapsulate the data in a raw node. + if db.cidBuilder == nil { + return dag.NewRawNode(data), nil + } + rawnode, err := dag.NewRawNodeWPrefix(data, db.cidBuilder) + if err != nil { + return nil, err + } + return rawnode, nil + } + + // Encapsulate the data in UnixFS node (instead of a raw node). 
+ fsNodeOverDag := db.NewFSNodeOverDag(fsNodeType) + fsNodeOverDag.SetFileData(data) + node, err := fsNodeOverDag.Commit() + if err != nil { + return nil, err + } + // TODO: Encapsulate this sequence of calls into a function that + // just returns the final `ipld.Node` avoiding going through + // `FSNodeOverDag`. + + return node, nil +} + +// FillNodeLayer will add datanodes as children to the give node until +// it is full in this layer or no more data. +// NOTE: This function creates raw data nodes so it only works +// for the `trickle.Layout`. +func (db *DagBuilderHelper) FillNodeLayer(node *FSNodeOverDag) error { + + // while we have room AND we're not done + for node.NumChildren() < db.maxlinks && !db.Done() { + child, childFileSize, err := db.NewLeafDataNode(ft.TRaw) + if err != nil { + return err + } + + if err := node.AddChild(child, childFileSize, db); err != nil { + return err + } + } + // TODO: Do we need to commit here? The caller who created the + // `FSNodeOverDag` should be in charge of that. + _, err := node.Commit() + return err +} + +// NewLeafDataNode builds the `node` with the data obtained from the +// Splitter with the given constraints (BlockSizeLimit, RawLeaves) +// specified when creating the DagBuilderHelper. It returns +// `ipld.Node` with the `dataSize` (that will be used to keep track of +// the DAG file size). The size of the data is computed here because +// after that it will be hidden by `NewLeafNode` inside a generic +// `ipld.Node` representation. +func (db *DagBuilderHelper) NewLeafDataNode(fsNodeType pb.Data_DataType) (node ipld.Node, dataSize uint64, err error) { + fileData, err := db.Next() + if err != nil { + return nil, 0, err + } + dataSize = uint64(len(fileData)) + + // Create a new leaf node containing the file chunk data. + node, err = db.NewLeafNode(fileData, fsNodeType) + if err != nil { + return nil, 0, err + } + + // Convert this leaf to a `FilestoreNode` if needed. + node = db.ProcessFileStore(node, dataSize) + + return node, dataSize, nil +} + +// ProcessFileStore generates, if Filestore is being used, the +// `FilestoreNode` representation of the `ipld.Node` that +// contains the file data. If Filestore is not being used just +// return the same node to continue with its addition to the DAG. +// +// The `db.offset` is updated at this point (instead of when +// `NewLeafDataNode` is called, both work in tandem but the +// offset is more related to this function). +func (db *DagBuilderHelper) ProcessFileStore(node ipld.Node, dataSize uint64) ipld.Node { + // Check if Filestore is being used. + if db.fullPath != "" { + // Check if the node is actually a raw node (needed for + // Filestore support). + if _, ok := node.(*dag.RawNode); ok { + fn := &pi.FilestoreNode{ + Node: node, + PosInfo: &pi.PosInfo{ + Offset: db.offset, + FullPath: db.fullPath, + Stat: db.stat, + }, + } + + // Update `offset` with the size of the data generated by `db.Next`. + db.offset += dataSize + + return fn + } + } + + // Filestore is not used, return the same `node` argument. + return node +} + +// Add inserts the given node in the DAGService. +func (db *DagBuilderHelper) Add(node ipld.Node) error { + return db.dserv.Add(context.TODO(), node) +} + +// Maxlinks returns the configured maximum number for links +// for nodes built with this helper. +func (db *DagBuilderHelper) Maxlinks() int { + return db.maxlinks +} + +// FSNodeOverDag encapsulates an `unixfs.FSNode` that will be stored in a +// `dag.ProtoNode`. 
Instead of just having a single `ipld.Node` that
+// would need to be constantly (un)packed to access and modify its
+// internal `FSNode` in the process of creating a UnixFS DAG, this
+// structure stores an `FSNode` cache to manipulate it (add child nodes)
+// directly, and only when the node has reached its final (immutable) state
+// (signaled by calling `Commit()`) is it committed to a single (indivisible)
+// `ipld.Node`.
+//
+// It is used mainly for internal (non-leaf) nodes, and for some
+// representations of data leaf nodes (that don't use raw nodes or
+// Filestore).
+//
+// It aims to replace the `UnixfsNode` structure, which encapsulated too
+// many possible node state combinations.
+//
+// TODO: Revisit the name.
+type FSNodeOverDag struct {
+	dag  *dag.ProtoNode
+	file *ft.FSNode
+}
+
+// NewFSNodeOverDag creates a new `dag.ProtoNode` and `ft.FSNode`
+// decoupled from one another (and they will stay that way until
+// `Commit` is called), with `fsNodeType` specifying the type of
+// the UnixFS layer node (either `File` or `Raw`).
+func (db *DagBuilderHelper) NewFSNodeOverDag(fsNodeType pb.Data_DataType) *FSNodeOverDag {
+	node := new(FSNodeOverDag)
+	node.dag = new(dag.ProtoNode)
+	node.dag.SetCidBuilder(db.GetCidBuilder())
+
+	node.file = ft.NewFSNode(fsNodeType)
+
+	return node
+}
+
+// NewFSNFromDag reconstructs an FSNodeOverDag node from a given dag node.
+func (db *DagBuilderHelper) NewFSNFromDag(nd *dag.ProtoNode) (*FSNodeOverDag, error) {
+	return NewFSNFromDag(nd)
+}
+
+// NewFSNFromDag reconstructs an FSNodeOverDag node from a given dag node.
+func NewFSNFromDag(nd *dag.ProtoNode) (*FSNodeOverDag, error) {
+	mb, err := ft.FSNodeFromBytes(nd.Data())
+	if err != nil {
+		return nil, err
+	}
+
+	return &FSNodeOverDag{
+		dag:  nd,
+		file: mb,
+	}, nil
+}
+
+// AddChild adds a `child` `ipld.Node` to both node layers. The
+// `dag.ProtoNode` creates a link to the child node while the
+// `ft.FSNode` stores its file size (that is, not the size of the
+// node but the size of the file data that it is storing at the
+// UnixFS layer). The child is also stored in the `DAGService`.
+func (n *FSNodeOverDag) AddChild(child ipld.Node, fileSize uint64, db *DagBuilderHelper) error {
+	err := n.dag.AddNodeLink("", child)
+	if err != nil {
+		return err
+	}
+
+	n.file.AddBlockSize(fileSize)
+
+	return db.Add(child)
+}
+
+// RemoveChild deletes the child node at the given index.
+func (n *FSNodeOverDag) RemoveChild(index int, dbh *DagBuilderHelper) {
+	n.file.RemoveBlockSize(index)
+	n.dag.SetLinks(append(n.dag.Links()[:index], n.dag.Links()[index+1:]...))
+}
+
+// Commit unifies (resolves) the cache nodes into a single `ipld.Node`
+// that represents them: the `ft.FSNode` is encoded inside the
+// `dag.ProtoNode`.
+//
+// TODO: Make it read-only after committing, and allow committing only once.
+func (n *FSNodeOverDag) Commit() (ipld.Node, error) {
+	fileData, err := n.file.GetBytes()
+	if err != nil {
+		return nil, err
+	}
+	n.dag.SetData(fileData)
+
+	return n.dag, nil
+}
+
+// NumChildren returns the number of children of the `ft.FSNode`.
+func (n *FSNodeOverDag) NumChildren() int {
+	return n.file.NumChildren()
+}
+
+// FileSize returns the `Filesize` attribute from the underlying
+// representation of the `ft.FSNode`.
+func (n *FSNodeOverDag) FileSize() uint64 {
+	return n.file.FileSize()
+}
+
+// SetFileData stores the `fileData` in the `ft.FSNode`. It
+// should be used only when `FSNodeOverDag` represents a leaf
+// node (internal nodes don't carry data, just file sizes).
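The typical leaf lifecycle condenses the sequence used by `NewLeafNode` above into three calls. A hypothetical helper, as an editorial sketch:

```go
// Editorial sketch: the create -> fill -> commit lifecycle of FSNodeOverDag.
func leafFromChunk(db *DagBuilderHelper, chunk []byte) (ipld.Node, error) {
	leaf := db.NewFSNodeOverDag(ft.TFile) // decoupled ProtoNode + FSNode pair
	leaf.SetFileData(chunk)               // stash the chunk at the UnixFS layer
	return leaf.Commit()                  // encode the FSNode into the ProtoNode
}
```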
+func (n *FSNodeOverDag) SetFileData(fileData []byte) { + n.file.SetData(fileData) +} + +// GetDagNode fills out the proper formatting for the FSNodeOverDag node +// inside of a DAG node and returns the dag node. +// TODO: Check if we have committed (passed the UnixFS information +// to the DAG layer) before returning this. +func (n *FSNodeOverDag) GetDagNode() (ipld.Node, error) { + return n.dag, nil +} + +// GetChild gets the ith child of this node from the given DAGService. +func (n *FSNodeOverDag) GetChild(ctx context.Context, i int, ds ipld.DAGService) (*FSNodeOverDag, error) { + nd, err := n.dag.Links()[i].GetNode(ctx, ds) + if err != nil { + return nil, err + } + + pbn, ok := nd.(*dag.ProtoNode) + if !ok { + return nil, dag.ErrNotProtobuf + } + + return NewFSNFromDag(pbn) +} diff --git a/unixfs/importer/helpers/helpers.go b/unixfs/importer/helpers/helpers.go new file mode 100644 index 0000000000..20cb598e62 --- /dev/null +++ b/unixfs/importer/helpers/helpers.go @@ -0,0 +1,31 @@ +package helpers + +import ( + "fmt" +) + +// BlockSizeLimit specifies the maximum size an imported block can have. +var BlockSizeLimit = 1048576 // 1 MB + +// rough estimates on expected sizes +var roughLinkBlockSize = 1 << 13 // 8KB +var roughLinkSize = 34 + 8 + 5 // sha256 multihash + size + no name + protobuf framing + +// DefaultLinksPerBlock governs how the importer decides how many links there +// will be per block. This calculation is based on expected distributions of: +// - the expected distribution of block sizes +// - the expected distribution of link sizes +// - desired access speed +// +// For now, we use: +// +// var roughLinkBlockSize = 1 << 13 // 8KB +// var roughLinkSize = 34 + 8 + 5 // sha256 multihash + size + no name +// // + protobuf framing +// var DefaultLinksPerBlock = (roughLinkBlockSize / roughLinkSize) +// = ( 8192 / 47 ) +// = (approximately) 174 +var DefaultLinksPerBlock = roughLinkBlockSize / roughLinkSize + +// ErrSizeLimitExceeded signals that a block is larger than BlockSizeLimit. +var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded") diff --git a/unixfs/importer/importer.go b/unixfs/importer/importer.go new file mode 100644 index 0000000000..64fb96a63d --- /dev/null +++ b/unixfs/importer/importer.go @@ -0,0 +1,41 @@ +// Package importer implements utilities used to create IPFS DAGs from files +// and readers. +package importer + +import ( + bal "github.com/ipfs/boxo/unixfs/importer/balanced" + h "github.com/ipfs/boxo/unixfs/importer/helpers" + trickle "github.com/ipfs/boxo/unixfs/importer/trickle" + + chunker "github.com/ipfs/boxo/chunker" + ipld "github.com/ipfs/go-ipld-format" +) + +// BuildDagFromReader creates a DAG given a DAGService and a Splitter +// implementation (Splitters are io.Readers), using a Balanced layout. +func BuildDagFromReader(ds ipld.DAGService, spl chunker.Splitter) (ipld.Node, error) { + dbp := h.DagBuilderParams{ + Dagserv: ds, + Maxlinks: h.DefaultLinksPerBlock, + } + db, err := dbp.New(spl) + if err != nil { + return nil, err + } + return bal.Layout(db) +} + +// BuildTrickleDagFromReader creates a DAG given a DAGService and a Splitter +// implementation (Splitters are io.Readers), using a Trickle Layout. 
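An end-to-end editorial sketch of the two entry points in this file, mirroring the tests later in this patch (it assumes the in-memory mdtest DAGService and a `bytes` import, neither of which this file itself uses):

```go
// Editorial sketch: import a byte slice and read the file back out.
func importAndReadBack(data []byte) ([]byte, error) {
	ds := mdtest.Mock()
	nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(bytes.NewReader(data)))
	if err != nil {
		return nil, err
	}
	dr, err := uio.NewDagReader(context.Background(), nd, ds)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(dr)
}
```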
+func BuildTrickleDagFromReader(ds ipld.DAGService, spl chunker.Splitter) (ipld.Node, error) { + dbp := h.DagBuilderParams{ + Dagserv: ds, + Maxlinks: h.DefaultLinksPerBlock, + } + + db, err := dbp.New(spl) + if err != nil { + return nil, err + } + return trickle.Layout(db) +} diff --git a/unixfs/importer/importer_test.go b/unixfs/importer/importer_test.go new file mode 100644 index 0000000000..3d608e4cd9 --- /dev/null +++ b/unixfs/importer/importer_test.go @@ -0,0 +1,152 @@ +package importer + +import ( + "bytes" + "context" + "io" + "testing" + + uio "github.com/ipfs/boxo/unixfs/io" + + chunker "github.com/ipfs/boxo/chunker" + u "github.com/ipfs/boxo/util" + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" +) + +func getBalancedDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAGService) { + ds := mdtest.Mock() + r := io.LimitReader(u.NewTimeSeededRand(), size) + nd, err := BuildDagFromReader(ds, chunker.NewSizeSplitter(r, blksize)) + if err != nil { + t.Fatal(err) + } + return nd, ds +} + +func getTrickleDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAGService) { + ds := mdtest.Mock() + r := io.LimitReader(u.NewTimeSeededRand(), size) + nd, err := BuildTrickleDagFromReader(ds, chunker.NewSizeSplitter(r, blksize)) + if err != nil { + t.Fatal(err) + } + return nd, ds +} + +func TestStableCid(t *testing.T) { + ds := mdtest.Mock() + buf := make([]byte, 10*1024*1024) + u.NewSeededRand(0xdeadbeef).Read(buf) + r := bytes.NewReader(buf) + + nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(r)) + if err != nil { + t.Fatal(err) + } + + expected, err := cid.Decode("QmZN1qquw84zhV4j6vT56tCcmFxaDaySL1ezTXFvMdNmrK") + if err != nil { + t.Fatal(err) + } + if !expected.Equals(nd.Cid()) { + t.Fatalf("expected CID %s, got CID %s", expected, nd) + } + + dr, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(dr) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(out, buf) { + t.Fatal("bad read") + } +} + +func TestBalancedDag(t *testing.T) { + ds := mdtest.Mock() + buf := make([]byte, 10000) + u.NewTimeSeededRand().Read(buf) + r := bytes.NewReader(buf) + + nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(r)) + if err != nil { + t.Fatal(err) + } + + dr, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(dr) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(out, buf) { + t.Fatal("bad read") + } +} + +func BenchmarkBalancedReadSmallBlock(b *testing.B) { + b.StopTimer() + nbytes := int64(10000000) + nd, ds := getBalancedDag(b, nbytes, 4096) + + b.SetBytes(nbytes) + b.StartTimer() + runReadBench(b, nd, ds) +} + +func BenchmarkTrickleReadSmallBlock(b *testing.B) { + b.StopTimer() + nbytes := int64(10000000) + nd, ds := getTrickleDag(b, nbytes, 4096) + + b.SetBytes(nbytes) + b.StartTimer() + runReadBench(b, nd, ds) +} + +func BenchmarkBalancedReadFull(b *testing.B) { + b.StopTimer() + nbytes := int64(10000000) + nd, ds := getBalancedDag(b, nbytes, chunker.DefaultBlockSize) + + b.SetBytes(nbytes) + b.StartTimer() + runReadBench(b, nd, ds) +} + +func BenchmarkTrickleReadFull(b *testing.B) { + b.StopTimer() + nbytes := int64(10000000) + nd, ds := getTrickleDag(b, nbytes, chunker.DefaultBlockSize) + + b.SetBytes(nbytes) + b.StartTimer() + runReadBench(b, nd, ds) +} + +func runReadBench(b *testing.B, nd ipld.Node, ds ipld.DAGService) { + for i := 0; i < b.N; 
i++ { + ctx, cancel := context.WithCancel(context.Background()) + read, err := uio.NewDagReader(ctx, nd, ds) + if err != nil { + b.Fatal(err) + } + + _, err = read.WriteTo(io.Discard) + if err != nil && err != io.EOF { + b.Fatal(err) + } + cancel() + } +} diff --git a/unixfs/importer/trickle/trickle_test.go b/unixfs/importer/trickle/trickle_test.go new file mode 100644 index 0000000000..9c34a39f23 --- /dev/null +++ b/unixfs/importer/trickle/trickle_test.go @@ -0,0 +1,666 @@ +package trickle + +import ( + "bytes" + "context" + "fmt" + "io" + mrand "math/rand" + "testing" + + ft "github.com/ipfs/boxo/unixfs" + h "github.com/ipfs/boxo/unixfs/importer/helpers" + uio "github.com/ipfs/boxo/unixfs/io" + + chunker "github.com/ipfs/boxo/chunker" + u "github.com/ipfs/boxo/util" + ipld "github.com/ipfs/go-ipld-format" + merkledag "github.com/ipfs/boxo/ipld/merkledag" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" +) + +type UseRawLeaves bool + +const ( + ProtoBufLeaves UseRawLeaves = false + RawLeaves UseRawLeaves = true +) + +func runBothSubtests(t *testing.T, tfunc func(*testing.T, UseRawLeaves)) { + t.Run("leaves=ProtoBuf", func(t *testing.T) { tfunc(t, ProtoBufLeaves) }) + t.Run("leaves=Raw", func(t *testing.T) { tfunc(t, RawLeaves) }) +} + +func buildTestDag(ds ipld.DAGService, spl chunker.Splitter, rawLeaves UseRawLeaves) (*merkledag.ProtoNode, error) { + dbp := h.DagBuilderParams{ + Dagserv: ds, + Maxlinks: h.DefaultLinksPerBlock, + RawLeaves: bool(rawLeaves), + } + + db, err := dbp.New(spl) + if err != nil { + return nil, err + } + + nd, err := Layout(db) + if err != nil { + return nil, err + } + + pbnd, ok := nd.(*merkledag.ProtoNode) + if !ok { + return nil, merkledag.ErrNotProtobuf + } + + return pbnd, VerifyTrickleDagStructure(pbnd, VerifyParams{ + Getter: ds, + Direct: dbp.Maxlinks, + LayerRepeat: depthRepeat, + RawLeaves: bool(rawLeaves), + }) +} + +// Test where calls to read are smaller than the chunk size +func TestSizeBasedSplit(t *testing.T) { + runBothSubtests(t, testSizeBasedSplit) +} + +func testSizeBasedSplit(t *testing.T, rawLeaves UseRawLeaves) { + if testing.Short() { + t.SkipNow() + } + bs := chunker.SizeSplitterGen(512) + testFileConsistency(t, bs, 32*512, rawLeaves) + + bs = chunker.SizeSplitterGen(4096) + testFileConsistency(t, bs, 32*4096, rawLeaves) + + // Uneven offset + testFileConsistency(t, bs, 31*4095, rawLeaves) +} + +func dup(b []byte) []byte { + o := make([]byte, len(b)) + copy(o, b) + return o +} + +func testFileConsistency(t *testing.T, bs chunker.SplitterGen, nbytes int, rawLeaves UseRawLeaves) { + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, bs(read), rawLeaves) + if err != nil { + t.Fatal(err) + } + + r, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should) + if err != nil { + t.Fatal(err) + } +} + +func TestBuilderConsistency(t *testing.T) { + runBothSubtests(t, testBuilderConsistency) +} + +func testBuilderConsistency(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := 100000 + buf := new(bytes.Buffer) + io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes)) + should := dup(buf.Bytes()) + dagserv := mdtest.Mock() + nd, err := buildTestDag(dagserv, chunker.DefaultSplitter(buf), rawLeaves) + if err != nil { + t.Fatal(err) + } + r, err := uio.NewDagReader(context.Background(), nd, dagserv) + if err != nil { + 
t.Fatal(err) + } + + out, err := io.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should) + if err != nil { + t.Fatal(err) + } +} + +func arrComp(a, b []byte) error { + if len(a) != len(b) { + return fmt.Errorf("arrays differ in length. %d != %d", len(a), len(b)) + } + for i, v := range a { + if v != b[i] { + return fmt.Errorf("arrays differ at index: %d", i) + } + } + return nil +} + +func TestIndirectBlocks(t *testing.T) { + runBothSubtests(t, testIndirectBlocks) +} + +func testIndirectBlocks(t *testing.T, rawLeaves UseRawLeaves) { + splitter := chunker.SizeSplitterGen(512) + nbytes := 1024 * 1024 + buf := make([]byte, nbytes) + u.NewTimeSeededRand().Read(buf) + + read := bytes.NewReader(buf) + + ds := mdtest.Mock() + dag, err := buildTestDag(ds, splitter(read), rawLeaves) + if err != nil { + t.Fatal(err) + } + + reader, err := uio.NewDagReader(context.Background(), dag, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(out, buf) { + t.Fatal("Not equal!") + } +} + +func TestSeekingBasic(t *testing.T) { + runBothSubtests(t, testSeekingBasic) +} + +func testSeekingBasic(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(10 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 512), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + start := int64(4000) + n, err := rs.Seek(start, io.SeekStart) + if err != nil { + t.Fatal(err) + } + if n != start { + t.Fatal("Failed to seek to correct offset") + } + + out, err := io.ReadAll(rs) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should[start:]) + if err != nil { + t.Fatal(err) + } +} + +func TestSeekToBegin(t *testing.T) { + runBothSubtests(t, testSeekToBegin) +} + +func testSeekToBegin(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(10 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + n, err := io.CopyN(io.Discard, rs, 1024*4) + if err != nil { + t.Fatal(err) + } + if n != 4096 { + t.Fatal("Copy didnt copy enough bytes") + } + + seeked, err := rs.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + if seeked != 0 { + t.Fatal("Failed to seek to beginning") + } + + out, err := io.ReadAll(rs) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should) + if err != nil { + t.Fatal(err) + } +} + +func TestSeekToAlmostBegin(t *testing.T) { + runBothSubtests(t, testSeekToAlmostBegin) +} + +func testSeekToAlmostBegin(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(10 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + n, err := io.CopyN(io.Discard, rs, 1024*4) + if err != nil { + t.Fatal(err) + } + if n != 4096 { + t.Fatal("Copy didnt copy enough bytes") + } + + 
seeked, err := rs.Seek(1, io.SeekStart) + if err != nil { + t.Fatal(err) + } + if seeked != 1 { + t.Fatal("Failed to seek to almost beginning") + } + + out, err := io.ReadAll(rs) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should[1:]) + if err != nil { + t.Fatal(err) + } +} + +func TestSeekEnd(t *testing.T) { + runBothSubtests(t, testSeekEnd) +} + +func testSeekEnd(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(50 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + seeked, err := rs.Seek(0, io.SeekEnd) + if err != nil { + t.Fatal(err) + } + if seeked != nbytes { + t.Fatal("Failed to seek to end") + } +} + +func TestSeekEndSingleBlockFile(t *testing.T) { + runBothSubtests(t, testSeekEndSingleBlockFile) +} + +func testSeekEndSingleBlockFile(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(100) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 5000), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + seeked, err := rs.Seek(0, io.SeekEnd) + if err != nil { + t.Fatal(err) + } + if seeked != nbytes { + t.Fatal("Failed to seek to end") + } +} + +func TestSeekingStress(t *testing.T) { + runBothSubtests(t, testSeekingStress) +} + +func testSeekingStress(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(1024 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 1000), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + testbuf := make([]byte, nbytes) + for i := 0; i < 50; i++ { + offset := mrand.Intn(int(nbytes)) + l := int(nbytes) - offset + n, err := rs.Seek(int64(offset), io.SeekStart) + if err != nil { + t.Fatal(err) + } + if n != int64(offset) { + t.Fatal("Seek failed to move to correct position") + } + + nread, err := rs.Read(testbuf[:l]) + if err != nil { + t.Fatal(err) + } + if nread != l { + t.Fatal("Failed to read enough bytes") + } + + err = arrComp(testbuf[:l], should[offset:offset+l]) + if err != nil { + t.Fatal(err) + } + } + +} + +func TestSeekingConsistency(t *testing.T) { + runBothSubtests(t, testSeekingConsistency) +} + +func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(128 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(should) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) + if err != nil { + t.Fatal(err) + } + + rs, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + out := make([]byte, nbytes) + + for coff := nbytes - 4096; coff >= 0; coff -= 4096 { + t.Log(coff) + n, err := rs.Seek(coff, io.SeekStart) + if err != nil { + t.Fatal(err) + } + if n != coff { + t.Fatal("wasnt able to seek to the right position") + } + nread, err := rs.Read(out[coff : coff+4096]) + if err != nil { + 
t.Fatal(err) + } + if nread != 4096 { + t.Fatal("didnt read the correct number of bytes") + } + } + + err = arrComp(out, should) + if err != nil { + t.Fatal(err) + } +} + +func TestAppend(t *testing.T) { + runBothSubtests(t, testAppend) +} + +func testAppend(t *testing.T, rawLeaves UseRawLeaves) { + nbytes := int64(128 * 1024) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + // Reader for half the bytes + read := bytes.NewReader(should[:nbytes/2]) + ds := mdtest.Mock() + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) + if err != nil { + t.Fatal(err) + } + + dbp := &h.DagBuilderParams{ + Dagserv: ds, + Maxlinks: h.DefaultLinksPerBlock, + RawLeaves: bool(rawLeaves), + } + + r := bytes.NewReader(should[nbytes/2:]) + + ctx := context.Background() + + db, err := dbp.New(chunker.NewSizeSplitter(r, 500)) + if err != nil { + t.Fatal(err) + } + + nnode, err := Append(ctx, nd, db) + if err != nil { + t.Fatal(err) + } + + err = VerifyTrickleDagStructure(nnode, VerifyParams{ + Getter: ds, + Direct: dbp.Maxlinks, + LayerRepeat: depthRepeat, + RawLeaves: bool(rawLeaves), + }) + if err != nil { + t.Fatal(err) + } + + fread, err := uio.NewDagReader(ctx, nnode, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(fread) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should) + if err != nil { + t.Fatal(err) + } +} + +// This test appends one byte at a time to an empty file +func TestMultipleAppends(t *testing.T) { + runBothSubtests(t, testMultipleAppends) +} + +func testMultipleAppends(t *testing.T, rawLeaves UseRawLeaves) { + ds := mdtest.Mock() + + // TODO: fix small size appends and make this number bigger + nbytes := int64(1000) + should := make([]byte, nbytes) + u.NewTimeSeededRand().Read(should) + + read := bytes.NewReader(nil) + nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) + if err != nil { + t.Fatal(err) + } + + dbp := &h.DagBuilderParams{ + Dagserv: ds, + Maxlinks: 4, + RawLeaves: bool(rawLeaves), + } + + spl := chunker.SizeSplitterGen(500) + + ctx := context.Background() + for i := 0; i < len(should); i++ { + + db, err := dbp.New(spl(bytes.NewReader(should[i : i+1]))) + if err != nil { + t.Fatal(err) + } + + nnode, err := Append(ctx, nd, db) + if err != nil { + t.Fatal(err) + } + + err = VerifyTrickleDagStructure(nnode, VerifyParams{ + Getter: ds, + Direct: dbp.Maxlinks, + LayerRepeat: depthRepeat, + RawLeaves: bool(rawLeaves), + }) + if err != nil { + t.Fatal(err) + } + + fread, err := uio.NewDagReader(ctx, nnode, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(fread) + if err != nil { + t.Fatal(err) + } + + err = arrComp(out, should[:i+1]) + if err != nil { + t.Fatal(err) + } + } +} + +func TestAppendSingleBytesToEmpty(t *testing.T) { + ds := mdtest.Mock() + + data := []byte("AB") + + nd := new(merkledag.ProtoNode) + nd.SetData(ft.FilePBData(nil, 0)) + + dbp := &h.DagBuilderParams{ + Dagserv: ds, + Maxlinks: 4, + } + + spl := chunker.SizeSplitterGen(500) + + ctx := context.Background() + + db, err := dbp.New(spl(bytes.NewReader(data[:1]))) + if err != nil { + t.Fatal(err) + } + + nnode, err := Append(ctx, nd, db) + if err != nil { + t.Fatal(err) + } + + db, err = dbp.New(spl(bytes.NewReader(data[1:]))) + if err != nil { + t.Fatal(err) + } + + nnode, err = Append(ctx, nnode, db) + if err != nil { + t.Fatal(err) + } + + fread, err := uio.NewDagReader(ctx, nnode, ds) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(fread) + if err != nil { + t.Fatal(err) 
+	}
+
+	fmt.Println(out, data)
+	err = arrComp(out, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/unixfs/importer/trickle/trickledag.go b/unixfs/importer/trickle/trickledag.go
new file mode 100644
index 0000000000..4dff07eac2
--- /dev/null
+++ b/unixfs/importer/trickle/trickledag.go
@@ -0,0 +1,389 @@
+// Package trickle allows building trickle DAGs.
+// In this type of DAG, non-leaf nodes are first filled
+// with data leaves, and then incorporate "layers" of subtrees
+// as additional links.
+//
+// Each layer is a trickle sub-tree and is limited by an increasing
+// maximum depth. Thus, a node's first layer
+// can only hold leaves (depth 1) but subsequent layers can grow deeper.
+// By default, this module places 4 nodes per layer (that is, 4 subtrees
+// of the same maximum depth before increasing it).
+//
+// Trickle DAGs are very good for sequentially reading data, as the
+// first data leaves are directly reachable from the root and those
+// coming next are always nearby. They are
+// well suited for things like streaming applications.
+package trickle
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	ft "github.com/ipfs/boxo/unixfs"
+	h "github.com/ipfs/boxo/unixfs/importer/helpers"
+
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+// depthRepeat specifies how many times to append a child tree of a
+// given depth. Higher values increase the width of a given node, which
+// improves seek speeds.
+const depthRepeat = 4
+
+// Layout builds a new DAG with the trickle format using the provided
+// DagBuilderHelper. See the module's description for a more detailed
+// explanation.
+func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
+	newRoot := db.NewFSNodeOverDag(ft.TFile)
+	root, _, err := fillTrickleRec(db, newRoot, -1)
+	if err != nil {
+		return nil, err
+	}
+
+	return root, db.Add(root)
+}
+
+// fillTrickleRec creates a trickle (sub-)tree with an optional maximum depth
+// when maxDepth is greater than zero, or with unlimited depth otherwise
+// (where the DAG builder will signal the end of data to end the function).
+func fillTrickleRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, maxDepth int) (filledNode ipld.Node, nodeFileSize uint64, err error) {
+	// Always do this, even in the base case
+	if err := db.FillNodeLayer(node); err != nil {
+		return nil, 0, err
+	}
+
+	// For each depth in [1, `maxDepth`) (or without limit if `maxDepth` is -1,
+	// initial call from `Layout`) add `depthRepeat` sub-graphs of that depth.
+	for depth := 1; maxDepth == -1 || depth < maxDepth; depth++ {
+		if db.Done() {
+			break
+			// No more data, stop here; posterior append calls will figure out
+			// where we left off.
+		}
+
+		for repeatIndex := 0; repeatIndex < depthRepeat && !db.Done(); repeatIndex++ {
+
+			childNode, childFileSize, err := fillTrickleRec(db, db.NewFSNodeOverDag(ft.TFile), depth)
+			if err != nil {
+				return nil, 0, err
+			}
+
+			if err := node.AddChild(childNode, childFileSize, db); err != nil {
+				return nil, 0, err
+			}
+		}
+	}
+
+	// Get the final `dag.ProtoNode` with the `FSNode` data encoded inside.
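+	// (Editorial note, not part of the original patch: with depthRepeat = 4,
+	// a filled sub-tree of depth d holds maxlinks * (depthRepeat+1)^(d-1)
+	// leaf chunks; e.g. for maxlinks = 4, depths 1, 2 and 3 hold 4, 20 and
+	// 100 chunks respectively.)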
+ filledNode, err = node.Commit() + if err != nil { + return nil, 0, err + } + + return filledNode, node.FileSize(), nil +} + +// Append appends the data in `db` to the dag, using the Trickledag format +func Append(ctx context.Context, basen ipld.Node, db *h.DagBuilderHelper) (out ipld.Node, errOut error) { + base, ok := basen.(*dag.ProtoNode) + if !ok { + return nil, dag.ErrNotProtobuf + } + + // Convert to unixfs node for working with easily + + fsn, err := h.NewFSNFromDag(base) + if err != nil { + return nil, err + } + + // Get depth of this 'tree' + depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks()) + if depth == 0 { + // If direct blocks not filled... + if err := db.FillNodeLayer(fsn); err != nil { + return nil, err + } + + if db.Done() { + // TODO: If `FillNodeLayer` stop `Commit`ing this should be + // the place (besides the function end) to call it. + return fsn.GetDagNode() + } + + // If continuing, our depth has increased by one + depth++ + } + + // Last child in this node may not be a full tree, lets fill it up. + if err := appendFillLastChild(ctx, fsn, depth-1, repeatNumber, db); err != nil { + return nil, err + } + + // after appendFillLastChild, our depth is now increased by one + if !db.Done() { + depth++ + } + + // Now, continue filling out tree like normal + for i := depth; !db.Done(); i++ { + for j := 0; j < depthRepeat && !db.Done(); j++ { + nextChild := db.NewFSNodeOverDag(ft.TFile) + childNode, childFileSize, err := fillTrickleRec(db, nextChild, i) + if err != nil { + return nil, err + } + err = fsn.AddChild(childNode, childFileSize, db) + if err != nil { + return nil, err + } + } + } + _, err = fsn.Commit() + if err != nil { + return nil, err + } + return fsn.GetDagNode() +} + +func appendFillLastChild(ctx context.Context, fsn *h.FSNodeOverDag, depth int, repeatNumber int, db *h.DagBuilderHelper) error { + if fsn.NumChildren() <= db.Maxlinks() { + return nil + } + // TODO: Why do we need this check, didn't the caller already take + // care of this? + + // Recursive step, grab last child + last := fsn.NumChildren() - 1 + lastChild, err := fsn.GetChild(ctx, last, db.GetDagServ()) + if err != nil { + return err + } + + // Fill out last child (may not be full tree) + newChild, nchildSize, err := appendRec(ctx, lastChild, db, depth-1) + if err != nil { + return err + } + + // Update changed child in parent node + fsn.RemoveChild(last, db) + filledNode, err := newChild.Commit() + if err != nil { + return err + } + err = fsn.AddChild(filledNode, nchildSize, db) + if err != nil { + return err + } + + // Partially filled depth layer + if repeatNumber != 0 { + for ; repeatNumber < depthRepeat && !db.Done(); repeatNumber++ { + nextChild := db.NewFSNodeOverDag(ft.TFile) + childNode, childFileSize, err := fillTrickleRec(db, nextChild, depth) + if err != nil { + return err + } + + if err := fsn.AddChild(childNode, childFileSize, db); err != nil { + return err + } + } + } + + return nil +} + +// recursive call for Append +func appendRec(ctx context.Context, fsn *h.FSNodeOverDag, db *h.DagBuilderHelper, maxDepth int) (*h.FSNodeOverDag, uint64, error) { + if maxDepth == 0 || db.Done() { + return fsn, fsn.FileSize(), nil + } + + // Get depth of this 'tree' + depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks()) + if depth == 0 { + // If direct blocks not filled... + if err := db.FillNodeLayer(fsn); err != nil { + return nil, 0, err + } + depth++ + } + // TODO: Same as `appendFillLastChild`, when is this case possible? 
+
+	// If at correct depth, no need to continue
+	if depth == maxDepth {
+		return fsn, fsn.FileSize(), nil
+	}
+
+	if err := appendFillLastChild(ctx, fsn, depth, repeatNumber, db); err != nil {
+		return nil, 0, err
+	}
+
+	// after appendFillLastChild, our depth is now increased by one
+	if !db.Done() {
+		depth++
+	}
+
+	// Now, continue filling out tree like normal
+	for i := depth; i < maxDepth && !db.Done(); i++ {
+		for j := 0; j < depthRepeat && !db.Done(); j++ {
+			nextChild := db.NewFSNodeOverDag(ft.TFile)
+			childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
+			if err != nil {
+				return nil, 0, err
+			}
+
+			if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
+				return nil, 0, err
+			}
+		}
+	}
+
+	return fsn, fsn.FileSize(), nil
+}
+
+// trickleDepthInfo deduces where we left off in `fillTrickleRec`: it returns
+// the `depth` at which new sub-graphs were being added and, within that depth,
+// the `repeatNumber` (out of the total `depthRepeat`) at which we should
+// resume adding.
+func trickleDepthInfo(node *h.FSNodeOverDag, maxlinks int) (depth int, repeatNumber int) {
+	n := node.NumChildren()
+
+	if n < maxlinks {
+		// We haven't even added the initial `maxlinks` leaf nodes (`FillNodeLayer`).
+		return 0, 0
+	}
+
+	nonLeafChildren := n - maxlinks
+	// The number of non-leaf child nodes added in `fillTrickleRec` (after
+	// the `FillNodeLayer` call).
+
+	depth = nonLeafChildren/depthRepeat + 1
+	// Group the added sub-graphs in blocks of `depthRepeat` to recover the
+	// depth we were at (the +1 accounts for the integer division rounding
+	// down, since we may be on an unfinished depth with fewer than
+	// `depthRepeat` sub-graphs).
+
+	repeatNumber = nonLeafChildren % depthRepeat
+	// What's left after taking full depths of `depthRepeat` sub-graphs
+	// is the current `repeatNumber` we're at (this fractional part is
+	// what we rounded up before).
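+
+	// Worked example (illustrative values): with maxlinks=4 and
+	// depthRepeat=4, a node with n=13 children has nonLeafChildren=9;
+	// the four depth-1 and four depth-2 slots are full and one depth-3
+	// subtree exists, so depth = 9/4+1 = 3 and repeatNumber = 9%4 = 1,
+	// i.e. we resume at the second of the four depth-3 slots.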
+
+	return
+}
+
+// VerifyParams is used by VerifyTrickleDagStructure
+type VerifyParams struct {
+	Getter      ipld.NodeGetter
+	Direct      int
+	LayerRepeat int
+	Prefix      *cid.Prefix
+	RawLeaves   bool
+}
+
+// VerifyTrickleDagStructure checks that the given DAG matches exactly the
+// trickle DAG data structure layout.
+func VerifyTrickleDagStructure(nd ipld.Node, p VerifyParams) error {
+	return verifyTDagRec(nd, -1, p)
+}
+
+// verifyTDagRec is the recursive call for verifying the structure of a trickle DAG.
+func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
+	codec := cid.DagProtobuf
+	if depth == 0 {
+		if len(n.Links()) > 0 {
+			return errors.New("expected direct block")
+		}
+		// a zero-depth DAG is a raw data block
+		switch nd := n.(type) {
+		case *dag.ProtoNode:
+			fsn, err := ft.FSNodeFromBytes(nd.Data())
+			if err != nil {
+				return err
+			}
+
+			if fsn.Type() != ft.TRaw {
+				return errors.New("expected raw block")
+			}
+
+			if p.RawLeaves {
+				return errors.New("expected raw leaf, got a protobuf node")
+			}
+		case *dag.RawNode:
+			if !p.RawLeaves {
+				return errors.New("expected protobuf node as leaf")
+			}
+			codec = cid.Raw
+		default:
+			return errors.New("expected ProtoNode or RawNode")
+		}
+	}
+
+	// verify prefix
+	if p.Prefix != nil {
+		prefix := n.Cid().Prefix()
+		expect := *p.Prefix // make a copy
+		expect.Codec = uint64(codec)
+		if codec == cid.Raw && expect.Version == 0 {
+			expect.Version = 1
+		}
+		if expect.MhLength == -1 {
+			expect.MhLength = prefix.MhLength
+		}
+		if prefix != expect {
+			return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix)
+		}
+	}
+
+	if depth == 0 {
+		return nil
+	}
+
+	nd, ok := n.(*dag.ProtoNode)
+	if !ok {
+		return errors.New("expected ProtoNode")
+	}
+
+	// Verify this is a branch node
+	fsn, err := ft.FSNodeFromBytes(nd.Data())
+	if err != nil {
+		return err
+	}
+
+	if fsn.Type() != ft.TFile {
+		return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
+	}
+
+	if len(fsn.Data()) > 0 {
+		return errors.New("branch node should not have data")
+	}
+
+	for i := 0; i < len(nd.Links()); i++ {
+		child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter)
+		if err != nil {
+			return err
+		}
+
+		if i < p.Direct {
+			// Direct blocks
+			err := verifyTDagRec(child, 0, p)
+			if err != nil {
+				return err
+			}
+		} else {
+			// Recursive trickle dags
+			rdepth := ((i - p.Direct) / p.LayerRepeat) + 1
+			if rdepth >= depth && depth > 0 {
+				return errors.New("child dag was too deep")
+			}
+			err := verifyTDagRec(child, rdepth, p)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/unixfs/internal/config.go b/unixfs/internal/config.go
new file mode 100644
index 0000000000..9250ae2ae3
--- /dev/null
+++ b/unixfs/internal/config.go
@@ -0,0 +1,3 @@
+package internal
+
+var HAMTHashFunction func(val []byte) []byte
diff --git a/unixfs/io/completehamt_test.go b/unixfs/io/completehamt_test.go
new file mode 100644
index 0000000000..231765de10
--- /dev/null
+++ b/unixfs/io/completehamt_test.go
@@ -0,0 +1,101 @@
+package io
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"math"
+	"testing"
+
+	"github.com/ipfs/boxo/unixfs/internal"
+
+	mdtest "github.com/ipfs/boxo/ipld/merkledag/test"
+	"github.com/stretchr/testify/assert"
+
+	"github.com/ipfs/boxo/unixfs"
+	"github.com/ipfs/boxo/unixfs/hamt"
+
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// CreateCompleteHAMT creates a HAMT with the following properties:
+// - its height (distance/edges from root to deepest node) is specified by treeHeight.
+// - all leaf Shard nodes have the same depth (and have only 'value' links).
+// - all internal Shard nodes point only to other Shards (and hence have zero 'value' links).
+// - the total number of 'value' links (directory entries) is:
+//   childsPerNode ^ (treeHeight).
+// treeHeight: The number of layers of non-value HAMT nodes (e.g. height = 1 is a single shard pointing to some values)
+//
+// FIXME: HAMTHashFunction needs to be set to idHash by the caller. We depend
+// on this simplification for the current logic to work.
+func CreateCompleteHAMT(ds ipld.DAGService, treeHeight int, childsPerNode int) (ipld.Node, error) {
+	if treeHeight < 1 {
+		panic("treeHeight < 1")
+	}
+	if treeHeight > 8 {
+		panic("treeHeight > 8: we don't allow a key larger than what can be encoded in a 64-bit word")
+	}
+
+	rootShard, err := hamt.NewShard(ds, childsPerNode)
+	if err != nil {
+		return nil, err
+	}
+
+	// Assuming we are using the ID hash function we can just insert all
+	// the combinations of a byte slice that will reach the desired height.
+	totalChildren := int(math.Pow(float64(childsPerNode), float64(treeHeight)))
+	log2ofChilds, err := hamt.Logtwo(childsPerNode)
+	if err != nil {
+		return nil, err
+	}
+	if log2ofChilds*treeHeight%8 != 0 {
+		return nil, fmt.Errorf("log2(childsPerNode) * treeHeight should be a multiple of 8")
+	}
+	bytesInKey := log2ofChilds * treeHeight / 8
+	for i := 0; i < totalChildren; i++ {
+		var hashbuf [8]byte
+		binary.LittleEndian.PutUint64(hashbuf[:], uint64(i))
+		var oldLink *ipld.Link
+		oldLink, err = rootShard.Swap(context.Background(), string(hashbuf[:bytesInKey]), unixfs.EmptyFileNode())
+		if err != nil {
+			return nil, err
+		}
+		if oldLink != nil {
+			// We shouldn't be overwriting any value, otherwise the tree
+			// won't be complete.
+			return nil, fmt.Errorf("we have overwritten entry %s",
+				oldLink.Cid)
+		}
+	}
+
+	return rootShard.Node()
+}
+
+// idHash returns the value unchanged, i.e. it is the identity hash function.
+func idHash(val []byte) []byte {
+	return val
+}
+
+// FIXME: This is not checking the exact height of the tree, just making sure
+// there are as many children as we would have with a complete HAMT.
+func TestCreateCompleteShard(t *testing.T) {
+	oldHashFunc := internal.HAMTHashFunction
+	defer func() { internal.HAMTHashFunction = oldHashFunc }()
+	internal.HAMTHashFunction = idHash
+
+	ds := mdtest.Mock()
+	childsPerNode := 16
+	treeHeight := 2
+	node, err := CreateCompleteHAMT(ds, treeHeight, childsPerNode)
+	assert.NoError(t, err)
+
+	shard, err := hamt.NewHamtFromDag(ds, node)
+	assert.NoError(t, err)
+	links, err := shard.EnumLinks(context.Background())
+	assert.NoError(t, err)
+
+	childNodes := int(math.Pow(float64(childsPerNode), float64(treeHeight)))
+	assert.Equal(t, childNodes, len(links))
+}
diff --git a/unixfs/io/dagreader.go b/unixfs/io/dagreader.go
new file mode 100644
index 0000000000..d0500d31bf
--- /dev/null
+++ b/unixfs/io/dagreader.go
@@ -0,0 +1,488 @@
+package io
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+
+	unixfs "github.com/ipfs/boxo/unixfs"
+	ipld "github.com/ipfs/go-ipld-format"
+	mdag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+// Common errors
+var (
+	ErrIsDir            = errors.New("this dag node is a directory")
+	ErrCantReadSymlinks = errors.New("cannot currently read symlinks")
+	ErrUnkownNodeType   = errors.New("unknown node type")
+	ErrSeekNotSupported = errors.New("file does not support seeking")
+)
+
+// TODO: Rename the `DagReader` interface, this doesn't read *any* DAG, just
+// DAGs with UnixFS nodes (and it *belongs* to the `unixfs` package).
Some
+// alternatives: `FileReader`, `UnixFSFileReader`, `UnixFSReader`.
+
+// A DagReader provides read-only read and seek access to a unixfs file.
+// Different implementations of readers are used for the different
+// types of unixfs/protobuf-encoded nodes.
+type DagReader interface {
+	ReadSeekCloser
+	Size() uint64
+	CtxReadFull(context.Context, []byte) (int, error)
+}
+
+// A ReadSeekCloser implements interfaces to read, copy, seek and close.
+type ReadSeekCloser interface {
+	io.Reader
+	io.Seeker
+	io.Closer
+	io.WriterTo
+}
+
+// NewDagReader creates a new reader object that reads the data represented by
+// the given node, using the passed-in NodeGetter for data retrieval.
+func NewDagReader(ctx context.Context, n ipld.Node, serv ipld.NodeGetter) (DagReader, error) {
+	var size uint64
+
+	switch n := n.(type) {
+	case *mdag.RawNode:
+		size = uint64(len(n.RawData()))
+
+	case *mdag.ProtoNode:
+		fsNode, err := unixfs.FSNodeFromBytes(n.Data())
+		if err != nil {
+			return nil, err
+		}
+
+		switch fsNode.Type() {
+		case unixfs.TFile, unixfs.TRaw:
+			size = fsNode.FileSize()
+
+		case unixfs.TDirectory, unixfs.THAMTShard:
+			// Don't allow reading directories
+			return nil, ErrIsDir
+
+		case unixfs.TMetadata:
+			if len(n.Links()) == 0 {
+				return nil, errors.New("incorrectly formatted metadata object")
+			}
+			child, err := n.Links()[0].GetNode(ctx, serv)
+			if err != nil {
+				return nil, err
+			}
+
+			childpb, ok := child.(*mdag.ProtoNode)
+			if !ok {
+				return nil, mdag.ErrNotProtobuf
+			}
+			return NewDagReader(ctx, childpb, serv)
+		case unixfs.TSymlink:
+			return nil, ErrCantReadSymlinks
+		default:
+			return nil, unixfs.ErrUnrecognizedType
+		}
+	default:
+		return nil, ErrUnkownNodeType
+	}
+
+	ctxWithCancel, cancel := context.WithCancel(ctx)
+
+	return &dagReader{
+		ctx:       ctxWithCancel,
+		cancel:    cancel,
+		serv:      serv,
+		size:      size,
+		rootNode:  n,
+		dagWalker: ipld.NewWalker(ctxWithCancel, ipld.NewNavigableIPLDNode(n, serv)),
+	}, nil
+}
+
+// dagReader provides a way to easily read the data contained in a DAG.
+type dagReader struct {
+
+	// Structure to perform the DAG iteration and search, the reader
+	// just needs to add logic to the `Visitor` callback passed to
+	// `Iterate` and `Seek`.
+	dagWalker *ipld.Walker
+
+	// Buffer with the data extracted from the current node being visited.
+	// To avoid revisiting a node to complete a (potential) partial read
+	// (or read after seek) the node's data is fully extracted in a single
+	// `readNodeDataBuffer` operation.
+	currentNodeData *bytes.Reader
+
+	// Implements the `Size()` API.
+	size uint64
+
+	// Current offset for the read head within the DAG file.
+	offset int64
+
+	// Root node of the DAG, stored to re-create the `dagWalker` (effectively
+	// re-setting the position of the reader, used during `Seek`).
+	rootNode ipld.Node
+
+	// Context passed to the `dagWalker`, the `cancel` function is used to
+	// cancel read operations (cancelling requested child node promises,
+	// see `ipld.NavigableIPLDNode.FetchChild` for details).
+	ctx    context.Context
+	cancel func()
+
+	// Passed to the `dagWalker` that will use it to request nodes.
+	// TODO: Revisit name.
+	serv ipld.NodeGetter
+}
+
+// Size returns the total size of the data from the DAG structured file.
+func (dr *dagReader) Size() uint64 {
+	return dr.size
+}
+
+// Read implements the `io.Reader` interface through the `CtxReadFull`
+// method using the DAG reader's internal context.
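+//
+// A minimal usage sketch (`dr` stands for a DagReader previously obtained
+// from NewDagReader):
+//
+//	buf := make([]byte, 4096)
+//	n, err := dr.Read(buf) // reads up to len(buf) bytes of file data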
+func (dr *dagReader) Read(b []byte) (int, error) { + return dr.CtxReadFull(dr.ctx, b) +} + +// CtxReadFull reads data from the DAG structured file. It always +// attempts a full read of the DAG until the `out` buffer is full. +// It uses the `Walker` structure to iterate the file DAG and read +// every node's data into the `out` buffer. +func (dr *dagReader) CtxReadFull(ctx context.Context, out []byte) (n int, err error) { + // Set the `dagWalker`'s context to the `ctx` argument, it will be used + // to fetch the child node promises (see + // `ipld.NavigableIPLDNode.FetchChild` for details). + dr.dagWalker.SetContext(ctx) + + // If there was a partially read buffer from the last visited + // node read it before visiting a new one. + if dr.currentNodeData != nil { + // TODO: Move this check inside `readNodeDataBuffer`? + n = dr.readNodeDataBuffer(out) + + if n == len(out) { + return n, nil + // Output buffer full, no need to traverse the DAG. + } + } + + // Iterate the DAG calling the passed `Visitor` function on every node + // to read its data into the `out` buffer, stop if there is an error or + // if the entire DAG is traversed (`EndOfDag`). + err = dr.dagWalker.Iterate(func(visitedNode ipld.NavigableNode) error { + node := ipld.ExtractIPLDNode(visitedNode) + + // Skip internal nodes, they shouldn't have any file data + // (see the `balanced` package for more details). + if len(node.Links()) > 0 { + return nil + } + + err = dr.saveNodeData(node) + if err != nil { + return err + } + // Save the leaf node file data in a buffer in case it is only + // partially read now and future `CtxReadFull` calls reclaim the + // rest (as each node is visited only once during `Iterate`). + // + // TODO: We could check if the entire node's data can fit in the + // remaining `out` buffer free space to skip this intermediary step. + + n += dr.readNodeDataBuffer(out[n:]) + + if n == len(out) { + // Output buffer full, no need to keep traversing the DAG, + // signal the `Walker` to pause the iteration. + dr.dagWalker.Pause() + } + + return nil + }) + + if err == ipld.EndOfDag { + return n, io.EOF + // Reached the end of the (DAG) file, no more data to read. + } else if err != nil { + return n, err + // Pass along any other errors from the `Visitor`. + } + + return n, nil +} + +// Save the UnixFS `node`'s data into the internal `currentNodeData` buffer to +// later move it to the output buffer (`Read`) or seek into it (`Seek`). +func (dr *dagReader) saveNodeData(node ipld.Node) error { + extractedNodeData, err := unixfs.ReadUnixFSNodeData(node) + if err != nil { + return err + } + + dr.currentNodeData = bytes.NewReader(extractedNodeData) + return nil +} + +// Read the `currentNodeData` buffer into `out`. This function can't have +// any errors as it's always reading from a `bytes.Reader` and asking only +// the available data in it. +func (dr *dagReader) readNodeDataBuffer(out []byte) int { + + n, _ := dr.currentNodeData.Read(out) + // Ignore the error as the EOF may not be returned in the first + // `Read` call, explicitly ask for an empty buffer below to check + // if we've reached the end. + + if dr.currentNodeData.Len() == 0 { + dr.currentNodeData = nil + // Signal that the buffer was consumed (for later `Read` calls). + // This shouldn't return an EOF error as it's just the end of a + // single node's data, not the entire DAG. + } + + dr.offset += int64(n) + // TODO: Should `offset` be incremented here or in the calling function? + // (Doing it here saves LoC but may be confusing as it's more hidden). 
+ + return n +} + +// Similar to `readNodeDataBuffer` but it writes the contents to +// an `io.Writer` argument. +// +// TODO: Check what part of the logic between the two functions +// can be extracted away. +func (dr *dagReader) writeNodeDataBuffer(w io.Writer) (int64, error) { + + n, err := dr.currentNodeData.WriteTo(w) + if err != nil { + return n, err + } + + if dr.currentNodeData.Len() == 0 { + dr.currentNodeData = nil + // Signal that the buffer was consumed (for later `Read` calls). + // This shouldn't return an EOF error as it's just the end of a + // single node's data, not the entire DAG. + } + + dr.offset += int64(n) + return n, nil +} + +// WriteTo writes to the given writer. +// This follows the `bytes.Reader.WriteTo` implementation +// where it starts from the internal index that may have +// been modified by other `Read` calls. +// +// TODO: This implementation is very similar to `CtxReadFull`, +// the common parts should be abstracted away. +func (dr *dagReader) WriteTo(w io.Writer) (n int64, err error) { + // Use the internal reader's context to fetch the child node promises + // (see `ipld.NavigableIPLDNode.FetchChild` for details). + dr.dagWalker.SetContext(dr.ctx) + + // If there was a partially read buffer from the last visited + // node read it before visiting a new one. + if dr.currentNodeData != nil { + n, err = dr.writeNodeDataBuffer(w) + if err != nil { + return n, err + } + } + + // Iterate the DAG calling the passed `Visitor` function on every node + // to read its data into the `out` buffer, stop if there is an error or + // if the entire DAG is traversed (`EndOfDag`). + err = dr.dagWalker.Iterate(func(visitedNode ipld.NavigableNode) error { + node := ipld.ExtractIPLDNode(visitedNode) + + // Skip internal nodes, they shouldn't have any file data + // (see the `balanced` package for more details). + if len(node.Links()) > 0 { + return nil + } + + err = dr.saveNodeData(node) + if err != nil { + return err + } + // Save the leaf node file data in a buffer in case it is only + // partially read now and future `CtxReadFull` calls reclaim the + // rest (as each node is visited only once during `Iterate`). + + written, err := dr.writeNodeDataBuffer(w) + n += written + if err != nil { + return err + } + + return nil + }) + + if err == ipld.EndOfDag { + return n, nil + } + + return n, err +} + +// Close the reader (cancelling fetch node operations requested with +// the internal context, that is, `Read` calls but not `CtxReadFull` +// with user-supplied contexts). +func (dr *dagReader) Close() error { + dr.cancel() + return nil +} + +// Seek implements `io.Seeker` seeking to a given offset in the DAG file, +// it matches the standard unix `seek`. It moves the position of the internal +// `dagWalker` and may also leave a `currentNodeData` buffer loaded in case +// the seek is performed to the middle of the data in a node. +// +// TODO: Support seeking from the current position (relative seek) +// through the `dagWalker` in `io.SeekCurrent`. +func (dr *dagReader) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + if offset < 0 { + return dr.offset, errors.New("invalid offset") + } + + if offset == dr.offset { + return offset, nil + // Already at the requested `offset`, nothing to do. + } + + left := offset + // Amount left to seek. + + // Seek from the beginning of the DAG. + dr.resetPosition() + + // Shortcut seeking to the beginning, we're already there. 
+ if offset == 0 { + return 0, nil + } + + // Use the internal reader's context to fetch the child node promises + // (see `ipld.NavigableIPLDNode.FetchChild` for details). + dr.dagWalker.SetContext(dr.ctx) + // TODO: Performance: we could adjust here `preloadSize` of + // `ipld.NavigableIPLDNode` also, when seeking we only want + // to fetch one child at a time. + + // Seek the DAG by calling the provided `Visitor` function on every + // node the `dagWalker` descends to while searching which can be + // either an internal or leaf node. In the internal node case, check + // the child node sizes and set the corresponding child index to go + // down to next. In the leaf case (last visit of the search), if there + // is still an amount `left` to seek do it inside the node's data + // saved in the `currentNodeData` buffer, leaving it ready for a `Read` + // call. + err := dr.dagWalker.Seek(func(visitedNode ipld.NavigableNode) error { + node := ipld.ExtractIPLDNode(visitedNode) + + if len(node.Links()) > 0 { + // Internal node, should be a `mdag.ProtoNode` containing a + // `unixfs.FSNode` (see the `balanced` package for more details). + fsNode, err := unixfs.ExtractFSNode(node) + if err != nil { + return err + } + + // If there aren't enough size hints don't seek + // (see the `io.EOF` handling error comment below). + if fsNode.NumChildren() != len(node.Links()) { + return ErrSeekNotSupported + } + + // Internal nodes have no data, so just iterate through the + // sizes of its children (advancing the child index of the + // `dagWalker`) to find where we need to go down to next in + // the search. + for { + childSize := fsNode.BlockSize(int(dr.dagWalker.ActiveChildIndex())) + + if childSize > uint64(left) { + // This child's data contains the position requested + // in `offset`, go down this child. + return nil + } + + // Else, skip this child. + left -= int64(childSize) + err := dr.dagWalker.NextChild() + if err == ipld.ErrNextNoChild { + // No more child nodes available, nothing to do, + // the `Seek` will stop on its own. + return nil + } else if err != nil { + return err + // Pass along any other errors (that may in future + // implementations be returned by `Next`) to stop + // the search. + } + } + + } else { + // Leaf node, seek inside its data. + err := dr.saveNodeData(node) + if err != nil { + return err + } + + _, err = dr.currentNodeData.Seek(left, io.SeekStart) + if err != nil { + return err + } + // The corner case of a DAG consisting only of a single (leaf) + // node should make no difference here. In that case, where the + // node doesn't have a parent UnixFS node with size hints, this + // implementation would allow this `Seek` to be called with an + // argument larger than the buffer size which normally wouldn't + // happen (because we would skip the node based on the size + // hint) but that would just mean that a future `CtxReadFull` + // call would read no data from the `currentNodeData` buffer. + // TODO: Re-check this reasoning. + + return nil + // In the leaf node case the search will stop here. + } + }) + + if err != nil { + return 0, err + } + + dr.offset = offset + return dr.offset, nil + + case io.SeekCurrent: + if offset == 0 { + return dr.offset, nil + } + + return dr.Seek(dr.offset+offset, io.SeekStart) + // TODO: Performance. This can be improved supporting relative + // searches in the `Walker` (see `Walker.Seek`). 
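+		// For example, with the reader at offset 100, Seek(20, io.SeekCurrent)
+		// behaves like Seek(120, io.SeekStart) and returns 120.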
+
+	case io.SeekEnd:
+		return dr.Seek(int64(dr.Size())+offset, io.SeekStart)
+
+	default:
+		return 0, errors.New("invalid whence")
+	}
+}
+
+// Reset the reader position by resetting the `dagWalker` and discarding
+// any partially used node's data in the `currentNodeData` buffer, used
+// in the `SeekStart` case.
+func (dr *dagReader) resetPosition() {
+	dr.currentNodeData = nil
+	dr.offset = 0
+
+	dr.dagWalker = ipld.NewWalker(dr.ctx, ipld.NewNavigableIPLDNode(dr.rootNode, dr.serv))
+	// TODO: This could be avoided (along with storing the `dr.rootNode` and
+	// `dr.serv` just for this call) if `Reset` is supported in the `Walker`.
+}
diff --git a/unixfs/io/dagreader_test.go b/unixfs/io/dagreader_test.go
new file mode 100644
index 0000000000..aacf74fe1a
--- /dev/null
+++ b/unixfs/io/dagreader_test.go
@@ -0,0 +1,326 @@
+package io
+
+import (
+	"bytes"
+	"io"
+	"strings"
+	"testing"
+
+	"github.com/ipfs/boxo/unixfs"
+	mdag "github.com/ipfs/boxo/ipld/merkledag"
+
+	context "context"
+
+	testu "github.com/ipfs/boxo/unixfs/test"
+)
+
+func TestBasicRead(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.UseProtoBufLeaves)
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	outbuf, err := io.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = testu.ArrComp(inbuf, outbuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestSeekAndRead(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	inbuf := make([]byte, 256)
+	for i := 0; i <= 255; i++ {
+		inbuf[i] = byte(i)
+	}
+
+	node := testu.GetNode(t, dserv, inbuf, testu.UseProtoBufLeaves)
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 255; i >= 0; i-- {
+		reader.Seek(int64(i), io.SeekStart)
+
+		if getOffset(reader) != int64(i) {
+			t.Fatal("expected offset to equal the seek position")
+		}
+
+		out := readByte(t, reader)
+
+		if int(out) != i {
+			t.Fatalf("read %d at index %d, expected %d", out, i, i)
+		}
+
+		if getOffset(reader) != int64(i+1) {
+			t.Fatal("expected offset to be increased by one after read")
+		}
+	}
+}
+
+func TestSeekWithoutBlocksizes(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	inbuf := make([]byte, 1024)
+
+	for i := 0; i < 256; i++ {
+		inbuf[i*4] = byte(i)
+	}
+
+	inbuf[1023] = 1 // force the reader to be 1024 bytes
+	node := testu.GetNode(t, dserv, inbuf, testu.UseProtoBufLeaves)
+
+	// remove the blocksizes
+	pbnode := node.Copy().(*mdag.ProtoNode)
+	fsnode, err := unixfs.FSNodeFromBytes(pbnode.Data())
+	if err != nil {
+		t.Fatal(err)
+	}
+	fsnode.RemoveAllBlockSizes()
+	newData, err := fsnode.GetBytes()
+	if err != nil {
+		t.Fatal(err)
+	}
+	pbnode.SetData(newData)
+	err = dserv.Add(ctx, pbnode)
+	if err != nil {
+		t.Fatal(err)
+	}
+	node = pbnode
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = reader.Seek(-4, io.SeekEnd)
+	if err == nil {
+		t.Fatal("seeking shouldn't work without blocksizes")
+	}
+
+	_, err = reader.Seek(4, io.SeekStart)
+	if err == nil {
+		t.Fatal("seeking shouldn't work without blocksizes")
+	}
+
+	_, err = reader.Seek(4, io.SeekCurrent)
+	if err == nil {
+		t.Fatal("seeking shouldn't work without blocksizes")
+	}
+
+	// Seeking to the current position or the end should still
work.
+
+	_, err = reader.Seek(0, io.SeekCurrent)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = reader.Seek(0, io.SeekStart)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestRelativeSeek(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	inbuf := make([]byte, 1024)
+
+	for i := 0; i < 256; i++ {
+		inbuf[i*4] = byte(i)
+	}
+
+	inbuf[1023] = 1 // force the reader to be 1024 bytes
+	node := testu.GetNode(t, dserv, inbuf, testu.UseProtoBufLeaves)
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 256; i++ {
+		if getOffset(reader) != int64(i*4) {
+			t.Fatalf("offset should be %d, was %d", i*4, getOffset(reader))
+		}
+		out := readByte(t, reader)
+		if int(out) != i {
+			t.Fatalf("expected to read: %d at %d, read %d", i, getOffset(reader)-1, out)
+		}
+		if i != 255 {
+			_, err := reader.Seek(3, io.SeekCurrent)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
+	_, err = reader.Seek(-4, io.SeekEnd)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 256; i++ {
+		if getOffset(reader) != int64(1020-i*4) {
+			t.Fatalf("offset should be %d, was %d", 1020-i*4, getOffset(reader))
+		}
+		out := readByte(t, reader)
+		if int(out) != 255-i {
+			t.Fatalf("expected to read: %d at %d, read %d", 255-i, getOffset(reader)-1, out)
+		}
+		reader.Seek(-5, io.SeekCurrent) // seek back 5 bytes: 4 to the previous marker plus 1 for the byte just read
+	}
+
+}
+
+func TestTypeFailures(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	node := unixfs.EmptyDirNode()
+	if _, err := NewDagReader(ctx, node, dserv); err != ErrIsDir {
+		t.Fatalf("expected to get %v, got %v", ErrIsDir, err)
+	}
+
+	data, err := unixfs.SymlinkData("/somelink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	node = mdag.NodeWithData(data)
+
+	if _, err := NewDagReader(ctx, node, dserv); err != ErrCantReadSymlinks {
+		t.Fatalf("expected to get %v, got %v", ErrCantReadSymlinks, err)
+	}
+}
+
+func TestBadPBData(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	node := mdag.NodeWithData([]byte{42})
+	_, err := NewDagReader(ctx, node, dserv)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+}
+
+func TestMetadataNode(t *testing.T) {
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	dserv := testu.GetDAGServ()
+	rdata, rnode := testu.GetRandomNode(t, dserv, 512, testu.UseProtoBufLeaves)
+	err := dserv.Add(ctx, rnode)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	data, err := unixfs.BytesForMetadata(&unixfs.Metadata{
+		MimeType: "text",
+		Size:     125,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	node := mdag.NodeWithData(data)
+
+	_, err = NewDagReader(ctx, node, dserv)
+	if err == nil {
+		t.Fatal("expected an error")
+	}
+	if !strings.Contains(err.Error(), "incorrectly formatted") {
+		t.Fatal("expected different error")
+	}
+
+	node.AddNodeLink("", rnode)
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	readdata, err := io.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := testu.ArrComp(rdata, readdata); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestWriteTo(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.UseProtoBufLeaves)
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+
t.Fatal(err)
+	}
+
+	outbuf := new(bytes.Buffer)
+	reader.WriteTo(outbuf)
+
+	err = testu.ArrComp(inbuf, outbuf.Bytes())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+func TestReaderSize(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	size := int64(1024)
+	_, node := testu.GetRandomNode(t, dserv, size, testu.UseProtoBufLeaves)
+	ctx, closer := context.WithCancel(context.Background())
+	defer closer()
+
+	reader, err := NewDagReader(ctx, node, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if reader.Size() != uint64(size) {
+		t.Fatal("wrong reader size")
+	}
+}
+
+func readByte(t testing.TB, reader DagReader) byte {
+	out := make([]byte, 1)
+	c, err := reader.Read(out)
+
+	if c != 1 {
+		t.Fatal("reader should have read just one byte")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return out[0]
+}
+
+func getOffset(reader DagReader) int64 {
+	offset, err := reader.Seek(0, io.SeekCurrent)
+	if err != nil {
+		panic("failed to retrieve offset: " + err.Error())
+	}
+	return offset
+}
diff --git a/unixfs/io/directory.go b/unixfs/io/directory.go
new file mode 100644
index 0000000000..0679559a05
--- /dev/null
+++ b/unixfs/io/directory.go
@@ -0,0 +1,610 @@
+package io
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/ipfs/boxo/unixfs/hamt"
+	"github.com/ipfs/boxo/unixfs/private/linksize"
+
+	"github.com/alecthomas/units"
+	format "github.com/ipfs/boxo/unixfs"
+	"github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	mdag "github.com/ipfs/boxo/ipld/merkledag"
+	logging "github.com/ipfs/go-log"
+)
+
+var log = logging.Logger("unixfs")
+
+// HAMTShardingSize is a global option that allows switching to a HAMTDirectory
+// when the BasicDirectory grows above the size (in bytes) signalled by this
+// flag. A size of 0 disables the option (here it defaults to 256 KiB).
+// The size is not the *exact* block size of the encoded BasicDirectory but
+// just an estimate based on the byte length of link names and CIDs
+// (BasicDirectory's ProtoNode doesn't use the Data field so this estimate is
+// pretty accurate).
+var HAMTShardingSize = int(256 * units.KiB)
+
+// DefaultShardWidth is the default value used for hamt sharding width.
+// Needs to be a power of two (shard entry size) and multiple of 8 (bitfield size).
+var DefaultShardWidth = 256
+
+// Directory defines a UnixFS directory. It is used for creating, reading and
+// editing directories. It allows working with different directory schemes,
+// like the basic or the HAMT implementation.
+//
+// It only allows performing explicit edits on a single directory; working with
+// directory trees is out of its scope, as they are managed by the MFS layer
+// (which is the main consumer of this interface).
+type Directory interface {
+
+	// SetCidBuilder sets the CID Builder of the root node.
+	SetCidBuilder(cid.Builder)
+
+	// AddChild adds a (name, key) pair to the root node.
+	AddChild(context.Context, string, ipld.Node) error
+
+	// ForEachLink applies the given function to Links in the directory.
+	ForEachLink(context.Context, func(*ipld.Link) error) error
+
+	// EnumLinksAsync returns a channel which will receive Links in the directory
+	// as they are enumerated, where order is not guaranteed
+	EnumLinksAsync(context.Context) <-chan format.LinkResult
+
+	// Links returns all the links in the directory node.
+	Links(context.Context) ([]*ipld.Link, error)
+
+	// Find returns the root node of the file named 'name' within this directory.
+	// In the case of HAMT-directories, it will traverse the tree.
+	//
+	// Returns os.ErrNotExist if the child does not exist.
+ Find(context.Context, string) (ipld.Node, error) + + // RemoveChild removes the child with the given name. + // + // Returns os.ErrNotExist if the child doesn't exist. + RemoveChild(context.Context, string) error + + // GetNode returns the root of this directory. + GetNode() (ipld.Node, error) + + // GetCidBuilder returns the CID Builder used. + GetCidBuilder() cid.Builder +} + +// TODO: Evaluate removing `dserv` from this layer and providing it in MFS. +// (The functions should in that case add a `DAGService` argument.) + +// Link size estimation function. For production it's usually the one here +// but during test we may mock it to get fixed sizes. +func productionLinkSize(linkName string, linkCid cid.Cid) int { + return len(linkName) + linkCid.ByteLen() +} + +func init() { + linksize.LinkSizeFunction = productionLinkSize +} + +// BasicDirectory is the basic implementation of `Directory`. All the entries +// are stored in a single node. +type BasicDirectory struct { + node *mdag.ProtoNode + dserv ipld.DAGService + + // Internal variable used to cache the estimated size of the basic directory: + // for each link, aggregate link name + link CID. DO NOT CHANGE THIS + // as it will affect the HAMT transition behavior in HAMTShardingSize. + // (We maintain this value up to date even if the HAMTShardingSize is off + // since potentially the option could be activated on the fly.) + estimatedSize int +} + +// HAMTDirectory is the HAMT implementation of `Directory`. +// (See package `hamt` for more information.) +type HAMTDirectory struct { + shard *hamt.Shard + dserv ipld.DAGService + + // Track the changes in size by the AddChild and RemoveChild calls + // for the HAMTShardingSize option. + sizeChange int +} + +func newEmptyBasicDirectory(dserv ipld.DAGService) *BasicDirectory { + return newBasicDirectoryFromNode(dserv, format.EmptyDirNode()) +} + +func newBasicDirectoryFromNode(dserv ipld.DAGService, node *mdag.ProtoNode) *BasicDirectory { + basicDir := new(BasicDirectory) + basicDir.node = node + basicDir.dserv = dserv + + // Scan node links (if any) to restore estimated size. + basicDir.computeEstimatedSize() + + return basicDir +} + +// NewDirectory returns a Directory implemented by DynamicDirectory +// containing a BasicDirectory that can be converted to a HAMTDirectory. +func NewDirectory(dserv ipld.DAGService) Directory { + return &DynamicDirectory{newEmptyBasicDirectory(dserv)} +} + +// ErrNotADir implies that the given node was not a unixfs directory +var ErrNotADir = fmt.Errorf("merkledag node was not a directory or shard") + +// NewDirectoryFromNode loads a unixfs directory from the given IPLD node and +// DAGService. 
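+//
+// A minimal usage sketch (`dserv`, the directory root node `nd`, and `ctx`
+// are assumed to be provided by the caller):
+//
+//	dir, err := NewDirectoryFromNode(dserv, nd)
+//	if err != nil {
+//		return err
+//	}
+//	links, err := dir.Links(ctx) // or Find / ForEachLink / AddChild ...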
+func NewDirectoryFromNode(dserv ipld.DAGService, node ipld.Node) (Directory, error) { + protoBufNode, ok := node.(*mdag.ProtoNode) + if !ok { + return nil, ErrNotADir + } + + fsNode, err := format.FSNodeFromBytes(protoBufNode.Data()) + if err != nil { + return nil, err + } + + switch fsNode.Type() { + case format.TDirectory: + return &DynamicDirectory{newBasicDirectoryFromNode(dserv, protoBufNode.Copy().(*mdag.ProtoNode))}, nil + case format.THAMTShard: + shard, err := hamt.NewHamtFromDag(dserv, node) + if err != nil { + return nil, err + } + return &DynamicDirectory{&HAMTDirectory{shard, dserv, 0}}, nil + } + + return nil, ErrNotADir +} + +func (d *BasicDirectory) computeEstimatedSize() { + d.estimatedSize = 0 + // err is just breaking the iteration and we always return nil + _ = d.ForEachLink(context.TODO(), func(l *ipld.Link) error { + d.addToEstimatedSize(l.Name, l.Cid) + return nil + }) + // ForEachLink will never fail traversing the BasicDirectory + // and neither the inner callback `addToEstimatedSize`. +} + +func (d *BasicDirectory) addToEstimatedSize(name string, linkCid cid.Cid) { + d.estimatedSize += linksize.LinkSizeFunction(name, linkCid) +} + +func (d *BasicDirectory) removeFromEstimatedSize(name string, linkCid cid.Cid) { + d.estimatedSize -= linksize.LinkSizeFunction(name, linkCid) + if d.estimatedSize < 0 { + // Something has gone very wrong. Log an error and recompute the + // size from scratch. + log.Error("BasicDirectory's estimatedSize went below 0") + d.computeEstimatedSize() + } +} + +// SetCidBuilder implements the `Directory` interface. +func (d *BasicDirectory) SetCidBuilder(builder cid.Builder) { + d.node.SetCidBuilder(builder) +} + +// AddChild implements the `Directory` interface. It adds (or replaces) +// a link to the given `node` under `name`. +func (d *BasicDirectory) AddChild(ctx context.Context, name string, node ipld.Node) error { + link, err := ipld.MakeLink(node) + if err != nil { + return err + } + + return d.addLinkChild(ctx, name, link) +} + +func (d *BasicDirectory) needsToSwitchToHAMTDir(name string, nodeToAdd ipld.Node) (bool, error) { + if HAMTShardingSize == 0 { // Option disabled. + return false, nil + } + + operationSizeChange := 0 + // Find if there is an old entry under that name that will be overwritten. + entryToRemove, err := d.node.GetNodeLink(name) + if err != mdag.ErrLinkNotFound { + if err != nil { + return false, err + } + operationSizeChange -= linksize.LinkSizeFunction(name, entryToRemove.Cid) + } + if nodeToAdd != nil { + operationSizeChange += linksize.LinkSizeFunction(name, nodeToAdd.Cid()) + } + + return d.estimatedSize+operationSizeChange >= HAMTShardingSize, nil +} + +// addLinkChild adds the link as an entry to this directory under the given +// name. Plumbing function for the AddChild API. +func (d *BasicDirectory) addLinkChild(ctx context.Context, name string, link *ipld.Link) error { + // Remove old link and account for size change (if it existed; ignore + // `ErrNotExist` otherwise). 
+ err := d.RemoveChild(ctx, name) + if err != nil && err != os.ErrNotExist { + return err + } + + err = d.node.AddRawLink(name, link) + if err != nil { + return err + } + d.addToEstimatedSize(name, link.Cid) + return nil +} + +// EnumLinksAsync returns a channel which will receive Links in the directory +// as they are enumerated, where order is not gauranteed +func (d *BasicDirectory) EnumLinksAsync(ctx context.Context) <-chan format.LinkResult { + linkResults := make(chan format.LinkResult) + go func() { + defer close(linkResults) + for _, l := range d.node.Links() { + select { + case linkResults <- format.LinkResult{ + Link: l, + Err: nil, + }: + case <-ctx.Done(): + return + } + } + }() + return linkResults +} + +// ForEachLink implements the `Directory` interface. +func (d *BasicDirectory) ForEachLink(_ context.Context, f func(*ipld.Link) error) error { + for _, l := range d.node.Links() { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// Links implements the `Directory` interface. +func (d *BasicDirectory) Links(ctx context.Context) ([]*ipld.Link, error) { + return d.node.Links(), nil +} + +// Find implements the `Directory` interface. +func (d *BasicDirectory) Find(ctx context.Context, name string) (ipld.Node, error) { + lnk, err := d.node.GetNodeLink(name) + if err == mdag.ErrLinkNotFound { + err = os.ErrNotExist + } + if err != nil { + return nil, err + } + + return d.dserv.Get(ctx, lnk.Cid) +} + +// RemoveChild implements the `Directory` interface. +func (d *BasicDirectory) RemoveChild(ctx context.Context, name string) error { + // We need to *retrieve* the link before removing it to update the estimated + // size. This means we may iterate the links slice twice: if traversing this + // becomes a problem, a factor of 2 isn't going to make much of a difference. + // We'd likely need to cache a link resolution map in that case. + link, err := d.node.GetNodeLink(name) + if err == mdag.ErrLinkNotFound { + return os.ErrNotExist + } + if err != nil { + return err // at the moment there is no other error besides ErrLinkNotFound + } + + // The name actually existed so we should update the estimated size. + d.removeFromEstimatedSize(link.Name, link.Cid) + + return d.node.RemoveNodeLink(name) + // GetNodeLink didn't return ErrLinkNotFound so this won't fail with that + // and we don't need to convert the error again. +} + +// GetNode implements the `Directory` interface. +func (d *BasicDirectory) GetNode() (ipld.Node, error) { + return d.node, nil +} + +// GetCidBuilder implements the `Directory` interface. +func (d *BasicDirectory) GetCidBuilder() cid.Builder { + return d.node.CidBuilder() +} + +// switchToSharding returns a HAMT implementation of this directory. +func (d *BasicDirectory) switchToSharding(ctx context.Context) (*HAMTDirectory, error) { + hamtDir := new(HAMTDirectory) + hamtDir.dserv = d.dserv + + shard, err := hamt.NewShard(d.dserv, DefaultShardWidth) + if err != nil { + return nil, err + } + shard.SetCidBuilder(d.node.CidBuilder()) + hamtDir.shard = shard + + for _, lnk := range d.node.Links() { + err = hamtDir.shard.SetLink(ctx, lnk.Name, lnk) + if err != nil { + return nil, err + } + } + + return hamtDir, nil +} + +// SetCidBuilder implements the `Directory` interface. +func (d *HAMTDirectory) SetCidBuilder(builder cid.Builder) { + d.shard.SetCidBuilder(builder) +} + +// AddChild implements the `Directory` interface. 
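+// Any entry that gets overwritten is subtracted from the running sizeChange
+// tally, so a later needsToSwitchToBasicDir call sees the net effect of
+// overwrites as well as plain additions.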
+func (d *HAMTDirectory) AddChild(ctx context.Context, name string, nd ipld.Node) error {
+	oldChild, err := d.shard.Swap(ctx, name, nd)
+	if err != nil {
+		return err
+	}
+
+	if oldChild != nil {
+		d.removeFromSizeChange(oldChild.Name, oldChild.Cid)
+	}
+	d.addToSizeChange(name, nd.Cid())
+	return nil
+}
+
+// ForEachLink implements the `Directory` interface.
+func (d *HAMTDirectory) ForEachLink(ctx context.Context, f func(*ipld.Link) error) error {
+	return d.shard.ForEachLink(ctx, f)
+}
+
+// EnumLinksAsync returns a channel which will receive Links in the directory
+// as they are enumerated, where order is not guaranteed
+func (d *HAMTDirectory) EnumLinksAsync(ctx context.Context) <-chan format.LinkResult {
+	return d.shard.EnumLinksAsync(ctx)
+}
+
+// Links implements the `Directory` interface.
+func (d *HAMTDirectory) Links(ctx context.Context) ([]*ipld.Link, error) {
+	return d.shard.EnumLinks(ctx)
+}
+
+// Find implements the `Directory` interface. It will traverse the tree.
+func (d *HAMTDirectory) Find(ctx context.Context, name string) (ipld.Node, error) {
+	lnk, err := d.shard.Find(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	return lnk.GetNode(ctx, d.dserv)
+}
+
+// RemoveChild implements the `Directory` interface.
+func (d *HAMTDirectory) RemoveChild(ctx context.Context, name string) error {
+	oldChild, err := d.shard.Take(ctx, name)
+	if err != nil {
+		return err
+	}
+
+	if oldChild != nil {
+		d.removeFromSizeChange(oldChild.Name, oldChild.Cid)
+	}
+
+	return nil
+}
+
+// GetNode implements the `Directory` interface.
+func (d *HAMTDirectory) GetNode() (ipld.Node, error) {
+	return d.shard.Node()
+}
+
+// GetCidBuilder implements the `Directory` interface.
+func (d *HAMTDirectory) GetCidBuilder() cid.Builder {
+	return d.shard.CidBuilder()
+}
+
+// switchToBasic returns a BasicDirectory implementation of this directory.
+func (d *HAMTDirectory) switchToBasic(ctx context.Context) (*BasicDirectory, error) {
+	basicDir := newEmptyBasicDirectory(d.dserv)
+	basicDir.SetCidBuilder(d.GetCidBuilder())
+
+	err := d.ForEachLink(ctx, func(lnk *ipld.Link) error {
+		err := basicDir.addLinkChild(ctx, lnk.Name, lnk)
+		if err != nil {
+			return err
+		}
+
+		return nil
+		// This function enumerates all the links in the Directory requiring all
+		// shards to be accessible but it is only called *after* sizeBelowThreshold
+		// returns true, which means we have already enumerated and fetched *all*
+		// shards in the first place (that's the only way we can be really sure
+		// we are actually below the threshold).
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return basicDir, nil
+}
+
+func (d *HAMTDirectory) addToSizeChange(name string, linkCid cid.Cid) {
+	d.sizeChange += linksize.LinkSizeFunction(name, linkCid)
+}
+
+func (d *HAMTDirectory) removeFromSizeChange(name string, linkCid cid.Cid) {
+	d.sizeChange -= linksize.LinkSizeFunction(name, linkCid)
+}
+
+// needsToSwitchToBasicDir evaluates a switch from HAMTDirectory to
+// BasicDirectory in case the size will go below the threshold when we are
+// adding or removing an entry.
+// In both the add/remove operations any old name will be removed, and for the
+// add operation in particular a new entry will be added under that name
+// (otherwise nodeToAdd is nil). We compute both the (potential) future
+// subtraction and addition to the size change.
+func (d *HAMTDirectory) needsToSwitchToBasicDir(ctx context.Context, name string, nodeToAdd ipld.Node) (switchToBasic bool, err error) {
+	if HAMTShardingSize == 0 { // Option disabled.
+		return false, nil
+	}
+
+	operationSizeChange := 0
+
+	// Find if there is an old entry under that name that will be overwritten
+	// (AddEntry) or flat out removed (RemoveEntry).
+	entryToRemove, err := d.shard.Find(ctx, name)
+	if err != os.ErrNotExist {
+		if err != nil {
+			return false, err
+		}
+		operationSizeChange -= linksize.LinkSizeFunction(name, entryToRemove.Cid)
+	}
+
+	// For the AddEntry case compute the size addition of the new entry.
+	if nodeToAdd != nil {
+		operationSizeChange += linksize.LinkSizeFunction(name, nodeToAdd.Cid())
+	}
+
+	if d.sizeChange+operationSizeChange >= 0 {
+		// We won't have reduced the HAMT net size.
+		return false, nil
+	}
+
+	// We have reduced the directory size; check if it went below the
+	// HAMTShardingSize threshold to trigger a switch.
+	return d.sizeBelowThreshold(ctx, operationSizeChange)
+}
+
+// sizeBelowThreshold evaluates the directory size with a future sizeChange
+// and checks if it will be below the HAMTShardingSize threshold (to trigger
+// a transition to a BasicDirectory).
+// Instead of enumerating the entire tree we eagerly call EnumLinksAsync
+// until we either reach a value above the threshold (in that case no need
+// to keep counting) or an error occurs (like the context being canceled
+// if we take too much time fetching the necessary shards).
+func (d *HAMTDirectory) sizeBelowThreshold(ctx context.Context, sizeChange int) (below bool, err error) {
+	if HAMTShardingSize == 0 {
+		panic("asked to compute HAMT size with HAMTShardingSize option off (0)")
+	}
+
+	// We don't necessarily compute the full size of *all* shards as we might
+	// end early if we already know we're above the threshold or run out of time.
+	partialSize := 0
+
+	// We stop the enumeration once we have enough information and exit this function.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	for linkResult := range d.EnumLinksAsync(ctx) {
+		if linkResult.Err != nil {
+			return false, linkResult.Err
+		}
+
+		partialSize += linksize.LinkSizeFunction(linkResult.Link.Name, linkResult.Link.Cid)
+		if partialSize+sizeChange >= HAMTShardingSize {
+			// We have already fetched enough shards to assert we are
+			// above the threshold, so no need to keep fetching.
+			return false, nil
+		}
+	}
+
+	// We enumerated *all* links in all shards and didn't reach the threshold.
+	return true, nil
+}
+
+// DynamicDirectory wraps a Directory interface and provides extra logic
+// to switch from BasicDirectory to HAMTDirectory and back, based on size.
+type DynamicDirectory struct {
+	Directory
+}
+
+var _ Directory = (*DynamicDirectory)(nil)
+
+// AddChild implements the `Directory` interface. We check when adding new entries
+// if we should switch to HAMTDirectory according to global option(s).
+func (d *DynamicDirectory) AddChild(ctx context.Context, name string, nd ipld.Node) error {
+	hamtDir, ok := d.Directory.(*HAMTDirectory)
+	if ok {
+		// We evaluate a switch in the HAMTDirectory case even for an AddChild
+		// as it may overwrite an existing entry and end up actually reducing
+		// the directory size.
+ switchToBasic, err := hamtDir.needsToSwitchToBasicDir(ctx, name, nd) + if err != nil { + return err + } + + if switchToBasic { + basicDir, err := hamtDir.switchToBasic(ctx) + if err != nil { + return err + } + err = basicDir.AddChild(ctx, name, nd) + if err != nil { + return err + } + d.Directory = basicDir + return nil + } + + return d.Directory.AddChild(ctx, name, nd) + } + + // BasicDirectory + basicDir := d.Directory.(*BasicDirectory) + switchToHAMT, err := basicDir.needsToSwitchToHAMTDir(name, nd) + if err != nil { + return err + } + if !switchToHAMT { + return basicDir.AddChild(ctx, name, nd) + } + hamtDir, err = basicDir.switchToSharding(ctx) + if err != nil { + return err + } + err = hamtDir.AddChild(ctx, name, nd) + if err != nil { + return err + } + d.Directory = hamtDir + return nil +} + +// RemoveChild implements the `Directory` interface. Used in the case where we wrap +// a HAMTDirectory that might need to be downgraded to a BasicDirectory. The +// upgrade path is in AddChild. +func (d *DynamicDirectory) RemoveChild(ctx context.Context, name string) error { + hamtDir, ok := d.Directory.(*HAMTDirectory) + if !ok { + return d.Directory.RemoveChild(ctx, name) + } + + switchToBasic, err := hamtDir.needsToSwitchToBasicDir(ctx, name, nil) + if err != nil { + return err + } + + if !switchToBasic { + return hamtDir.RemoveChild(ctx, name) + } + + basicDir, err := hamtDir.switchToBasic(ctx) + if err != nil { + return err + } + err = basicDir.RemoveChild(ctx, name) + if err != nil { + return err + } + d.Directory = basicDir + return nil +} diff --git a/unixfs/io/directory_test.go b/unixfs/io/directory_test.go new file mode 100644 index 0000000000..518afd0742 --- /dev/null +++ b/unixfs/io/directory_test.go @@ -0,0 +1,638 @@ +package io + +import ( + "context" + "fmt" + "math" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + blocks "github.com/ipfs/boxo/blocks" + bsrv "github.com/ipfs/boxo/blockservice" + blockstore "github.com/ipfs/boxo/blockstore" + offline "github.com/ipfs/boxo/exchange/offline" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + ipld "github.com/ipfs/go-ipld-format" + mdag "github.com/ipfs/boxo/ipld/merkledag" + mdtest "github.com/ipfs/boxo/ipld/merkledag/test" + + ft "github.com/ipfs/boxo/unixfs" + "github.com/ipfs/boxo/unixfs/hamt" + "github.com/ipfs/boxo/unixfs/internal" + "github.com/ipfs/boxo/unixfs/private/linksize" + + "github.com/stretchr/testify/assert" +) + +func TestEmptyNode(t *testing.T) { + n := ft.EmptyDirNode() + if len(n.Links()) != 0 { + t.Fatal("empty node should have 0 links") + } +} + +func TestDirectoryGrowth(t *testing.T) { + ds := mdtest.Mock() + dir := NewDirectory(ds) + ctx := context.Background() + + d := ft.EmptyDirNode() + ds.Add(ctx, d) + + nelems := 10000 + + for i := 0; i < nelems; i++ { + err := dir.AddChild(ctx, fmt.Sprintf("dir%d", i), d) + if err != nil { + t.Fatal(err) + } + } + + _, err := dir.GetNode() + if err != nil { + t.Fatal(err) + } + + links, err := dir.Links(ctx) + if err != nil { + t.Fatal(err) + } + + if len(links) != nelems { + t.Fatal("didnt get right number of elements") + } + + dirc := d.Cid() + + names := make(map[string]bool) + for _, l := range links { + names[l.Name] = true + if !l.Cid.Equals(dirc) { + t.Fatal("link wasnt correct") + } + } + + for i := 0; i < nelems; i++ { + dn := fmt.Sprintf("dir%d", i) + if !names[dn] { + t.Fatal("didnt find directory: ", dn) + } + + _, err := dir.Find(context.Background(), dn) + if err != nil { 
+ t.Fatal(err) + } + } +} + +func TestDuplicateAddDir(t *testing.T) { + ds := mdtest.Mock() + dir := NewDirectory(ds) + ctx := context.Background() + nd := ft.EmptyDirNode() + + err := dir.AddChild(ctx, "test", nd) + if err != nil { + t.Fatal(err) + } + + err = dir.AddChild(ctx, "test", nd) + if err != nil { + t.Fatal(err) + } + + lnks, err := dir.Links(ctx) + if err != nil { + t.Fatal(err) + } + + if len(lnks) != 1 { + t.Fatal("expected only one link") + } +} + +func TestBasicDirectory_estimatedSize(t *testing.T) { + ds := mdtest.Mock() + basicDir := newEmptyBasicDirectory(ds) + + testDirectorySizeEstimation(t, basicDir, ds, func(dir Directory) int { + return dir.(*BasicDirectory).estimatedSize + }) +} + +func TestHAMTDirectory_sizeChange(t *testing.T) { + ds := mdtest.Mock() + hamtDir, err := newEmptyHAMTDirectory(ds, DefaultShardWidth) + assert.NoError(t, err) + + testDirectorySizeEstimation(t, hamtDir, ds, func(dir Directory) int { + // Since we created a HAMTDirectory from scratch with size 0 its + // internal sizeChange delta will in fact track the directory size + // throughout this run. + return dir.(*HAMTDirectory).sizeChange + }) +} + +func fullSizeEnumeration(dir Directory) int { + size := 0 + dir.ForEachLink(context.Background(), func(l *ipld.Link) error { + size += linksize.LinkSizeFunction(l.Name, l.Cid) + return nil + }) + return size +} + +func testDirectorySizeEstimation(t *testing.T, dir Directory, ds ipld.DAGService, size func(Directory) int) { + linksize.LinkSizeFunction = mockLinkSizeFunc(1) + defer func() { linksize.LinkSizeFunction = productionLinkSize }() + + ctx := context.Background() + child := ft.EmptyFileNode() + assert.NoError(t, ds.Add(ctx, child)) + + // Several overwrites should not corrupt the size estimation. + assert.NoError(t, dir.AddChild(ctx, "child", child)) + assert.NoError(t, dir.AddChild(ctx, "child", child)) + assert.NoError(t, dir.AddChild(ctx, "child", child)) + assert.NoError(t, dir.RemoveChild(ctx, "child")) + assert.NoError(t, dir.AddChild(ctx, "child", child)) + assert.NoError(t, dir.RemoveChild(ctx, "child")) + assert.Equal(t, 0, size(dir), "estimated size is not zero after removing all entries") + + dirEntries := 100 + for i := 0; i < dirEntries; i++ { + assert.NoError(t, dir.AddChild(ctx, fmt.Sprintf("child-%03d", i), child)) + } + assert.Equal(t, dirEntries, size(dir), "estimated size inaccurate after adding many entries") + + assert.NoError(t, dir.RemoveChild(ctx, "child-045")) // just random values + assert.NoError(t, dir.RemoveChild(ctx, "child-063")) + assert.NoError(t, dir.RemoveChild(ctx, "child-011")) + assert.NoError(t, dir.RemoveChild(ctx, "child-000")) + assert.NoError(t, dir.RemoveChild(ctx, "child-099")) + dirEntries -= 5 + assert.Equal(t, dirEntries, size(dir), "estimated size inaccurate after removing some entries") + + // All of the following remove operations will fail (won't impact dirEntries): + assert.Error(t, dir.RemoveChild(ctx, "nonexistent-name")) + assert.Error(t, dir.RemoveChild(ctx, "child-045")) // already removed + assert.Error(t, dir.RemoveChild(ctx, "child-100")) + assert.Equal(t, dirEntries, size(dir), "estimated size inaccurate after failed remove attempts") + + // Restore a directory from original's node and check estimated size consistency. 
+ dirNode, err := dir.GetNode() + assert.NoError(t, err) + restoredDir, err := NewDirectoryFromNode(ds, dirNode.(*mdag.ProtoNode)) + assert.NoError(t, err) + assert.Equal(t, size(dir), fullSizeEnumeration(restoredDir), "restored directory's size doesn't match original's") + // We don't use the estimation size function for the restored directory + // because in the HAMT case this function depends on the sizeChange variable + // that will be cleared when loading the directory from the node. + // This also covers the case of comparing the size estimation `size()` with + // the full enumeration function `fullSizeEnumeration()` to make sure it's + // correct. +} + +// Any entry link size will have the fixedSize passed. +func mockLinkSizeFunc(fixedSize int) func(linkName string, linkCid cid.Cid) int { + return func(_ string, _ cid.Cid) int { + return fixedSize + } +} + +func checkBasicDirectory(t *testing.T, dir Directory, errorMessage string) { + if _, ok := dir.(*DynamicDirectory).Directory.(*BasicDirectory); !ok { + t.Fatal(errorMessage) + } +} + +func checkHAMTDirectory(t *testing.T, dir Directory, errorMessage string) { + if _, ok := dir.(*DynamicDirectory).Directory.(*HAMTDirectory); !ok { + t.Fatal(errorMessage) + } +} + +func TestProductionLinkSize(t *testing.T) { + link, err := ipld.MakeLink(ft.EmptyDirNode()) + assert.NoError(t, err) + link.Name = "directory_link_name" + assert.Equal(t, 53, productionLinkSize(link.Name, link.Cid)) + + link, err = ipld.MakeLink(ft.EmptyFileNode()) + assert.NoError(t, err) + link.Name = "file_link_name" + assert.Equal(t, 48, productionLinkSize(link.Name, link.Cid)) + + ds := mdtest.Mock() + basicDir := newEmptyBasicDirectory(ds) + assert.NoError(t, err) + for i := 0; i < 10; i++ { + basicDir.AddChild(context.Background(), strconv.FormatUint(uint64(i), 10), ft.EmptyFileNode()) + } + basicDirNode, err := basicDir.GetNode() + assert.NoError(t, err) + link, err = ipld.MakeLink(basicDirNode) + assert.NoError(t, err) + link.Name = "basic_dir" + assert.Equal(t, 43, productionLinkSize(link.Name, link.Cid)) +} + +// Test HAMTDirectory <-> BasicDirectory switch based on directory size. The +// switch is managed by the DynamicDirectory abstraction. +func TestDynamicDirectorySwitch(t *testing.T) { + oldHamtOption := HAMTShardingSize + defer func() { HAMTShardingSize = oldHamtOption }() + HAMTShardingSize = 0 // Disable automatic switch at the start. + linksize.LinkSizeFunction = mockLinkSizeFunc(1) + defer func() { linksize.LinkSizeFunction = productionLinkSize }() + + ds := mdtest.Mock() + dir := NewDirectory(ds) + checkBasicDirectory(t, dir, "new dir is not BasicDirectory") + + ctx := context.Background() + child := ft.EmptyDirNode() + err := ds.Add(ctx, child) + assert.NoError(t, err) + + err = dir.AddChild(ctx, "1", child) + assert.NoError(t, err) + checkBasicDirectory(t, dir, "added child, option still disabled") + + // Set a threshold so big a new entry won't trigger the change. + HAMTShardingSize = math.MaxInt32 + + err = dir.AddChild(ctx, "2", child) + assert.NoError(t, err) + checkBasicDirectory(t, dir, "added child, option now enabled but at max") + + // Now set it so low to make sure any new entry will trigger the upgrade. + HAMTShardingSize = 1 + + // We are already above the threshold, we trigger the switch with an overwrite + // (any AddChild() should reevaluate the size). 
+	err = dir.AddChild(ctx, "2", child)
+	assert.NoError(t, err)
+	checkHAMTDirectory(t, dir, "added child, option at min, should switch up")
+
+	// Set the threshold at the number of current entries and delete the last
+	// one to trigger a switch and check that the rest of the entries are
+	// preserved.
+	HAMTShardingSize = 2
+	err = dir.RemoveChild(ctx, "2")
+	assert.NoError(t, err)
+	checkBasicDirectory(t, dir, "removed threshold entry, option at min, should switch down")
+}
+
+func TestIntegrityOfDirectorySwitch(t *testing.T) {
+	ds := mdtest.Mock()
+	dir := NewDirectory(ds)
+	checkBasicDirectory(t, dir, "new dir is not BasicDirectory")
+
+	ctx := context.Background()
+	child := ft.EmptyDirNode()
+	err := ds.Add(ctx, child)
+	assert.NoError(t, err)
+
+	basicDir := newEmptyBasicDirectory(ds)
+	hamtDir, err := newEmptyHAMTDirectory(ds, DefaultShardWidth)
+	assert.NoError(t, err)
+	for i := 0; i < 1000; i++ {
+		basicDir.AddChild(ctx, strconv.FormatUint(uint64(i), 10), child)
+		hamtDir.AddChild(ctx, strconv.FormatUint(uint64(i), 10), child)
+	}
+	compareDirectoryEntries(t, basicDir, hamtDir)
+
+	hamtDirFromSwitch, err := basicDir.switchToSharding(ctx)
+	assert.NoError(t, err)
+	basicDirFromSwitch, err := hamtDir.switchToBasic(ctx)
+	assert.NoError(t, err)
+	compareDirectoryEntries(t, basicDir, basicDirFromSwitch)
+	compareDirectoryEntries(t, hamtDir, hamtDirFromSwitch)
+}
+
+// This is the value of concurrent fetches during dag.Walk. Used in the
+// test to better predict how many nodes will be fetched.
+var defaultConcurrentFetch = 32
+
+// FIXME: Taken from private github.com/ipfs/boxo/ipld/merkledag@v0.2.3/merkledag.go.
+// (We could also pass an explicit concurrency value in `(*Shard).EnumLinksAsync()`
+// and take ownership of this configuration, but that departs from the more
+// standard and reliable one in `go-merkledag`.)
+
+// Test that we fetch as few nodes as needed to reach the HAMTShardingSize
+// during the sizeBelowThreshold computation.
+func TestHAMTEnumerationWhenComputingSize(t *testing.T) {
+	// Adjust HAMT global/static options for the test to simplify its logic.
+	// FIXME: These variables weren't designed to be modified and we should
+	// review their side effects in depth.
+
+	// Set all link sizes to a uniform 1 so the estimated directory size
+	// is just the count of its entry links (in HAMT/Shard terminology these
+	// are the "value" links pointing to anything that is *not* another Shard).
+	linksize.LinkSizeFunction = mockLinkSizeFunc(1)
+	defer func() { linksize.LinkSizeFunction = productionLinkSize }()
+
+	// Use an identity hash function to ease the construction of "complete" HAMTs
+	// (see CreateCompleteHAMT below for more details). (Ideally this should be
+	// a parameter we pass and not a global option we modify in the caller.)
+	oldHashFunc := internal.HAMTHashFunction
+	defer func() { internal.HAMTHashFunction = oldHashFunc }()
+	internal.HAMTHashFunction = idHash
+
+	oldHamtOption := HAMTShardingSize
+	defer func() { HAMTShardingSize = oldHamtOption }()
+
+	// --- End of test static configuration adjustments. ---
+
+	// Some arbitrary values below that keep this test inexpensive.
+	treeHeight := 4
+	// How many leaf shard nodes (with value links,
+	// i.e., directory entries) we need to reach the threshold.
+	thresholdToWidthRatio := 4
+	// Departing from DefaultShardWidth of 256 to reduce HAMT size in
+	// CreateCompleteHAMT.
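+	// For the concrete values used below (treeHeight = 4, shardWidth = 16,
+	// thresholdToWidthRatio = 4) the threshold is 16 * 4 = 64 entry links,
+	// the optimal lower bound is ceil(64/16) + 1 + 1 = 6 fetched nodes, and
+	// the BFS upper bound is 1 + 16 + 256 + 4 = 277 fetched nodes (before
+	// adding the concurrency allowance).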
+	shardWidth := 16
+	HAMTShardingSize = shardWidth * thresholdToWidthRatio
+
+	// We create a "complete" HAMT (see CreateCompleteHAMT for more details)
+	// with a regular structure to be able to predict how many Shard nodes we
+	// will need to fetch in order to reach the HAMTShardingSize threshold in
+	// sizeBelowThreshold (assuming a sequential DAG walk function).
+
+	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+	countGetsDS := newCountGetsDS(bstore)
+	dsrv := mdag.NewDAGService(bsrv.New(countGetsDS, offline.Exchange(countGetsDS)))
+	completeHAMTRoot, err := CreateCompleteHAMT(dsrv, treeHeight, shardWidth)
+	assert.NoError(t, err)
+
+	// Calculate the optimal number of nodes to traverse.
+	optimalNodesToFetch := 0
+	nodesToProcess := HAMTShardingSize
+	for i := 0; i < treeHeight-1; i++ {
+		// Divide by the shard width to get the parents and continue up the tree.
+		parentNodes := int(math.Ceil(float64(nodesToProcess) / float64(shardWidth)))
+		optimalNodesToFetch += parentNodes
+		nodesToProcess = parentNodes
+	}
+
+	// With this structure and a BFS traversal (from `parallelWalkDepth`) we
+	// would roughly fetch the following nodes:
+	nodesToFetch := 0
+	// * all layers up to (but not including) the last one with leaf nodes
+	//   (because it's a BFS)
+	for i := 0; i < treeHeight-1; i++ {
+		nodesToFetch += int(math.Pow(float64(shardWidth), float64(i)))
+	}
+	// * `thresholdToWidthRatio` leaf Shards with enough value links to reach
+	//   the HAMTShardingSize threshold.
+	nodesToFetch += thresholdToWidthRatio
+
+	hamtDir, err := newHAMTDirectoryFromNode(dsrv, completeHAMTRoot)
+	assert.NoError(t, err)
+
+	countGetsDS.resetCounter()
+	countGetsDS.setRequestDelay(10 * time.Millisecond)
+	// (Without the `setRequestDelay` above the number of nodes fetched
+	// drops dramatically and unpredictably as the BFS starts to behave
+	// more like a DFS because some search paths are fetched faster than
+	// others.)
+	below, err := hamtDir.sizeBelowThreshold(context.TODO(), 0)
+	assert.NoError(t, err)
+	assert.False(t, below)
+	t.Logf("fetched %d nodes (predicted range: %d-%d)",
+		countGetsDS.uniqueCidsFetched(), optimalNodesToFetch, nodesToFetch+defaultConcurrentFetch)
+	// Check that the actual number of nodes fetched is within the margin of the
+	// estimated `nodesToFetch` plus an extra of `defaultConcurrentFetch` since
+	// we are fetching in parallel.
+	assert.True(t, countGetsDS.uniqueCidsFetched() <= nodesToFetch+defaultConcurrentFetch)
+	assert.True(t, countGetsDS.uniqueCidsFetched() >= optimalNodesToFetch)
+}
+
+// compareDirectoryEntries checks that leftDir and rightDir contain the same
+// set of links (compared after sorting by name).
+func compareDirectoryEntries(t *testing.T, leftDir Directory, rightDir Directory) {
+	leftLinks, err := getAllLinksSortedByName(leftDir)
+	assert.NoError(t, err)
+	rightLinks, err := getAllLinksSortedByName(rightDir)
+	assert.NoError(t, err)
+
+	assert.Equal(t, len(leftLinks), len(rightLinks))
+
+	for i, leftLink := range leftLinks {
+		assert.Equal(t, leftLink, rightLinks[i]) // FIXME: Can we just compare the entire struct?
+	}
+}
+
+func getAllLinksSortedByName(d Directory) ([]*ipld.Link, error) {
+	entries, err := d.Links(context.Background())
+	if err != nil {
+		return nil, err
+	}
+	sortLinksByName(entries)
+	return entries, nil
+}
+
+func sortLinksByName(l []*ipld.Link) {
+	sort.SliceStable(l, func(i, j int) bool {
+		return strings.Compare(l[i].Name, l[j].Name) == -1 // i.e., l[i].Name < l[j].Name
+ }) +} + +func TestDirBuilder(t *testing.T) { + ds := mdtest.Mock() + dir := NewDirectory(ds) + ctx := context.Background() + + child := ft.EmptyDirNode() + err := ds.Add(ctx, child) + if err != nil { + t.Fatal(err) + } + + count := 5000 + + for i := 0; i < count; i++ { + err := dir.AddChild(ctx, fmt.Sprintf("entry %d", i), child) + if err != nil { + t.Fatal(err) + } + } + + dirnd, err := dir.GetNode() + if err != nil { + t.Fatal(err) + } + + links, err := dir.Links(ctx) + if err != nil { + t.Fatal(err) + } + + if len(links) != count { + t.Fatal("not enough links dawg", len(links), count) + } + + adir, err := NewDirectoryFromNode(ds, dirnd) + if err != nil { + t.Fatal(err) + } + + links, err = adir.Links(ctx) + if err != nil { + t.Fatal(err) + } + + names := make(map[string]bool) + for _, lnk := range links { + names[lnk.Name] = true + } + + for i := 0; i < count; i++ { + n := fmt.Sprintf("entry %d", i) + if !names[n] { + t.Fatal("COULDNT FIND: ", n) + } + } + + if len(links) != count { + t.Fatal("wrong number of links", len(links), count) + } + + linkResults := dir.EnumLinksAsync(ctx) + + asyncNames := make(map[string]bool) + var asyncLinks []*ipld.Link + + for linkResult := range linkResults { + if linkResult.Err != nil { + t.Fatal(linkResult.Err) + } + asyncNames[linkResult.Link.Name] = true + asyncLinks = append(asyncLinks, linkResult.Link) + } + + for i := 0; i < count; i++ { + n := fmt.Sprintf("entry %d", i) + if !asyncNames[n] { + t.Fatal("COULDNT FIND: ", n) + } + } + + if len(asyncLinks) != count { + t.Fatal("wrong number of links", len(asyncLinks), count) + } +} + +func newHAMTDirectoryFromNode(dserv ipld.DAGService, node ipld.Node) (*HAMTDirectory, error) { + shard, err := hamt.NewHamtFromDag(dserv, node) + if err != nil { + return nil, err + } + return &HAMTDirectory{ + dserv: dserv, + shard: shard, + }, nil +} + +func newEmptyHAMTDirectory(dserv ipld.DAGService, shardWidth int) (*HAMTDirectory, error) { + shard, err := hamt.NewShard(dserv, shardWidth) + if err != nil { + return nil, err + } + + return &HAMTDirectory{ + dserv: dserv, + shard: shard, + }, nil +} + +// countGetsDS is a DAG service that keeps track of the number of +// unique CIDs fetched. +type countGetsDS struct { + blockstore.Blockstore + + cidsFetched map[cid.Cid]struct{} + mapLock sync.Mutex + started bool + + getRequestDelay time.Duration +} + +var _ blockstore.Blockstore = (*countGetsDS)(nil) + +func newCountGetsDS(bs blockstore.Blockstore) *countGetsDS { + return &countGetsDS{ + bs, + make(map[cid.Cid]struct{}), + sync.Mutex{}, + false, + 0, + } +} + +func (d *countGetsDS) resetCounter() { + d.mapLock.Lock() + defer d.mapLock.Unlock() + d.cidsFetched = make(map[cid.Cid]struct{}) + d.started = true +} + +func (d *countGetsDS) uniqueCidsFetched() int { + d.mapLock.Lock() + defer d.mapLock.Unlock() + return len(d.cidsFetched) +} + +func (d *countGetsDS) setRequestDelay(timeout time.Duration) { + d.getRequestDelay = timeout +} + +func (d *countGetsDS) maybeSleep(c cid.Cid) { + d.mapLock.Lock() + _, cidRequestedBefore := d.cidsFetched[c] + d.cidsFetched[c] = struct{}{} + d.mapLock.Unlock() + + if d.getRequestDelay != 0 && !cidRequestedBefore { + // First request gets a timeout to simulate a network fetch. + // Subsequent requests get no timeout simulating an in-disk cache. 
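+		// (A uniform delay on first fetches keeps the parallel walk
+		// level-synchronized, which is what makes the node-count window
+		// predicted in TestHAMTEnumerationWhenComputingSize reliable.)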
+ time.Sleep(d.getRequestDelay) + } +} + +func (d *countGetsDS) Has(ctx context.Context, c cid.Cid) (bool, error) { + if d.started { + panic("implement me") + } + return d.Blockstore.Has(ctx, c) +} + +func (d *countGetsDS) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + blk, err := d.Blockstore.Get(ctx, c) + if err != nil { + return nil, err + } + + d.maybeSleep(c) + return blk, nil +} + +func (d *countGetsDS) GetSize(ctx context.Context, c cid.Cid) (int, error) { + if d.started { + panic("implement me") + } + return d.Blockstore.GetSize(ctx, c) +} + +func (d *countGetsDS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if d.started { + panic("implement me") + } + return d.Blockstore.AllKeysChan(ctx) +} diff --git a/unixfs/io/doc.go b/unixfs/io/doc.go new file mode 100644 index 0000000000..cf844bd23a --- /dev/null +++ b/unixfs/io/doc.go @@ -0,0 +1,3 @@ +// Package io implements convenience objects for working with the ipfs +// unixfs data format. +package io diff --git a/unixfs/io/resolve.go b/unixfs/io/resolve.go new file mode 100644 index 0000000000..cefe6753dc --- /dev/null +++ b/unixfs/io/resolve.go @@ -0,0 +1,41 @@ +package io + +import ( + "context" + + ft "github.com/ipfs/boxo/unixfs" + hamt "github.com/ipfs/boxo/unixfs/hamt" + dag "github.com/ipfs/boxo/ipld/merkledag" + + ipld "github.com/ipfs/go-ipld-format" +) + +// ResolveUnixfsOnce resolves a single hop of a path through a graph in a +// unixfs context. This includes handling traversing sharded directories. +func ResolveUnixfsOnce(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) { + pn, ok := nd.(*dag.ProtoNode) + if ok { + fsn, err := ft.FSNodeFromBytes(pn.Data()) + if err != nil { + // Not a unixfs node, use standard object traversal code + return nd.ResolveLink(names) + } + + if fsn.Type() == ft.THAMTShard { + rods := dag.NewReadOnlyDagService(ds) + s, err := hamt.NewHamtFromDag(rods, nd) + if err != nil { + return nil, nil, err + } + + out, err := s.Find(ctx, names[0]) + if err != nil { + return nil, nil, err + } + + return out, names[1:], nil + } + } + + return nd.ResolveLink(names) +} diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go new file mode 100644 index 0000000000..e89b5f5f92 --- /dev/null +++ b/unixfs/mod/dagmodifier.go @@ -0,0 +1,605 @@ +// Package mod provides DAG modification utilities to, for example, +// insert additional nodes in a unixfs DAG or truncate them. 
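+//
+// A minimal usage sketch (assuming an ipld.DAGService `dserv`, an existing
+// unixfs file node `node`, and the default chunker):
+//
+//	dm, err := mod.NewDagModifier(ctx, node, dserv, chunker.DefaultSplitter)
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err := dm.WriteAt(data, 1024); err != nil {
+//		// handle error
+//	}
+//	updated, err := dm.GetNode() // GetNode flushes pending writes via Sync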
+package mod
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+
+	ft "github.com/ipfs/boxo/unixfs"
+	help "github.com/ipfs/boxo/unixfs/importer/helpers"
+	trickle "github.com/ipfs/boxo/unixfs/importer/trickle"
+	uio "github.com/ipfs/boxo/unixfs/io"
+
+	chunker "github.com/ipfs/boxo/chunker"
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	mdag "github.com/ipfs/boxo/ipld/merkledag"
+)
+
+// Common errors
+var (
+	ErrSeekFail           = errors.New("failed to seek properly")
+	ErrUnrecognizedWhence = errors.New("unrecognized whence")
+	ErrNotUnixfs          = errors.New("dagmodifier only supports unixfs nodes (proto or raw)")
+)
+
+// 2MB
+var writebufferSize = 1 << 21
+
+// DagModifier is the only struct licensed and able to correctly
+// perform surgery on a DAG 'file'.
+// Dear god, please rename this to something more pleasant.
+type DagModifier struct {
+	dagserv ipld.DAGService
+	curNode ipld.Node
+
+	splitter   chunker.SplitterGen
+	ctx        context.Context
+	readCancel func()
+
+	writeStart uint64
+	curWrOff   uint64
+	wrBuf      *bytes.Buffer
+
+	Prefix    cid.Prefix
+	RawLeaves bool
+
+	read uio.DagReader
+}
+
+// NewDagModifier returns a new DagModifier. The Cid prefix for newly
+// created nodes will be inherited from the passed-in node. If the Cid
+// version is not 0, raw leaves will also be enabled. The Prefix and
+// RawLeaves options can be overridden by changing them after the call.
+func NewDagModifier(ctx context.Context, from ipld.Node, serv ipld.DAGService, spl chunker.SplitterGen) (*DagModifier, error) {
+	switch from.(type) {
+	case *mdag.ProtoNode, *mdag.RawNode:
+		// ok
+	default:
+		return nil, ErrNotUnixfs
+	}
+
+	prefix := from.Cid().Prefix()
+	prefix.Codec = cid.DagProtobuf
+	rawLeaves := false
+	if prefix.Version > 0 {
+		rawLeaves = true
+	}
+
+	return &DagModifier{
+		curNode:   from.Copy(),
+		dagserv:   serv,
+		splitter:  spl,
+		ctx:       ctx,
+		Prefix:    prefix,
+		RawLeaves: rawLeaves,
+	}, nil
+}
+
+// WriteAt will modify a dag file in place
+func (dm *DagModifier) WriteAt(b []byte, offset int64) (int, error) {
+	// TODO: this is currently VERY inefficient
+	// each write that happens at an offset other than the current one causes a
+	// flush to disk and a dag rewrite
+	if offset == int64(dm.writeStart) && dm.wrBuf != nil {
+		// If we would overwrite the previous write
+		if len(b) >= dm.wrBuf.Len() {
+			dm.wrBuf.Reset()
+		}
+	} else if uint64(offset) != dm.curWrOff {
+		size, err := dm.Size()
+		if err != nil {
+			return 0, err
+		}
+		if offset > size {
+			err := dm.expandSparse(offset - size)
+			if err != nil {
+				return 0, err
+			}
+		}
+
+		err = dm.Sync()
+		if err != nil {
+			return 0, err
+		}
+		dm.writeStart = uint64(offset)
+	}
+
+	return dm.Write(b)
+}
+
+// A reader that just returns zeros
+type zeroReader struct{}
+
+func (zr zeroReader) Read(b []byte) (int, error) {
+	for i := range b {
+		b[i] = 0
+	}
+	return len(b), nil
+}
+
+// expandSparse grows the file with zero blocks of 4096 bytes.
+// A small block size is chosen to aid deduplication.
+func (dm *DagModifier) expandSparse(size int64) error {
+	r := io.LimitReader(zeroReader{}, size)
+	spl := chunker.NewSizeSplitter(r, 4096)
+	nnode, err := dm.appendData(dm.curNode, spl)
+	if err != nil {
+		return err
+	}
+	err = dm.dagserv.Add(dm.ctx, nnode)
+	return err
+}
+
+// Write continues writing to the dag at the current offset
+func (dm *DagModifier) Write(b []byte) (int, error) {
+	if dm.read != nil {
+		dm.read = nil
+	}
+	if dm.wrBuf == nil {
+		dm.wrBuf = new(bytes.Buffer)
+	}
+
+	n, err := dm.wrBuf.Write(b)
+	if err != nil {
+		return n,
err + } + dm.curWrOff += uint64(n) + if dm.wrBuf.Len() > writebufferSize { + err := dm.Sync() + if err != nil { + return n, err + } + } + return n, nil +} + +// Size returns the Filesize of the node +func (dm *DagModifier) Size() (int64, error) { + fileSize, err := fileSize(dm.curNode) + if err != nil { + return 0, err + } + if dm.wrBuf != nil && int64(dm.wrBuf.Len())+int64(dm.writeStart) > int64(fileSize) { + return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil + } + return int64(fileSize), nil +} + +func fileSize(n ipld.Node) (uint64, error) { + switch nd := n.(type) { + case *mdag.ProtoNode: + fsn, err := ft.FSNodeFromBytes(nd.Data()) + if err != nil { + return 0, err + } + return fsn.FileSize(), nil + case *mdag.RawNode: + return uint64(len(nd.RawData())), nil + default: + return 0, ErrNotUnixfs + } +} + +// Sync writes changes to this dag to disk +func (dm *DagModifier) Sync() error { + // No buffer? Nothing to do + if dm.wrBuf == nil { + return nil + } + + // If we have an active reader, kill it + if dm.read != nil { + dm.read = nil + dm.readCancel() + } + + // Number of bytes we're going to write + buflen := dm.wrBuf.Len() + + fs, err := fileSize(dm.curNode) + if err != nil { + return err + } + if fs < dm.writeStart { + if err := dm.expandSparse(int64(dm.writeStart - fs)); err != nil { + return err + } + } + + // overwrite existing dag nodes + thisc, err := dm.modifyDag(dm.curNode, dm.writeStart) + if err != nil { + return err + } + + dm.curNode, err = dm.dagserv.Get(dm.ctx, thisc) + if err != nil { + return err + } + + // need to write past end of current dag + if dm.wrBuf.Len() > 0 { + dm.curNode, err = dm.appendData(dm.curNode, dm.splitter(dm.wrBuf)) + if err != nil { + return err + } + + err = dm.dagserv.Add(dm.ctx, dm.curNode) + if err != nil { + return err + } + } + + dm.writeStart += uint64(buflen) + dm.wrBuf = nil + + return nil +} + +// modifyDag writes the data in 'dm.wrBuf' over the data in 'node' starting at 'offset' +// returns the new key of the passed in node. +func (dm *DagModifier) modifyDag(n ipld.Node, offset uint64) (cid.Cid, error) { + // If we've reached a leaf node. + if len(n.Links()) == 0 { + switch nd0 := n.(type) { + case *mdag.ProtoNode: + fsn, err := ft.FSNodeFromBytes(nd0.Data()) + if err != nil { + return cid.Cid{}, err + } + + _, err = dm.wrBuf.Read(fsn.Data()[offset:]) + if err != nil && err != io.EOF { + return cid.Cid{}, err + } + + // Update newly written node.. 
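+			// (The Read above copied the pending bytes from wrBuf directly
+			// into the slice returned by fsn.Data(), so marshaling fsn below
+			// captures the overwritten content.)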
+ b, err := fsn.GetBytes() + if err != nil { + return cid.Cid{}, err + } + + nd := new(mdag.ProtoNode) + nd.SetData(b) + nd.SetCidBuilder(nd0.CidBuilder()) + err = dm.dagserv.Add(dm.ctx, nd) + if err != nil { + return cid.Cid{}, err + } + + return nd.Cid(), nil + case *mdag.RawNode: + origData := nd0.RawData() + bytes := make([]byte, len(origData)) + + // copy orig data up to offset + copy(bytes, origData[:offset]) + + // copy in new data + n, err := dm.wrBuf.Read(bytes[offset:]) + if err != nil && err != io.EOF { + return cid.Cid{}, err + } + + // copy remaining data + offsetPlusN := int(offset) + n + if offsetPlusN < len(origData) { + copy(bytes[offsetPlusN:], origData[offsetPlusN:]) + } + + nd, err := mdag.NewRawNodeWPrefix(bytes, nd0.Cid().Prefix()) + if err != nil { + return cid.Cid{}, err + } + err = dm.dagserv.Add(dm.ctx, nd) + if err != nil { + return cid.Cid{}, err + } + + return nd.Cid(), nil + } + } + + node, ok := n.(*mdag.ProtoNode) + if !ok { + return cid.Cid{}, ErrNotUnixfs + } + + fsn, err := ft.FSNodeFromBytes(node.Data()) + if err != nil { + return cid.Cid{}, err + } + + var cur uint64 + for i, bs := range fsn.BlockSizes() { + // We found the correct child to write into + if cur+bs > offset { + child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv) + if err != nil { + return cid.Cid{}, err + } + + k, err := dm.modifyDag(child, offset-cur) + if err != nil { + return cid.Cid{}, err + } + + node.Links()[i].Cid = k + + // Recache serialized node + _, err = node.EncodeProtobuf(true) + if err != nil { + return cid.Cid{}, err + } + + if dm.wrBuf.Len() == 0 { + // No more bytes to write! + break + } + offset = cur + bs + } + cur += bs + } + + err = dm.dagserv.Add(dm.ctx, node) + return node.Cid(), err +} + +// appendData appends the blocks from the given chan to the end of this dag +func (dm *DagModifier) appendData(nd ipld.Node, spl chunker.Splitter) (ipld.Node, error) { + switch nd := nd.(type) { + case *mdag.ProtoNode, *mdag.RawNode: + dbp := &help.DagBuilderParams{ + Dagserv: dm.dagserv, + Maxlinks: help.DefaultLinksPerBlock, + CidBuilder: dm.Prefix, + RawLeaves: dm.RawLeaves, + } + db, err := dbp.New(spl) + if err != nil { + return nil, err + } + return trickle.Append(dm.ctx, nd, db) + default: + return nil, ErrNotUnixfs + } +} + +// Read data from this dag starting at the current offset +func (dm *DagModifier) Read(b []byte) (int, error) { + err := dm.readPrep() + if err != nil { + return 0, err + } + + n, err := dm.read.Read(b) + dm.curWrOff += uint64(n) + return n, err +} + +func (dm *DagModifier) readPrep() error { + err := dm.Sync() + if err != nil { + return err + } + + if dm.read == nil { + ctx, cancel := context.WithCancel(dm.ctx) + dr, err := uio.NewDagReader(ctx, dm.curNode, dm.dagserv) + if err != nil { + cancel() + return err + } + + i, err := dr.Seek(int64(dm.curWrOff), io.SeekStart) + if err != nil { + cancel() + return err + } + + if i != int64(dm.curWrOff) { + cancel() + return ErrSeekFail + } + + dm.readCancel = cancel + dm.read = dr + } + + return nil +} + +// CtxReadFull reads data from this dag starting at the current offset +func (dm *DagModifier) CtxReadFull(ctx context.Context, b []byte) (int, error) { + err := dm.readPrep() + if err != nil { + return 0, err + } + + n, err := dm.read.CtxReadFull(ctx, b) + dm.curWrOff += uint64(n) + return n, err +} + +// GetNode gets the modified DAG Node +func (dm *DagModifier) GetNode() (ipld.Node, error) { + err := dm.Sync() + if err != nil { + return nil, err + } + return dm.curNode.Copy(), nil +} + +// 
HasChanges returned whether or not there are unflushed changes to this dag +func (dm *DagModifier) HasChanges() bool { + return dm.wrBuf != nil +} + +// Seek modifies the offset according to whence. See unixfs/io for valid whence +// values. +func (dm *DagModifier) Seek(offset int64, whence int) (int64, error) { + err := dm.Sync() + if err != nil { + return 0, err + } + + fisize, err := dm.Size() + if err != nil { + return 0, err + } + + var newoffset uint64 + switch whence { + case io.SeekCurrent: + newoffset = dm.curWrOff + uint64(offset) + case io.SeekStart: + newoffset = uint64(offset) + case io.SeekEnd: + newoffset = uint64(fisize) - uint64(offset) + default: + return 0, ErrUnrecognizedWhence + } + + if int64(newoffset) > fisize { + if err := dm.expandSparse(int64(newoffset) - fisize); err != nil { + return 0, err + } + } + dm.curWrOff = newoffset + dm.writeStart = newoffset + + if dm.read != nil { + _, err = dm.read.Seek(offset, whence) + if err != nil { + return 0, err + } + } + + return int64(dm.curWrOff), nil +} + +// Truncate truncates the current Node to 'size' and replaces it with the +// new one. +func (dm *DagModifier) Truncate(size int64) error { + err := dm.Sync() + if err != nil { + return err + } + + realSize, err := dm.Size() + if err != nil { + return err + } + if size == int64(realSize) { + return nil + } + + // Truncate can also be used to expand the file + if size > int64(realSize) { + return dm.expandSparse(int64(size) - realSize) + } + + nnode, err := dm.dagTruncate(dm.ctx, dm.curNode, uint64(size)) + if err != nil { + return err + } + + err = dm.dagserv.Add(dm.ctx, nnode) + if err != nil { + return err + } + + dm.curNode = nnode + return nil +} + +// dagTruncate truncates the given node to 'size' and returns the modified Node +func (dm *DagModifier) dagTruncate(ctx context.Context, n ipld.Node, size uint64) (ipld.Node, error) { + if len(n.Links()) == 0 { + switch nd := n.(type) { + case *mdag.ProtoNode: + // TODO: this can likely be done without marshaling and remarshaling + fsn, err := ft.FSNodeFromBytes(nd.Data()) + if err != nil { + return nil, err + } + nd.SetData(ft.WrapData(fsn.Data()[:size])) + return nd, nil + case *mdag.RawNode: + return mdag.NewRawNodeWPrefix(nd.RawData()[:size], nd.Cid().Prefix()) + } + } + + nd, ok := n.(*mdag.ProtoNode) + if !ok { + return nil, ErrNotUnixfs + } + + var cur uint64 + end := 0 + var modified ipld.Node + ndata, err := ft.FSNodeFromBytes(nd.Data()) + if err != nil { + return nil, err + } + // Reset the block sizes of the node to adjust them + // with the new values of the truncated children. + ndata.RemoveAllBlockSizes() + for i, lnk := range nd.Links() { + child, err := lnk.GetNode(ctx, dm.dagserv) + if err != nil { + return nil, err + } + + childsize, err := fileSize(child) + if err != nil { + return nil, err + } + + // found the child we want to cut + if size < cur+childsize { + nchild, err := dm.dagTruncate(ctx, child, size-cur) + if err != nil { + return nil, err + } + + ndata.AddBlockSize(size - cur) + + modified = nchild + end = i + break + } + cur += childsize + ndata.AddBlockSize(childsize) + } + + err = dm.dagserv.Add(ctx, modified) + if err != nil { + return nil, err + } + + nd.SetLinks(nd.Links()[:end]) + err = nd.AddNodeLink("", modified) + if err != nil { + return nil, err + } + + d, err := ndata.GetBytes() + if err != nil { + return nil, err + } + // Save the new block sizes to the original node. 
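+	// (d holds the re-marshaled FSNode bytes; their Filesize was already
+	// adjusted by the AddBlockSize calls above.)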
+ nd.SetData(d) + + // invalidate cache and recompute serialized data + _, err = nd.EncodeProtobuf(true) + if err != nil { + return nil, err + } + + return nd, nil +} diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go new file mode 100644 index 0000000000..04972edf98 --- /dev/null +++ b/unixfs/mod/dagmodifier_test.go @@ -0,0 +1,859 @@ +package mod + +import ( + "context" + "fmt" + "io" + "testing" + + h "github.com/ipfs/boxo/unixfs/importer/helpers" + trickle "github.com/ipfs/boxo/unixfs/importer/trickle" + uio "github.com/ipfs/boxo/unixfs/io" + testu "github.com/ipfs/boxo/unixfs/test" + dag "github.com/ipfs/boxo/ipld/merkledag" + + "github.com/ipfs/boxo/unixfs" + u "github.com/ipfs/boxo/util" +) + +func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, opts testu.NodeOpts) []byte { + newdata := make([]byte, size) + r := u.NewTimeSeededRand() + r.Read(newdata) + + if size+beg > uint64(len(orig)) { + orig = append(orig, make([]byte, (size+beg)-uint64(len(orig)))...) + } + copy(orig[beg:], newdata) + + nmod, err := dm.WriteAt(newdata, int64(beg)) + if err != nil { + t.Fatal(err) + } + + if nmod != int(size) { + t.Fatalf("Mod length not correct! %d != %d", nmod, size) + } + + verifyNode(t, orig, dm, opts) + + return orig +} + +func verifyNode(t *testing.T, orig []byte, dm *DagModifier, opts testu.NodeOpts) { + nd, err := dm.GetNode() + if err != nil { + t.Fatal(err) + } + + err = trickle.VerifyTrickleDagStructure(nd, trickle.VerifyParams{ + Getter: dm.dagserv, + Direct: h.DefaultLinksPerBlock, + LayerRepeat: 4, + Prefix: &opts.Prefix, + RawLeaves: opts.RawLeavesUsed, + }) + if err != nil { + t.Fatal(err) + } + + rd, err := uio.NewDagReader(context.Background(), nd, dm.dagserv) + if err != nil { + t.Fatal(err) + } + + after, err := io.ReadAll(rd) + if err != nil { + t.Fatal(err) + } + + err = testu.ArrComp(after, orig) + if err != nil { + t.Fatal(err) + } +} + +func runAllSubtests(t *testing.T, tfunc func(*testing.T, testu.NodeOpts)) { + t.Run("opts=ProtoBufLeaves", func(t *testing.T) { tfunc(t, testu.UseProtoBufLeaves) }) + t.Run("opts=RawLeaves", func(t *testing.T) { tfunc(t, testu.UseRawLeaves) }) + t.Run("opts=CidV1", func(t *testing.T) { tfunc(t, testu.UseCidV1) }) + t.Run("opts=Blake2b256", func(t *testing.T) { tfunc(t, testu.UseBlake2b256) }) +} + +func TestDagModifierBasic(t *testing.T) { + runAllSubtests(t, testDagModifierBasic) +} +func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + b, n := testu.GetRandomNode(t, dserv, 50000, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + // Within zero block + beg := uint64(15) + length := uint64(60) + + t.Log("Testing mod within zero block") + b = testModWrite(t, beg, length, b, dagmod, opts) + + // Within bounds of existing file + beg = 1000 + length = 4000 + t.Log("Testing mod within bounds of existing multiblock file.") + b = testModWrite(t, beg, length, b, dagmod, opts) + + // Extend bounds + beg = 49500 + length = 4000 + + t.Log("Testing mod that extends file.") + b = testModWrite(t, beg, length, b, dagmod, opts) + + // "Append" + beg = uint64(len(b)) + length = 3000 + t.Log("Testing pure append") + _ = testModWrite(t, beg, length, b, dagmod, opts) + + // Verify reported length + node, err := dagmod.GetNode() + if err != nil { + t.Fatal(err) + } 
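+
+	// (Expected size below: the file started at 50000 bytes, the extending
+	// write of 4000 bytes at offset 49500 added 3500 bytes past the old end,
+	// and the pure append added another 3000.)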
+ + size, err := fileSize(node) + if err != nil { + t.Fatal(err) + } + + expected := uint64(50000 + 3500 + 3000) + if size != expected { + t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected) + } +} + +func TestMultiWrite(t *testing.T) { + runAllSubtests(t, testMultiWrite) +} +func testMultiWrite(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + data := make([]byte, 4000) + u.NewTimeSeededRand().Read(data) + + for i := 0; i < len(data); i++ { + n, err := dagmod.WriteAt(data[i:i+1], int64(i)) + if err != nil { + t.Fatal(err) + } + if n != 1 { + t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)") + } + + size, err := dagmod.Size() + if err != nil { + t.Fatal(err) + } + + if size != int64(i+1) { + t.Fatal("Size was reported incorrectly") + } + } + + verifyNode(t, data, dagmod, opts) +} + +func TestMultiWriteAndFlush(t *testing.T) { + runAllSubtests(t, testMultiWriteAndFlush) +} +func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + data := make([]byte, 20) + u.NewTimeSeededRand().Read(data) + + for i := 0; i < len(data); i++ { + n, err := dagmod.WriteAt(data[i:i+1], int64(i)) + if err != nil { + t.Fatal(err) + } + if n != 1 { + t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)") + } + err = dagmod.Sync() + if err != nil { + t.Fatal(err) + } + } + + verifyNode(t, data, dagmod, opts) +} + +func TestWriteNewFile(t *testing.T) { + runAllSubtests(t, testWriteNewFile) +} +func testWriteNewFile(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + towrite := make([]byte, 2000) + u.NewTimeSeededRand().Read(towrite) + + nw, err := dagmod.Write(towrite) + if err != nil { + t.Fatal(err) + } + if nw != len(towrite) { + t.Fatal("Wrote wrong amount") + } + + verifyNode(t, towrite, dagmod, opts) +} + +func TestMultiWriteCoal(t *testing.T) { + runAllSubtests(t, testMultiWriteCoal) +} +func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + data := make([]byte, 1000) + u.NewTimeSeededRand().Read(data) + + for i := 0; i < len(data); i++ { + n, err := dagmod.WriteAt(data[:i+1], 0) + if err != nil { + fmt.Println("FAIL AT ", i) + t.Fatal(err) + } + if n != i+1 { + t.Fatal("Somehow wrote the wrong number of bytes! 
(n != 1)") + } + + } + + verifyNode(t, data, dagmod, opts) +} + +func TestLargeWriteChunks(t *testing.T) { + runAllSubtests(t, testLargeWriteChunks) +} +func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + wrsize := 1000 + datasize := 10000000 + data := make([]byte, datasize) + + u.NewTimeSeededRand().Read(data) + + for i := 0; i < datasize/wrsize; i++ { + n, err := dagmod.WriteAt(data[i*wrsize:(i+1)*wrsize], int64(i*wrsize)) + if err != nil { + t.Fatal(err) + } + if n != wrsize { + t.Fatal("failed to write buffer") + } + } + + _, err = dagmod.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(dagmod) + if err != nil { + t.Fatal(err) + } + + if err = testu.ArrComp(out, data); err != nil { + t.Fatal(err) + } +} + +func TestDagTruncate(t *testing.T) { + runAllSubtests(t, testDagTruncate) +} +func testDagTruncate(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + b, n := testu.GetRandomNode(t, dserv, 50000, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + err = dagmod.Truncate(12345) + if err != nil { + t.Fatal(err) + } + size, err := dagmod.Size() + if err != nil { + t.Fatal(err) + } + + if size != 12345 { + t.Fatal("size was incorrect!") + } + + _, err = dagmod.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(dagmod) + if err != nil { + t.Fatal(err) + } + + if err = testu.ArrComp(out, b[:12345]); err != nil { + t.Fatal(err) + } + + err = dagmod.Truncate(10) + if err != nil { + t.Fatal(err) + } + + size, err = dagmod.Size() + if err != nil { + t.Fatal(err) + } + + if size != 10 { + t.Fatal("size was incorrect!") + } + + err = dagmod.Truncate(0) + if err != nil { + t.Fatal(err) + } + + size, err = dagmod.Size() + if err != nil { + t.Fatal(err) + } + + if size != 0 { + t.Fatal("size was incorrect!") + } +} + +// TestDagSync tests that a DAG will expand sparse during sync +// if offset > curNode's size. 
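+// Here, truncating to 0 leaves the write offset at 5, so the second write
+// lands past the end of the file and Sync must first fill the 5-byte gap
+// with zeros via expandSparse.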
+func TestDagSync(t *testing.T) {
+	dserv := testu.GetDAGServ()
+	nd := dag.NodeWithData(unixfs.FilePBData(nil, 0))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dagmod, err := NewDagModifier(ctx, nd, dserv, testu.SizeSplitterGen(512))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dagmod.Write([]byte("test1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dagmod.Sync()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Truncate leaves the offset at 5 and the filesize at 0
+	err = dagmod.Truncate(0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dagmod.Write([]byte("test2"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// When offset > filesize, Sync will call expandSparse
+	err = dagmod.Sync()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dagmod.Seek(0, io.SeekStart)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	out, err := io.ReadAll(dagmod)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = testu.ArrComp(out[5:], []byte("test2")); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestDagTruncateSameSize tests that a DAG truncated
+// to the same size (i.e., doing nothing) doesn't modify
+// the DAG (its hash).
+func TestDagTruncateSameSize(t *testing.T) {
+	runAllSubtests(t, testDagTruncateSameSize)
+}
+func testDagTruncateSameSize(t *testing.T, opts testu.NodeOpts) {
+	dserv := testu.GetDAGServ()
+	_, n := testu.GetRandomNode(t, dserv, 50000, opts)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Copied from `TestDagTruncate`.
+
+	size, err := dagmod.Size()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dagmod.Truncate(size)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	modifiedNode, err := dagmod.GetNode()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !modifiedNode.Cid().Equals(n.Cid()) {
+		t.Fatal("the node has been modified!")
+	}
+}
+
+func TestSparseWrite(t *testing.T) {
+	runAllSubtests(t, testSparseWrite)
+}
+func testSparseWrite(t *testing.T, opts testu.NodeOpts) {
+	dserv := testu.GetDAGServ()
+	n := testu.GetEmptyNode(t, dserv, opts)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
+
+	buf := make([]byte, 5000)
+	u.NewTimeSeededRand().Read(buf[2500:])
+
+	wrote, err := dagmod.WriteAt(buf[2500:], 2500)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if wrote != 2500 {
+		t.Fatal("incorrect write amount")
+	}
+
+	_, err = dagmod.Seek(0, io.SeekStart)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	out, err := io.ReadAll(dagmod)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = testu.ArrComp(out, buf); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestSeekPastEndWrite(t *testing.T) {
+	runAllSubtests(t, testSeekPastEndWrite)
+}
+func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) {
+	dserv := testu.GetDAGServ()
+	n := testu.GetEmptyNode(t, dserv, opts)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
+
+	buf := make([]byte, 5000)
+	u.NewTimeSeededRand().Read(buf[2500:])
+
+	nseek, err := dagmod.Seek(2500, io.SeekStart)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if nseek != 2500 {
+		t.Fatal("failed to seek")
+	}
+
+	wrote, err :=
dagmod.Write(buf[2500:]) + if err != nil { + t.Fatal(err) + } + + if wrote != 2500 { + t.Fatal("incorrect write amount") + } + + _, err = dagmod.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + + out, err := io.ReadAll(dagmod) + if err != nil { + t.Fatal(err) + } + + if err = testu.ArrComp(out, buf); err != nil { + t.Fatal(err) + } +} + +func TestRelativeSeek(t *testing.T) { + runAllSubtests(t, testRelativeSeek) +} +func testRelativeSeek(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + for i := 0; i < 64; i++ { + dagmod.Write([]byte{byte(i)}) + if _, err := dagmod.Seek(1, io.SeekCurrent); err != nil { + t.Fatal(err) + } + } + + out, err := io.ReadAll(dagmod) + if err != nil { + t.Fatal(err) + } + + for i, v := range out { + if v != 0 && i/2 != int(v) { + t.Errorf("expected %d, at index %d, got %d", i/2, i, v) + } + } +} + +func TestInvalidSeek(t *testing.T) { + runAllSubtests(t, testInvalidSeek) +} +func testInvalidSeek(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(t, dserv, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + _, err = dagmod.Seek(10, -10) + + if err != ErrUnrecognizedWhence { + t.Fatal(err) + } +} + +func TestEndSeek(t *testing.T) { + runAllSubtests(t, testEndSeek) +} +func testEndSeek(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + + n := testu.GetEmptyNode(t, dserv, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + _, err = dagmod.Write(make([]byte, 100)) + if err != nil { + t.Fatal(err) + } + + offset, err := dagmod.Seek(0, io.SeekCurrent) + if err != nil { + t.Fatal(err) + } + if offset != 100 { + t.Fatal("expected the relative seek 0 to return current location") + } + + offset, err = dagmod.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + if offset != 0 { + t.Fatal("expected the absolute seek to set offset at 0") + } + + offset, err = dagmod.Seek(0, io.SeekEnd) + if err != nil { + t.Fatal(err) + } + if offset != 100 { + t.Fatal("expected the end seek to set offset at end") + } +} + +func TestReadAndSeek(t *testing.T) { + runAllSubtests(t, testReadAndSeek) +} +func testReadAndSeek(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + + n := testu.GetEmptyNode(t, dserv, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + writeBuf := []byte{0, 1, 2, 3, 4, 5, 6, 7} + dagmod.Write(writeBuf) + + if !dagmod.HasChanges() { + t.Fatal("there are changes, this should be true") + } + + readBuf := make([]byte, 4) + offset, err := dagmod.Seek(0, io.SeekStart) + if offset != 0 { + t.Fatal("expected offset to be 0") + } + if err != nil { + t.Fatal(err) + } + + // read 
0,1,2,3 + c, err := dagmod.Read(readBuf) + if err != nil { + t.Fatal(err) + } + if c != 4 { + t.Fatalf("expected length of 4 got %d", c) + } + + for i := byte(0); i < 4; i++ { + if readBuf[i] != i { + t.Fatalf("wrong value %d [at index %d]", readBuf[i], i) + } + } + + // skip 4 + _, err = dagmod.Seek(1, io.SeekCurrent) + if err != nil { + t.Fatalf("error: %s, offset %d, reader offset %d", err, dagmod.curWrOff, getOffset(dagmod.read)) + } + + //read 5,6,7 + readBuf = make([]byte, 3) + c, err = dagmod.Read(readBuf) + if err != nil { + t.Fatal(err) + } + if c != 3 { + t.Fatalf("expected length of 3 got %d", c) + } + + for i := byte(0); i < 3; i++ { + if readBuf[i] != i+5 { + t.Fatalf("wrong value %d [at index %d]", readBuf[i], i) + } + + } + +} + +func TestCtxRead(t *testing.T) { + runAllSubtests(t, testCtxRead) +} +func testCtxRead(t *testing.T, opts testu.NodeOpts) { + dserv := testu.GetDAGServ() + + n := testu.GetEmptyNode(t, dserv, opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } + + _, err = dagmod.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7}) + if err != nil { + t.Fatal(err) + } + dagmod.Seek(0, io.SeekStart) + + readBuf := make([]byte, 4) + _, err = dagmod.CtxReadFull(ctx, readBuf) + if err != nil { + t.Fatal(err) + } + err = testu.ArrComp(readBuf, []byte{0, 1, 2, 3}) + if err != nil { + t.Fatal(err) + } + // TODO(Kubuxu): context cancel case, I will do it after I figure out dagreader tests, + // because this is exacelly the same. +} + +func BenchmarkDagmodWrite(b *testing.B) { + b.StopTimer() + dserv := testu.GetDAGServ() + n := testu.GetEmptyNode(b, dserv, testu.UseProtoBufLeaves) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wrsize := 4096 + + dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) + if err != nil { + b.Fatal(err) + } + + buf := make([]byte, b.N*wrsize) + u.NewTimeSeededRand().Read(buf) + b.StartTimer() + b.SetBytes(int64(wrsize)) + for i := 0; i < b.N; i++ { + n, err := dagmod.Write(buf[i*wrsize : (i+1)*wrsize]) + if err != nil { + b.Fatal(err) + } + if n != wrsize { + b.Fatal("Wrote bad size") + } + } +} + +func getOffset(reader uio.DagReader) int64 { + offset, err := reader.Seek(0, io.SeekCurrent) + if err != nil { + panic("failed to retrieve offset: " + err.Error()) + } + return offset +} diff --git a/unixfs/pb/Makefile b/unixfs/pb/Makefile new file mode 100644 index 0000000000..51552a0961 --- /dev/null +++ b/unixfs/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogo_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/unixfs/pb/unixfs.pb.go b/unixfs/pb/unixfs.pb.go new file mode 100644 index 0000000000..e523140075 --- /dev/null +++ b/unixfs/pb/unixfs.pb.go @@ -0,0 +1,218 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: unixfs.proto + +package unixfs_pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Data_DataType int32 + +const ( + Data_Raw Data_DataType = 0 + Data_Directory Data_DataType = 1 + Data_File Data_DataType = 2 + Data_Metadata Data_DataType = 3 + Data_Symlink Data_DataType = 4 + Data_HAMTShard Data_DataType = 5 +) + +var Data_DataType_name = map[int32]string{ + 0: "Raw", + 1: "Directory", + 2: "File", + 3: "Metadata", + 4: "Symlink", + 5: "HAMTShard", +} + +var Data_DataType_value = map[string]int32{ + "Raw": 0, + "Directory": 1, + "File": 2, + "Metadata": 3, + "Symlink": 4, + "HAMTShard": 5, +} + +func (x Data_DataType) Enum() *Data_DataType { + p := new(Data_DataType) + *p = x + return p +} + +func (x Data_DataType) String() string { + return proto.EnumName(Data_DataType_name, int32(x)) +} + +func (x *Data_DataType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Data_DataType_value, data, "Data_DataType") + if err != nil { + return err + } + *x = Data_DataType(value) + return nil +} + +func (Data_DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e2fd76cc44dfc7c3, []int{0, 0} +} + +type Data struct { + Type *Data_DataType `protobuf:"varint,1,req,name=Type,enum=unixfs.pb.Data_DataType" json:"Type,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=Data" json:"Data,omitempty"` + Filesize *uint64 `protobuf:"varint,3,opt,name=filesize" json:"filesize,omitempty"` + Blocksizes []uint64 `protobuf:"varint,4,rep,name=blocksizes" json:"blocksizes,omitempty"` + HashType *uint64 `protobuf:"varint,5,opt,name=hashType" json:"hashType,omitempty"` + Fanout *uint64 `protobuf:"varint,6,opt,name=fanout" json:"fanout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Data) Reset() { *m = Data{} } +func (m *Data) String() string { return proto.CompactTextString(m) } +func (*Data) ProtoMessage() {} +func (*Data) Descriptor() ([]byte, []int) { + return fileDescriptor_e2fd76cc44dfc7c3, []int{0} +} +func (m *Data) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Data.Unmarshal(m, b) +} +func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Data.Marshal(b, m, deterministic) +} +func (m *Data) XXX_Merge(src proto.Message) { + xxx_messageInfo_Data.Merge(m, src) +} +func (m *Data) XXX_Size() int { + return xxx_messageInfo_Data.Size(m) +} +func (m *Data) XXX_DiscardUnknown() { + xxx_messageInfo_Data.DiscardUnknown(m) +} + +var xxx_messageInfo_Data proto.InternalMessageInfo + +func (m *Data) GetType() Data_DataType { + if m != nil && m.Type != nil { + return *m.Type + } + return Data_Raw +} + +func (m *Data) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Data) GetFilesize() uint64 { + if m != nil && m.Filesize != nil { + return *m.Filesize + } + return 0 +} + +func (m *Data) GetBlocksizes() []uint64 { + if m != nil { + return m.Blocksizes + } + return nil +} + +func (m *Data) GetHashType() uint64 { + if m != nil && m.HashType != nil { + return *m.HashType + } + return 0 +} + +func (m *Data) GetFanout() uint64 { + if m != nil && m.Fanout != nil { + return *m.Fanout + } + return 0 +} + +type Metadata struct { + MimeType *string `protobuf:"bytes,1,opt,name=MimeType" json:"MimeType,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_e2fd76cc44dfc7c3, []int{1} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetMimeType() string { + if m != nil && m.MimeType != nil { + return *m.MimeType + } + return "" +} + +func init() { + proto.RegisterEnum("unixfs.pb.Data_DataType", Data_DataType_name, Data_DataType_value) + proto.RegisterType((*Data)(nil), "unixfs.pb.Data") + proto.RegisterType((*Metadata)(nil), "unixfs.pb.Metadata") +} + +func init() { proto.RegisterFile("unixfs.proto", fileDescriptor_e2fd76cc44dfc7c3) } + +var fileDescriptor_e2fd76cc44dfc7c3 = []byte{ + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xb1, 0x6a, 0xeb, 0x30, + 0x18, 0x85, 0xaf, 0x6c, 0x25, 0xb1, 0xff, 0xeb, 0x16, 0xf1, 0x0f, 0x45, 0x74, 0x28, 0xc6, 0x43, + 0xd1, 0x50, 0x3c, 0xf4, 0x0d, 0x0a, 0xa1, 0x74, 0xf1, 0xa2, 0x84, 0xee, 0x4a, 0x22, 0x63, 0x11, + 0xc7, 0x0a, 0xb6, 0x42, 0xeb, 0x3e, 0x45, 0x1f, 0xb9, 0xc8, 0x8e, 0xdd, 0x2e, 0x82, 0x4f, 0xe7, + 0x7c, 0xe2, 0x20, 0x48, 0x2e, 0x8d, 0xf9, 0x2c, 0xbb, 0xfc, 0xdc, 0x5a, 0x67, 0x31, 0x9e, 0x68, + 0x97, 0x7d, 0x07, 0x40, 0xd7, 0xca, 0x29, 0x7c, 0x02, 0xba, 0xed, 0xcf, 0x9a, 0x93, 0x34, 0x10, + 0xb7, 0xcf, 0x3c, 0x9f, 0x2b, 0xb9, 0x8f, 0x87, 0xc3, 0xe7, 0x72, 0x68, 0x21, 0x8e, 0x16, 0x0f, + 0x52, 0x22, 0x12, 0x39, 0xbe, 0x70, 0x0f, 0x51, 0x69, 0x6a, 0xdd, 0x99, 0x2f, 0xcd, 0xc3, 0x94, + 0x08, 0x2a, 0x67, 0xc6, 0x07, 0x80, 0x5d, 0x6d, 0xf7, 0x47, 0x0f, 0x1d, 0xa7, 0x69, 0x28, 0xa8, + 0xfc, 0x73, 0xe3, 0xdd, 0x4a, 0x75, 0xd5, 0xb0, 0x60, 0x31, 0xba, 0x13, 0xe3, 0x1d, 0x2c, 0x4b, + 0xd5, 0xd8, 0x8b, 0xe3, 0xcb, 0x21, 0xb9, 0x52, 0xf6, 0x0e, 0xd1, 0xb4, 0x0a, 0x57, 0x10, 0x4a, + 0xf5, 0xc1, 0xfe, 0xe1, 0x0d, 0xc4, 0x6b, 0xd3, 0xea, 0xbd, 0xb3, 0x6d, 0xcf, 0x08, 0x46, 0x40, + 0x5f, 0x4d, 0xad, 0x59, 0x80, 0x09, 0x44, 0x85, 0x76, 0xea, 0xa0, 0x9c, 0x62, 0x21, 0xfe, 0x87, + 0xd5, 0xa6, 0x3f, 0xd5, 0xa6, 0x39, 0x32, 0xea, 0x9d, 0xb7, 0x97, 0x62, 0xbb, 0xa9, 0x54, 0x7b, + 0x60, 0x8b, 0xec, 0xf1, 0xb7, 0xe9, 0x77, 0x15, 0xe6, 0xa4, 0xaf, 0x3f, 0x43, 0x44, 0x2c, 0x67, + 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0xe9, 0xa0, 0x51, 0x10, 0x54, 0x01, 0x00, 0x00, +} diff --git a/unixfs/pb/unixfs.proto b/unixfs/pb/unixfs.proto new file mode 100644 index 0000000000..ffc059e8be --- /dev/null +++ b/unixfs/pb/unixfs.proto @@ -0,0 +1,26 @@ +syntax = "proto2"; + +package unixfs.pb; + +message Data { + enum DataType { + Raw = 0; + Directory = 1; + File = 2; + Metadata = 3; + Symlink = 4; + HAMTShard = 5; + } + + required DataType Type = 1; + optional bytes Data = 2; + optional uint64 filesize = 3; + repeated uint64 blocksizes = 4; + + optional uint64 hashType = 5; + optional uint64 fanout = 6; +} + +message Metadata { + optional string MimeType = 1; +} diff --git 
a/unixfs/private/linksize/linksize.go b/unixfs/private/linksize/linksize.go new file mode 100644 index 0000000000..e7ae098b61 --- /dev/null +++ b/unixfs/private/linksize/linksize.go @@ -0,0 +1,5 @@ +package linksize + +import "github.com/ipfs/go-cid" + +var LinkSizeFunction func(linkName string, linkCid cid.Cid) int diff --git a/unixfs/test/utils.go b/unixfs/test/utils.go new file mode 100644 index 0000000000..e28ff57460 --- /dev/null +++ b/unixfs/test/utils.go @@ -0,0 +1,138 @@ +package testu + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + + ft "github.com/ipfs/boxo/unixfs" + h "github.com/ipfs/boxo/unixfs/importer/helpers" + trickle "github.com/ipfs/boxo/unixfs/importer/trickle" + + chunker "github.com/ipfs/boxo/chunker" + u "github.com/ipfs/boxo/util" + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + mdag "github.com/ipfs/boxo/ipld/merkledag" + mdagmock "github.com/ipfs/boxo/ipld/merkledag/test" + mh "github.com/multiformats/go-multihash" +) + +// SizeSplitterGen creates a generator. +func SizeSplitterGen(size int64) chunker.SplitterGen { + return func(r io.Reader) chunker.Splitter { + return chunker.NewSizeSplitter(r, size) + } +} + +// GetDAGServ returns a mock DAGService. +func GetDAGServ() ipld.DAGService { + return mdagmock.Mock() +} + +// NodeOpts is used by GetNode, GetEmptyNode and GetRandomNode +type NodeOpts struct { + Prefix cid.Prefix + // ForceRawLeaves if true will force the use of raw leaves + ForceRawLeaves bool + // RawLeavesUsed is true if raw leaves or either implicitly or explicitly enabled + RawLeavesUsed bool +} + +// Some shorthands for NodeOpts. +var ( + UseProtoBufLeaves = NodeOpts{Prefix: mdag.V0CidPrefix()} + UseRawLeaves = NodeOpts{Prefix: mdag.V0CidPrefix(), ForceRawLeaves: true, RawLeavesUsed: true} + UseCidV1 = NodeOpts{Prefix: mdag.V1CidPrefix(), RawLeavesUsed: true} + UseBlake2b256 NodeOpts +) + +func init() { + UseBlake2b256 = UseCidV1 + UseBlake2b256.Prefix.MhType = mh.Names["blake2b-256"] + UseBlake2b256.Prefix.MhLength = -1 +} + +// GetNode returns a unixfs file node with the specified data. +func GetNode(t testing.TB, dserv ipld.DAGService, data []byte, opts NodeOpts) ipld.Node { + in := bytes.NewReader(data) + + dbp := h.DagBuilderParams{ + Dagserv: dserv, + Maxlinks: h.DefaultLinksPerBlock, + CidBuilder: opts.Prefix, + RawLeaves: opts.RawLeavesUsed, + } + + db, err := dbp.New(SizeSplitterGen(500)(in)) + if err != nil { + t.Fatal(err) + } + node, err := trickle.Layout(db) + if err != nil { + t.Fatal(err) + } + + return node +} + +// GetEmptyNode returns an empty unixfs file node. +func GetEmptyNode(t testing.TB, dserv ipld.DAGService, opts NodeOpts) ipld.Node { + return GetNode(t, dserv, []byte{}, opts) +} + +// GetRandomNode returns a random unixfs file node. +func GetRandomNode(t testing.TB, dserv ipld.DAGService, size int64, opts NodeOpts) ([]byte, ipld.Node) { + in := io.LimitReader(u.NewTimeSeededRand(), size) + buf, err := io.ReadAll(in) + if err != nil { + t.Fatal(err) + } + + node := GetNode(t, dserv, buf, opts) + return buf, node +} + +// ArrComp checks if two byte slices are the same. +func ArrComp(a, b []byte) error { + if len(a) != len(b) { + return fmt.Errorf("arrays differ in length. %d != %d", len(a), len(b)) + } + for i, v := range a { + if v != b[i] { + return fmt.Errorf("arrays differ at index: %d", i) + } + } + return nil +} + +// PrintDag pretty-prints the given dag to stdout. 
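+// For a small two-leaf file the output looks roughly like (illustrative
+// only, sizes depend on the chunker):
+//
+//	{size = 1024, type = File, children = 2
+//	 {size = 512, type = Raw, children = 0}
+//	 {size = 512, type = Raw, children = 0}
+//	}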
+func PrintDag(nd *mdag.ProtoNode, ds ipld.DAGService, indent int) {
+	fsn, err := ft.FSNodeFromBytes(nd.Data())
+	if err != nil {
+		panic(err)
+	}
+
+	for i := 0; i < indent; i++ {
+		fmt.Print(" ")
+	}
+	fmt.Printf("{size = %d, type = %s, children = %d", fsn.FileSize(), fsn.Type().String(), fsn.NumChildren())
+	if len(nd.Links()) > 0 {
+		fmt.Println()
+	}
+	for _, lnk := range nd.Links() {
+		child, err := lnk.GetNode(context.Background(), ds)
+		if err != nil {
+			panic(err)
+		}
+		PrintDag(child.(*mdag.ProtoNode), ds, indent+1)
+	}
+	if len(nd.Links()) > 0 {
+		for i := 0; i < indent; i++ {
+			fmt.Print(" ")
+		}
+	}
+	fmt.Println("}")
+}
diff --git a/unixfs/unixfs.go b/unixfs/unixfs.go
new file mode 100644
index 0000000000..6f68bdfb69
--- /dev/null
+++ b/unixfs/unixfs.go
@@ -0,0 +1,418 @@
+// Package unixfs implements a data format for files in the IPFS filesystem.
+// It is not the only format in IPFS, but it is the one that the filesystem
+// assumes.
+package unixfs
+
+import (
+	"errors"
+	"fmt"
+
+	proto "github.com/gogo/protobuf/proto"
+	dag "github.com/ipfs/boxo/ipld/merkledag"
+
+	pb "github.com/ipfs/boxo/unixfs/pb"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// A LinkResult for any parallel enumeration of links
+// TODO: Should this live in go-ipld-format?
+type LinkResult struct {
+	Link *ipld.Link
+	Err  error
+}
+
+// Shorthands for protobuf types
+const (
+	TRaw       = pb.Data_Raw
+	TFile      = pb.Data_File
+	TDirectory = pb.Data_Directory
+	TMetadata  = pb.Data_Metadata
+	TSymlink   = pb.Data_Symlink
+	THAMTShard = pb.Data_HAMTShard
+)
+
+// Common errors
+var (
+	ErrMalformedFileFormat = errors.New("malformed data in file format")
+	ErrUnrecognizedType    = errors.New("unrecognized node type")
+)
+
+// FromBytes unmarshals a byte slice as protobuf Data.
+// Deprecated: Use `FSNodeFromBytes` instead to avoid direct manipulation of `pb.Data`.
+func FromBytes(data []byte) (*pb.Data, error) {
+	pbdata := new(pb.Data)
+	err := proto.Unmarshal(data, pbdata)
+	if err != nil {
+		return nil, err
+	}
+	return pbdata, nil
+}
+
+// FilePBData creates a protobuf File with the given
+// byte slice and returns the marshaled protobuf bytes representing it.
+func FilePBData(data []byte, totalsize uint64) []byte {
+	pbfile := new(pb.Data)
+	typ := pb.Data_File
+	pbfile.Type = &typ
+	pbfile.Data = data
+	pbfile.Filesize = proto.Uint64(totalsize)
+
+	data, err := proto.Marshal(pbfile)
+	if err != nil {
+		// This really shouldn't happen, I promise. The only failure case
+		// for Marshal is if required fields are not filled out, and they
+		// all are. If the proto object gets changed and nobody updates
+		// this function, the code should panic due to programmer error.
+		panic(err)
+	}
+	return data
+}
+
+// FolderPBData returns the marshaled bytes that represent a Directory.
+func FolderPBData() []byte {
+	pbfile := new(pb.Data)
+	typ := pb.Data_Directory
+	pbfile.Type = &typ
+
+	data, err := proto.Marshal(pbfile)
+	if err != nil {
+		// This really shouldn't happen, I promise.
+		panic(err)
+	}
+	return data
+}
+
+// WrapData marshals raw bytes into a `Data_Raw` type protobuf message.
+func WrapData(b []byte) []byte {
+	pbdata := new(pb.Data)
+	typ := pb.Data_Raw
+	pbdata.Data = b
+	pbdata.Type = &typ
+	pbdata.Filesize = proto.Uint64(uint64(len(b)))
+
+	out, err := proto.Marshal(pbdata)
+	if err != nil {
+		// This shouldn't happen. Seriously.
+		panic(err)
+	}
+
+	return out
+}
+
+// SymlinkData returns a `Data_Symlink` protobuf message for the path you specify.
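+// The result round-trips through UnwrapData, which returns the original
+// path bytes.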
+func SymlinkData(path string) ([]byte, error) {
+	pbdata := new(pb.Data)
+	typ := pb.Data_Symlink
+	pbdata.Data = []byte(path)
+	pbdata.Type = &typ
+
+	out, err := proto.Marshal(pbdata)
+	if err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// HAMTShardData returns a `Data_HAMTShard` protobuf message.
+func HAMTShardData(data []byte, fanout uint64, hashType uint64) ([]byte, error) {
+	pbdata := new(pb.Data)
+	typ := pb.Data_HAMTShard
+	pbdata.Type = &typ
+	pbdata.HashType = proto.Uint64(hashType)
+	pbdata.Data = data
+	pbdata.Fanout = proto.Uint64(fanout)
+
+	out, err := proto.Marshal(pbdata)
+	if err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// UnwrapData unmarshals a protobuf message and returns the contents.
+func UnwrapData(data []byte) ([]byte, error) {
+	pbdata := new(pb.Data)
+	err := proto.Unmarshal(data, pbdata)
+	if err != nil {
+		return nil, err
+	}
+	return pbdata.GetData(), nil
+}
+
+// DataSize returns the size of the contents in a protobuf-wrapped slice.
+// For raw data, it simply returns its length. For Data_Files, it
+// will return the associated filesize. Note that Data_Directories will
+// return an error.
+func DataSize(data []byte) (uint64, error) {
+	pbdata := new(pb.Data)
+	err := proto.Unmarshal(data, pbdata)
+	if err != nil {
+		return 0, err
+	}
+	return size(pbdata)
+}
+
+func size(pbdata *pb.Data) (uint64, error) {
+	switch pbdata.GetType() {
+	case pb.Data_Directory, pb.Data_HAMTShard:
+		return 0, errors.New("can't get data size of directory")
+	case pb.Data_File, pb.Data_Raw:
+		return pbdata.GetFilesize(), nil
+	case pb.Data_Symlink:
+		return uint64(len(pbdata.GetData())), nil
+	default:
+		return 0, errors.New("unrecognized node data type")
+	}
+}
+
+// An FSNode represents a filesystem object using the UnixFS specification.
+//
+// The `NewFSNode` constructor should be used instead of just calling `new(FSNode)`
+// to guarantee that the required (`Type` and `Filesize`) fields in the `format`
+// structure are initialized before marshaling (in `GetBytes()`).
+type FSNode struct {
+	// UnixFS format defined as a protocol buffers message.
+	format pb.Data
+}
+
+// FSNodeFromBytes unmarshals a protobuf message into an FSNode.
+func FSNodeFromBytes(b []byte) (*FSNode, error) {
+	n := new(FSNode)
+	err := proto.Unmarshal(b, &n.format)
+	if err != nil {
+		return nil, err
+	}
+
+	return n, nil
+}
+
+// NewFSNode creates a new FSNode structure with the given `dataType`.
+//
+// It initializes the (required) `Type` field (that doesn't have a `Set()`
+// accessor so it must be specified at creation), otherwise the `Marshal()`
+// method in `GetBytes()` would fail (`required field "Type" not set`).
+//
+// It also initializes the `Filesize` pointer field to ensure its value
+// is never nil before marshaling; this is not a required field, but it is
+// done to stay backwards compatible with the hashes produced by previous
+// `go-ipfs` versions. (If it wasn't initialized there could be cases where
+// `Filesize` could have been left at nil, when the `FSNode` was created but
+// no data or child nodes were set to adjust it, as is the case in `NewLeaf()`.)
+func NewFSNode(dataType pb.Data_DataType) *FSNode {
+	n := new(FSNode)
+	n.format.Type = &dataType
+
+	// Initialize `Filesize` by updating it with a dummy (zero) value.
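+	// (The zero diff still allocates the underlying `Filesize` pointer via
+	// `proto.Uint64`, so the field is always emitted when marshaling.)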
+	n.UpdateFilesize(0)
+
+	return n
+}
+
+// HashType returns the hash type of the format.
+func (n *FSNode) HashType() uint64 {
+	return n.format.GetHashType()
+}
+
+// Fanout returns the fanout of the format.
+func (n *FSNode) Fanout() uint64 {
+	return n.format.GetFanout()
+}
+
+// AddBlockSize adds the size of the next child block of this node.
+func (n *FSNode) AddBlockSize(s uint64) {
+	n.UpdateFilesize(int64(s))
+	n.format.Blocksizes = append(n.format.Blocksizes, s)
+}
+
+// RemoveBlockSize removes the given child block's size.
+func (n *FSNode) RemoveBlockSize(i int) {
+	n.UpdateFilesize(-int64(n.format.Blocksizes[i]))
+	n.format.Blocksizes = append(n.format.Blocksizes[:i], n.format.Blocksizes[i+1:]...)
+}
+
+// BlockSize returns the block size indexed by `i`.
+// TODO: Evaluate if this function should do bounds checking.
+func (n *FSNode) BlockSize(i int) uint64 {
+	return n.format.Blocksizes[i]
+}
+
+// BlockSizes returns the block sizes of the format.
+func (n *FSNode) BlockSizes() []uint64 {
+	return n.format.GetBlocksizes()
+}
+
+// RemoveAllBlockSizes removes all the child block sizes of this node.
+func (n *FSNode) RemoveAllBlockSizes() {
+	n.format.Blocksizes = []uint64{}
+	n.format.Filesize = proto.Uint64(uint64(len(n.Data())))
+}
+
+// GetBytes marshals this node as a protobuf message.
+func (n *FSNode) GetBytes() ([]byte, error) {
+	return proto.Marshal(&n.format)
+}
+
+// FileSize returns the size of the file.
+func (n *FSNode) FileSize() uint64 {
+	// XXX: This needs to be able to return an error when we don't know the
+	// size.
+	size, _ := size(&n.format)
+	return size
+}
+
+// NumChildren returns the number of child blocks of this node.
+func (n *FSNode) NumChildren() int {
+	return len(n.format.Blocksizes)
+}
+
+// Data retrieves the `Data` field from the internal `format`.
+func (n *FSNode) Data() []byte {
+	return n.format.GetData()
+}
+
+// SetData sets the `Data` field from the internal `format`,
+// updating its `Filesize`.
+func (n *FSNode) SetData(newData []byte) {
+	n.UpdateFilesize(int64(len(newData) - len(n.Data())))
+	n.format.Data = newData
+}
+
+// UpdateFilesize updates the `Filesize` field from the internal `format`
+// by a signed difference (`filesizeDiff`).
+// TODO: Add assert to check for `Filesize` > 0?
+func (n *FSNode) UpdateFilesize(filesizeDiff int64) {
+	n.format.Filesize = proto.Uint64(uint64(
+		int64(n.format.GetFilesize()) + filesizeDiff))
+}
+
+// Type retrieves the `Type` field from the internal `format`.
+func (n *FSNode) Type() pb.Data_DataType {
+	return n.format.GetType()
+}
+
+// IsDir checks whether the node represents a directory.
+func (n *FSNode) IsDir() bool {
+	switch n.Type() {
+	case pb.Data_Directory, pb.Data_HAMTShard:
+		return true
+	default:
+		return false
+	}
+}
+
+// Metadata is used to store additional FSNode information.
+type Metadata struct {
+	MimeType string
+	Size     uint64
+}
+
+// MetadataFromBytes unmarshals a protobuf Data message into Metadata.
+// The provided slice should have been encoded with BytesForMetadata().
+func MetadataFromBytes(b []byte) (*Metadata, error) {
+	pbd := new(pb.Data)
+	err := proto.Unmarshal(b, pbd)
+	if err != nil {
+		return nil, err
+	}
+	if pbd.GetType() != pb.Data_Metadata {
+		return nil, errors.New("incorrect node type")
+	}
+
+	pbm := new(pb.Metadata)
+	err = proto.Unmarshal(pbd.Data, pbm)
+	if err != nil {
+		return nil, err
+	}
+	md := new(Metadata)
+	md.MimeType = pbm.GetMimeType()
+	return md, nil
+}
+
+// Bytes marshals Metadata as a protobuf message of Metadata type.
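+// Note that only MimeType is encoded here; Size is carried by the wrapping
+// Data message produced by BytesForMetadata, and is consequently not
+// recovered by MetadataFromBytes.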
+func (m *Metadata) Bytes() ([]byte, error) {
+	pbm := new(pb.Metadata)
+	pbm.MimeType = &m.MimeType
+	return proto.Marshal(pbm)
+}
+
+// BytesForMetadata wraps the given Metadata as a protobuf message of Data type,
+// setting the DataType to Metadata. The wrapped bytes are themselves the
+// result of calling m.Bytes().
+func BytesForMetadata(m *Metadata) ([]byte, error) {
+	pbd := new(pb.Data)
+	pbd.Filesize = proto.Uint64(m.Size)
+	typ := pb.Data_Metadata
+	pbd.Type = &typ
+	mdd, err := m.Bytes()
+	if err != nil {
+		return nil, err
+	}
+
+	pbd.Data = mdd
+	return proto.Marshal(pbd)
+}
+
+// EmptyDirNode creates an empty folder ProtoNode.
+func EmptyDirNode() *dag.ProtoNode {
+	return dag.NodeWithData(FolderPBData())
+}
+
+// EmptyFileNode creates an empty file ProtoNode.
+func EmptyFileNode() *dag.ProtoNode {
+	return dag.NodeWithData(FilePBData(nil, 0))
+}
+
+// ReadUnixFSNodeData extracts the UnixFS data from an IPLD node.
+// Raw nodes are (also) processed because they are used as leaf
+// nodes containing (only) UnixFS data.
+func ReadUnixFSNodeData(node ipld.Node) (data []byte, err error) {
+	switch node := node.(type) {
+
+	case *dag.ProtoNode:
+		fsNode, err := FSNodeFromBytes(node.Data())
+		if err != nil {
+			return nil, fmt.Errorf("incorrectly formatted protobuf: %s", err)
+		}
+
+		switch fsNode.Type() {
+		// Only leaf nodes (of type `Data_Raw`) contain data, but due to a
+		// bug the `Data_File` type (normally used for internal nodes) is
+		// also used for leaf nodes, so both types are accepted here
+		// (see the `balanced` package for more details).
+		case pb.Data_File, pb.Data_Raw:
+			return fsNode.Data(), nil
+		default:
+			return nil, fmt.Errorf("found %s node in unexpected place",
+				fsNode.Type().String())
+		}
+
+	case *dag.RawNode:
+		return node.RawData(), nil
+
+	default:
+		return nil, ErrUnrecognizedType
+		// TODO: `ErrUnrecognizedType` is reused here to avoid rewriting the
+		// error message, but a different error should be used (defining it
+		// in the `merkledag` or `go-ipld-format` packages).
+	}
+}
+
+// ExtractFSNode extracts the `unixfs.FSNode` from the `ipld.Node` (assuming
+// it was implemented by a `mdag.ProtoNode`).
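+// A hedged usage sketch (illustrative only):
+//
+//	fsn, err := ExtractFSNode(nd)
+//	if err != nil {
+//		return err
+//	}
+//	if fsn.IsDir() {
+//		// nd is a Data_Directory or Data_HAMTShard node.
+//	}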
+func ExtractFSNode(node ipld.Node) (*FSNode, error) {
+	protoNode, ok := node.(*dag.ProtoNode)
+	if !ok {
+		return nil, errors.New("expected a ProtoNode as internal node")
+	}
+
+	fsNode, err := FSNodeFromBytes(protoNode.Data())
+	if err != nil {
+		return nil, err
+	}
+
+	return fsNode, nil
+}
diff --git a/unixfs/unixfs_test.go b/unixfs/unixfs_test.go
new file mode 100644
index 0000000000..bbd2bd3b36
--- /dev/null
+++ b/unixfs/unixfs_test.go
@@ -0,0 +1,186 @@
+package unixfs
+
+import (
+	"bytes"
+	"testing"
+
+	proto "github.com/gogo/protobuf/proto"
+
+	pb "github.com/ipfs/boxo/unixfs/pb"
+)
+
+func TestFSNode(t *testing.T) {
+	fsn := NewFSNode(TFile)
+	for i := 0; i < 16; i++ {
+		fsn.AddBlockSize(100)
+	}
+	fsn.RemoveBlockSize(15)
+
+	fsn.SetData(make([]byte, 128))
+
+	b, err := fsn.GetBytes()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	pbn := new(pb.Data)
+	err = proto.Unmarshal(b, pbn)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ds, err := DataSize(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nKids := fsn.NumChildren()
+	if nKids != 15 {
+		t.Fatal("Wrong number of child nodes")
+	}
+
+	if ds != (100*15)+128 {
+		t.Fatal("Datasize calculations incorrect!")
+	}
+
+	nfsn, err := FSNodeFromBytes(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if nfsn.FileSize() != (100*15)+128 {
+		t.Fatal("fsNode FileSize calculations incorrect")
+	}
+}
+
+func TestPBdataTools(t *testing.T) {
+	raw := []byte{0x00, 0x01, 0x02, 0x17, 0xA1}
+	rawPB := WrapData(raw)
+
+	pbDataSize, err := DataSize(rawPB)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	same := len(raw) == int(pbDataSize)
+	if !same {
+		t.Fatal("WrapData changes the size of data.")
+	}
+
+	rawPBBytes, err := UnwrapData(rawPB)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	same = bytes.Equal(raw, rawPBBytes)
+	if !same {
+		t.Fatal("Unwrap failed to produce the correct wrapped data.")
+	}
+
+	rawPBdata, err := FSNodeFromBytes(rawPB)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	isRaw := rawPBdata.Type() == TRaw
+	if !isRaw {
+		t.Fatal("WrapData does not create pb.Data_Raw!")
+	}
+
+	catFile := []byte("Mr_Meowgie.gif")
+	catPBfile := FilePBData(catFile, 17)
+	catSize, err := DataSize(catPBfile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if catSize != 17 {
+		t.Fatal("FilePBData is the wrong size.")
+	}
+
+	dirPB := FolderPBData()
+	dir, err := FSNodeFromBytes(dirPB)
+	if err != nil {
+		t.Fatal(err)
+	}
+	isDir := dir.Type() == TDirectory
+	if !isDir {
+		t.Fatal("FolderPBData does not create a directory!")
+	}
+	_, dirErr := DataSize(dirPB)
+	if dirErr == nil {
+		t.Fatal("DataSize didn't throw an error when taking the size of a directory.")
+	}
+
+	catSym, err := SymlinkData("/ipfs/adad123123/meowgie.gif")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	catSymPB, err := FSNodeFromBytes(catSym)
+	if err != nil {
+		t.Fatal(err)
+	}
+	isSym := catSymPB.Type() == TSymlink
+	if !isSym {
+		t.Fatal("Failed to make a Symlink.")
+	}
+}
+
+func TestSymlinkFilesize(t *testing.T) {
+	path := "/ipfs/adad123123/meowgie.gif"
+	sym, err := SymlinkData(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	size, err := DataSize(sym)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if int(size) != len(path) {
+		t.Fatalf("size mismatch: %d != %d", size, len(path))
+	}
+}
+
+func TestMetadata(t *testing.T) {
+	meta := &Metadata{
+		MimeType: "audio/aiff",
+		Size:     12345,
+	}
+
+	_, err := meta.Bytes()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	metaPB, err := BytesForMetadata(meta)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	meta, err = MetadataFromBytes(metaPB)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	mimeAiff := meta.MimeType == "audio/aiff"
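+	// Only MimeType survives the round trip; MetadataFromBytes does not
+	// recover Size.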
"audio/aiff" + if !mimeAiff { + t.Fatal("Metadata does not Marshal and Unmarshal properly!") + } + +} + +func TestIsDir(t *testing.T) { + prepares := map[pb.Data_DataType]bool{ + TDirectory: true, + THAMTShard: true, + TFile: false, + TMetadata: false, + TRaw: false, + TSymlink: false, + } + for typ, v := range prepares { + fsn := NewFSNode(typ) + if fsn.IsDir() != v { + t.Fatalf("type %v, IsDir() should be %v, but %v", typ, v, fsn.IsDir()) + } + } +} diff --git a/util/.gitignore b/util/.gitignore new file mode 100644 index 0000000000..1377554ebe --- /dev/null +++ b/util/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/util/file.go b/util/file.go new file mode 100644 index 0000000000..e6e30df4d3 --- /dev/null +++ b/util/file.go @@ -0,0 +1,12 @@ +package util + +import "os" + +// FileExists check if the file with the given path exits. +func FileExists(filename string) bool { + fi, err := os.Lstat(filename) + if fi != nil || (err != nil && !os.IsNotExist(err)) { + return true + } + return false +} diff --git a/util/file_test.go b/util/file_test.go new file mode 100644 index 0000000000..040b229270 --- /dev/null +++ b/util/file_test.go @@ -0,0 +1,10 @@ +package util + +import "testing" + +func TestFileDoesNotExist(t *testing.T) { + t.Parallel() + if FileExists("i would be surprised to discover that this file exists") { + t.Fail() + } +} diff --git a/util/time.go b/util/time.go new file mode 100644 index 0000000000..37d720fb1b --- /dev/null +++ b/util/time.go @@ -0,0 +1,22 @@ +package util + +import "time" + +// TimeFormatIpfs is the format ipfs uses to represent time in string form. +var TimeFormatIpfs = time.RFC3339Nano + +// ParseRFC3339 parses an RFC3339Nano-formatted time stamp and +// returns the UTC time. +func ParseRFC3339(s string) (time.Time, error) { + t, err := time.Parse(TimeFormatIpfs, s) + if err != nil { + return time.Time{}, err + } + return t.UTC(), nil +} + +// FormatRFC3339 returns the string representation of the +// UTC value of the given time in RFC3339Nano format. +func FormatRFC3339(t time.Time) string { + return t.UTC().Format(TimeFormatIpfs) +} diff --git a/util/time_test.go b/util/time_test.go new file mode 100644 index 0000000000..b5a98caa62 --- /dev/null +++ b/util/time_test.go @@ -0,0 +1,16 @@ +package util + +import ( + "testing" + "time" +) + +func TestTimeFormatParseInversion(t *testing.T) { + v, err := ParseRFC3339(FormatRFC3339(time.Now())) + if err != nil { + t.Fatal(err) + } + if v.Location() != time.UTC { + t.Fatal("Time should be UTC") + } +} diff --git a/util/util.go b/util/util.go new file mode 100644 index 0000000000..ffcab2f33d --- /dev/null +++ b/util/util.go @@ -0,0 +1,158 @@ +// Package util implements various utility functions used within ipfs +// that do not currently have a better place to live. +package util + +import ( + "errors" + "io" + "math/rand" + "os" + "path/filepath" + "runtime/debug" + "strings" + "time" + + b58 "github.com/mr-tron/base58/base58" + mh "github.com/multiformats/go-multihash" +) + +// DefaultIpfsHash is the current default hash function used by IPFS. +const DefaultIpfsHash = mh.SHA2_256 + +// Debug is a global flag for debugging. +var Debug bool + +// ErrNotImplemented signifies a function has not been implemented yet. +var ErrNotImplemented = errors.New("error: not implemented yet") + +// ErrTimeout implies that a timeout has been triggered +var ErrTimeout = errors.New("error: call timed out") + +// ErrSearchIncomplete implies that a search type operation didn't +// find the expected node, but did find 'a' node. 
+var ErrSearchIncomplete = errors.New("error: search incomplete")
+
+var errCast = errors.New("cast error")
+
+// ErrCast is returned when a cast fails AND the program should not panic.
+func ErrCast() error {
+	debug.PrintStack()
+	return errCast
+}
+
+// ExpandPathnames takes a set of paths and turns them into absolute paths.
+func ExpandPathnames(paths []string) ([]string, error) {
+	var out []string
+	for _, p := range paths {
+		abspath, err := filepath.Abs(p)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, abspath)
+	}
+	return out, nil
+}
+
+type randGen struct {
+	rand.Rand
+}
+
+// NewTimeSeededRand returns a random bytes reader
+// which has been initialized with the current time.
+func NewTimeSeededRand() io.Reader {
+	src := rand.NewSource(time.Now().UnixNano())
+	return &randGen{
+		Rand: *rand.New(src),
+	}
+}
+
+// NewSeededRand returns a random bytes reader
+// initialized with the given seed.
+func NewSeededRand(seed int64) io.Reader {
+	src := rand.NewSource(seed)
+	return &randGen{
+		Rand: *rand.New(src),
+	}
+}
+
+func (r *randGen) Read(p []byte) (n int, err error) {
+	for i := 0; i < len(p); i++ {
+		// Intn(256) covers the full byte range 0x00-0xFF
+		// (Intn(255) would never produce 0xFF).
+		p[i] = byte(r.Rand.Intn(256))
+	}
+	return len(p), nil
+}
+
+// GetenvBool interprets the named environment variable as a boolean,
+// accepting "true", "t" and "1" (case-insensitively).
+func GetenvBool(name string) bool {
+	v := strings.ToLower(os.Getenv(name))
+	return v == "true" || v == "t" || v == "1"
+}
+
+// MultiErr is a utility type for returning multiple errors as one.
+type MultiErr []error
+
+func (m MultiErr) Error() string {
+	if len(m) == 0 {
+		return "no errors"
+	}
+
+	s := "Multiple errors: "
+	for i, e := range m {
+		if i != 0 {
+			s += ", "
+		}
+		s += e.Error()
+	}
+	return s
+}
+
+// Partition splits a subject into 3 parts: prefix, separator, suffix.
+// The first occurrence of the separator will be matched.
+// e.g. Partition("Ready, steady, go!", ", ") -> ["Ready", ", ", "steady, go!"]
+func Partition(subject string, sep string) (string, string, string) {
+	if i := strings.Index(subject, sep); i != -1 {
+		return subject[:i], subject[i : i+len(sep)], subject[i+len(sep):]
+	}
+	return subject, "", ""
+}
+
+// RPartition splits a subject into 3 parts: prefix, separator, suffix.
+// The last occurrence of the separator will be matched.
+// e.g. RPartition("Ready, steady, go!", ", ") -> ["Ready, steady", ", ", "go!"]
+func RPartition(subject string, sep string) (string, string, string) {
+	if i := strings.LastIndex(subject, sep); i != -1 {
+		return subject[:i], subject[i : i+len(sep)], subject[i+len(sep):]
+	}
+	return subject, "", ""
+}
+
+// Hash is the global IPFS hash function. It uses multihash SHA2_256 (256 bits).
+func Hash(data []byte) mh.Multihash {
+	h, err := mh.Sum(data, DefaultIpfsHash, -1)
+	if err != nil {
+		// This error can be safely ignored (panic) because multihash only
+		// fails on the selection of the hash function. If the function and
+		// length are valid, it won't error.
+		panic("multihash failed to hash using SHA2_256.")
+	}
+	return h
+}
+
+// IsValidHash checks whether a given hash is valid (b58-decodable and
+// castable to a multihash).
+func IsValidHash(s string) bool {
+	out, err := b58.Decode(s)
+	if err != nil {
+		return false
+	}
+	_, err = mh.Cast(out)
+	return err == nil
+}
+
+// XOR takes two byte slices, XORs them together, and returns the resulting slice.
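+// Both inputs are assumed to have the same length: the result has len(a),
+// and a shorter b would cause an index-out-of-range panic.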
+func XOR(a, b []byte) []byte { + c := make([]byte, len(a)) + for i := 0; i < len(a); i++ { + c[i] = a[i] ^ b[i] + } + return c +} diff --git a/util/util_test.go b/util/util_test.go new file mode 100644 index 0000000000..70747ad902 --- /dev/null +++ b/util/util_test.go @@ -0,0 +1,63 @@ +package util + +import ( + "bytes" + "testing" +) + +func TestXOR(t *testing.T) { + cases := [][3][]byte{ + { + {0xFF, 0xFF, 0xFF}, + {0xFF, 0xFF, 0xFF}, + {0x00, 0x00, 0x00}, + }, + { + {0x00, 0xFF, 0x00}, + {0xFF, 0xFF, 0xFF}, + {0xFF, 0x00, 0xFF}, + }, + { + {0x55, 0x55, 0x55}, + {0x55, 0xFF, 0xAA}, + {0x00, 0xAA, 0xFF}, + }, + } + + for _, c := range cases { + r := XOR(c[0], c[1]) + if !bytes.Equal(r, c[2]) { + t.Error("XOR failed") + } + } +} + +func BenchmarkHash256K(b *testing.B) { + buf := make([]byte, 256*1024) + NewTimeSeededRand().Read(buf) + b.SetBytes(int64(256 * 1024)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Hash(buf) + } +} + +func BenchmarkHash512K(b *testing.B) { + buf := make([]byte, 512*1024) + NewTimeSeededRand().Read(buf) + b.SetBytes(int64(512 * 1024)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Hash(buf) + } +} + +func BenchmarkHash1M(b *testing.B) { + buf := make([]byte, 1024*1024) + NewTimeSeededRand().Read(buf) + b.SetBytes(int64(1024 * 1024)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Hash(buf) + } +} diff --git a/verifcid/validate.go b/verifcid/validate.go new file mode 100644 index 0000000000..7b27debc94 --- /dev/null +++ b/verifcid/validate.go @@ -0,0 +1,69 @@ +package verifcid + +import ( + "fmt" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var ErrPossiblyInsecureHashFunction = fmt.Errorf("potentially insecure hash functions not allowed") +var ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at least %d bytes long", minimumHashLength) +var ErrAboveMaximumHashLength = fmt.Errorf("hashes must be at most %d bytes long", maximumHashLength) + +const minimumHashLength = 20 +const maximumHashLength = 128 + +var goodset = map[uint64]bool{ + mh.SHA2_256: true, + mh.SHA2_512: true, + mh.SHA3_224: true, + mh.SHA3_256: true, + mh.SHA3_384: true, + mh.SHA3_512: true, + mh.SHAKE_256: true, + mh.DBL_SHA2_256: true, + mh.KECCAK_224: true, + mh.KECCAK_256: true, + mh.KECCAK_384: true, + mh.KECCAK_512: true, + mh.BLAKE3: true, + mh.IDENTITY: true, + + mh.SHA1: true, // not really secure but still useful +} + +func IsGoodHash(code uint64) bool { + good, found := goodset[code] + if good { + return true + } + + if !found { + if code >= mh.BLAKE2B_MIN+19 && code <= mh.BLAKE2B_MAX { + return true + } + if code >= mh.BLAKE2S_MIN+19 && code <= mh.BLAKE2S_MAX { + return true + } + } + + return false +} + +func ValidateCid(c cid.Cid) error { + pref := c.Prefix() + if !IsGoodHash(pref.MhType) { + return ErrPossiblyInsecureHashFunction + } + + if pref.MhType != mh.IDENTITY && pref.MhLength < minimumHashLength { + return ErrBelowMinimumHashLength + } + + if pref.MhType != mh.IDENTITY && pref.MhLength > maximumHashLength { + return ErrAboveMaximumHashLength + } + + return nil +} diff --git a/verifcid/validate_test.go b/verifcid/validate_test.go new file mode 100644 index 0000000000..5129b861af --- /dev/null +++ b/verifcid/validate_test.go @@ -0,0 +1,70 @@ +package verifcid + +import ( + "testing" + + mh "github.com/multiformats/go-multihash" + + cid "github.com/ipfs/go-cid" +) + +func TestValidateCids(t *testing.T) { + assertTrue := func(v bool) { + t.Helper() + if !v { + t.Fatal("expected success") + } + } + assertFalse := func(v bool) { + 
t.Helper() + if v { + t.Fatal("expected failure") + } + } + + assertTrue(IsGoodHash(mh.SHA2_256)) + assertTrue(IsGoodHash(mh.BLAKE2B_MIN + 32)) + assertTrue(IsGoodHash(mh.DBL_SHA2_256)) + assertTrue(IsGoodHash(mh.KECCAK_256)) + assertTrue(IsGoodHash(mh.SHA3)) + + assertTrue(IsGoodHash(mh.SHA1)) + + assertFalse(IsGoodHash(mh.BLAKE2B_MIN + 5)) + + mhcid := func(code uint64, length int) cid.Cid { + mhash, err := mh.Sum([]byte{}, code, length) + if err != nil { + t.Fatalf("%v: code: %x length: %d", err, code, length) + } + return cid.NewCidV1(cid.DagCBOR, mhash) + } + + cases := []struct { + cid cid.Cid + err error + }{ + {mhcid(mh.SHA2_256, 32), nil}, + {mhcid(mh.SHA2_256, 16), ErrBelowMinimumHashLength}, + {mhcid(mh.MURMUR3X64_64, 4), ErrPossiblyInsecureHashFunction}, + {mhcid(mh.BLAKE3, 32), nil}, + {mhcid(mh.BLAKE3, 69), nil}, + {mhcid(mh.BLAKE3, 128), nil}, + } + + for i, cas := range cases { + if ValidateCid(cas.cid) != cas.err { + t.Errorf("wrong result in case of %s (index %d). Expected: %s, got %s", + cas.cid, i, cas.err, ValidateCid(cas.cid)) + } + } + + longBlake3Hex := "1e810104e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9791074b7511b59d31c71c62f5a745689fa6c9497f68bdf1061fe07f518d410c0b0c27f41b3cf083f8a7fdc67a877e21790515762a754a45dcb8a356722698a7af5ed2bb608983d5aa75d4d61691ef132efe8631ce0afc15553a08fffc60ee9369b" + longBlake3Mh, err := mh.FromHexString(longBlake3Hex) + if err != nil { + t.Fatalf("failed to produce a multihash from the long blake3 hash: %v", err) + } + if ValidateCid(cid.NewCidV1(cid.DagCBOR, longBlake3Mh)) != ErrAboveMaximumHashLength { + t.Errorf("a CID that was longer than the maximum hash length did not error with ErrAboveMaximumHashLength") + } +} diff --git a/version.json b/version.json new file mode 100644 index 0000000000..0ad79e3bfe --- /dev/null +++ b/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.8.0" +}