diff --git a/.github/workflows/gateway-sharness.yml b/.github/workflows/gateway-sharness.yml
index 88901fc172..3da1845c24 100644
--- a/.github/workflows/gateway-sharness.yml
+++ b/.github/workflows/gateway-sharness.yml
@@ -19,24 +19,25 @@ jobs:
uses: actions/setup-go@v3
with:
go-version: 1.19.1
- - name: Checkout go-libipfs
+ - name: Checkout boxo
uses: actions/checkout@v3
with:
- path: go-libipfs
+ path: boxo
- name: Checkout Kubo
uses: actions/checkout@v3
with:
repository: ipfs/kubo
path: kubo
+ ref: 503edee648e29c62888f05fa146ab13d9c65077d
- name: Install Missing Tools
run: sudo apt install -y socat net-tools fish libxml2-utils
- name: Restore Go Cache
uses: protocol/cache-go-action@v1
with:
name: ${{ github.job }}
- - name: Replace go-libipfs in Kubo go.mod
+ - name: Replace boxo in Kubo go.mod
run: |
- go mod edit -replace=github.com/ipfs/go-libipfs=../go-libipfs
+ go mod edit -replace=github.com/ipfs/boxo=../boxo
go mod tidy
cat go.mod
working-directory: kubo
@@ -47,5 +48,5 @@ jobs:
run: find . -maxdepth 1 -name "*gateway*.sh" -print0 | xargs -0 -I {} bash -c "echo {}; {}"
working-directory: kubo/test/sharness
- name: Run Kubo CLI Tests
- run: go test -v -run=Gateway .
+ run: go test -run=Gateway .
working-directory: kubo/test/cli
diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml
index c5cb3efc7a..a3a7b69f3f 100644
--- a/.github/workflows/go-test.yml
+++ b/.github/workflows/go-test.yml
@@ -46,7 +46,7 @@ jobs:
# Use -coverpkg=./..., so that we include cross-package coverage.
# If package ./A imports ./B, and ./A's tests also cover ./B,
# this means ./B's coverage will be significantly higher than 0%.
- run: go test -v -shuffle=on -coverprofile=module-coverage.txt -coverpkg=./... ./...
+ run: go test -shuffle=on -coverprofile=module-coverage.txt -coverpkg=./... ./...
- name: Run tests (32 bit)
# can't run 32 bit tests on OSX.
if: matrix.os != 'macos' &&
@@ -58,14 +58,14 @@ jobs:
with:
run: |
export "PATH=$PATH_386:$PATH"
- go test -v -shuffle=on ./...
+ go test -shuffle=on ./...
- name: Run tests with race detector
# speed things up. Windows and OSX VMs are slow
if: matrix.os == 'ubuntu' &&
contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false
uses: protocol/multiple-go-modules@v1.2
with:
- run: go test -v -race ./...
+ run: go test -race ./...
- name: Collect coverage files
shell: bash
run: echo "COVERAGES=$(find . -type f -name 'module-coverage.txt' | tr -s '\n' ',' | sed 's/,$//')" >> $GITHUB_ENV
diff --git a/.github/workflows/test-examples.yml b/.github/workflows/test-examples.yml
index bf91f726b1..c29a9b9a41 100644
--- a/.github/workflows/test-examples.yml
+++ b/.github/workflows/test-examples.yml
@@ -37,7 +37,7 @@ jobs:
- name: Run tests
uses: protocol/multiple-go-modules@v1.2
with:
- run: go test -v -shuffle=on ./...
+ run: go test -shuffle=on ./...
- name: Run tests (32 bit)
if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
uses: protocol/multiple-go-modules@v1.2
@@ -46,9 +46,9 @@ jobs:
with:
run: |
export "PATH=${{ env.PATH_386 }}:$PATH"
- go test -v -shuffle=on ./...
+ go test -shuffle=on ./...
- name: Run tests with race detector
if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
uses: protocol/multiple-go-modules@v1.2
with:
- run: go test -v -race ./...
+ run: go test -race ./...
diff --git a/README.md b/README.md
index e7da309564..b5554ebc7f 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,26 @@
-go-libipfs 🍌
+Boxo 🍌
-
+
A library for building IPFS applications and implementations.
-[![Go Test](https://github.com/ipfs/go-libipfs/actions/workflows/go-test.yml/badge.svg)](https://github.com/ipfs/go-libipfs/actions/workflows/go-test.yml)
-[![Go Docs](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/ipfs/go-libipfs)
-[![codecov](https://codecov.io/gh/ipfs/go-libipfs/branch/main/graph/badge.svg?token=9eG7d8fbCB)](https://codecov.io/gh/ipfs/go-libipfs)
+[![Go Test](https://github.com/ipfs/boxo/actions/workflows/go-test.yml/badge.svg)](https://github.com/ipfs/boxo/actions/workflows/go-test.yml)
+[![Go Docs](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/ipfs/boxo)
+[![codecov](https://codecov.io/gh/ipfs/boxo/branch/main/graph/badge.svg?token=9eG7d8fbCB)](https://codecov.io/gh/ipfs/boxo)
- [About](#about)
- [Motivation](#motivation)
-- [What kind of components does go-libipfs have?](#what-kind-of-components-does-go-libipfs-have)
- - [Does go-libipfs == IPFS?](#does-go-libipfs--ipfs)
+- [What kind of components does Boxo have?](#what-kind-of-components-does-boxo-have)
+ - [Does Boxo == IPFS?](#does-boxo--ipfs)
- [Is everything related to IPFS in the Go ecosystem in this repo?](#is-everything-related-to-ipfs-in-the-go-ecosystem-in-this-repo)
- [Getting started](#getting-started)
-- [Should I add my IPFS component to go-libipfs?](#should-i-add-my-ipfs-component-to-go-libipfs)
+- [Should I add my IPFS component to Boxo?](#should-i-add-my-ipfs-component-to-boxo)
- [Help](#help)
- [Governance and Access](#governance-and-access)
- [Release Process](#release-process)
@@ -31,30 +31,30 @@ go-libipfs 🍌
## About
-go-libipfs is a component library for building IPFS applications and implementations in Go.
+Boxo is a component library for building IPFS applications and implementations in Go.
-Some scenarios in which you may find go-libipfs helpful:
+Some scenarios in which you may find Boxo helpful:
* You are building an application that interacts with the IPFS network
* You are building an IPFS implementation
* You want to reuse some components of IPFS such as its Kademlia DHT, Bitswap, data encoding, etc.
* You want to experiment with IPFS
-go-libipfs powers [Kubo](https://github.com/ipfs/kubo), which is [the most popular IPFS implementation](https://github.com/protocol/network-measurements/tree/master/reports),
+Boxo powers [Kubo](https://github.com/ipfs/kubo), which is [the most popular IPFS implementation](https://github.com/protocol/network-measurements/tree/master/reports),
so its code has been battle-tested on the IPFS network for years, and is well-understood by the community.
### Motivation
**TL;DR** The goal of this repo is to help people build things. Previously users struggled to find existing useful code or to figure out how to use what they did find. We observed many running Kubo and using its HTTP RPC API. This repo aims to do better. We're taking the libraries that many were already effectively relying on in production and making them more easily discoverable and usable.
-The maintainers primarily aim to help people trying to build with IPFS in Go that were previously either giving up or relying on the [Kubo HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/). Some of these people will end up being better served by IPFS tooling in other languages (e.g., Javascript, Rust, Java, Python), but for those who are either looking to write in Go or to leverage the set of IPFS tooling we already have in Go we’d like to make their lives easier.
+The maintainers primarily aim to help people trying to build with IPFS in Go that were previously either giving up or relying on the [Kubo HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/). Some of these people will end up being better served by IPFS tooling in other languages (e.g., Javascript, Rust, Java, Python), but for those who are either looking to write in Go or to leverage the set of IPFS tooling we already have in Go we’d like to make their lives easier.
-We’d also like to make life easier on ourselves as the maintainers by reducing the maintenance burden that comes from being the owners on [many repos](https://github.com/ipfs/kubo/issues/8543) and then use that time to contribute more to the community in the form of easier to use libraries, better implementations, improved protocols, new protocols, etc.
+We’d also like to make life easier on ourselves as the maintainers by reducing the maintenance burden that comes from being the owners on [many repos](https://github.com/ipfs/kubo/issues/8543) and then use that time to contribute more to the community in the form of easier to use libraries, better implementations, improved protocols, new protocols, etc.
-Go-libipfs is not exhaustive nor comprehensive--there are plenty of useful IPFS protocols, specs, libraries, etc. that are not in go-libipfs. The goal of go-libipfs is to provide cohesive and well-maintained components for common IPFS use cases.
+Boxo is not exhaustive nor comprehensive--there are plenty of useful IPFS protocols, specs, libraries, etc. that are not in Boxo. The goal of Boxo is to provide cohesive and well-maintained components for common IPFS use cases.
-## What kind of components does go-libipfs have?
+## What kind of components does Boxo have?
-Go-libipfs includes high-quality components useful for interacting with IPFS protocols, public and private IPFS networks, and content-addressed data, such as:
+Boxo includes high-quality components useful for interacting with IPFS protocols, public and private IPFS networks, and content-addressed data, such as:
- Content routing (DHT, delegated content routing, providing)
- Data transfer (gateways, Bitswap, incremental verification)
@@ -62,44 +62,44 @@ Go-libipfs includes high-quality components useful for interacting with IPFS pro
- Interacting with public and private IPFS networks
- Working with content-addressed data
-Go-libipfs aims to provide a cohesive interface into these components. Note that not all of the underlying components necessarily reside in this respository.
+Boxo aims to provide a cohesive interface into these components. Note that not all of the underlying components necessarily reside in this repository.
-### Does go-libipfs == IPFS?
+### Does Boxo == IPFS?
No. This repo houses some IPFS functionality written in Go that has been useful in practice, and is maintained by a group that has long term commitments to the IPFS project
### Is everything related to IPFS in the Go ecosystem in this repo?
-No. Not everything related to IPFS is intended to be in go-libipfs. View it as a starter toolbox (potentially among multiple). If you’d like to build an IPFS implementation with Go, here are some tools you might want that are maintained by a group that has long term commitments to the IPFS project. There are certainly repos that others maintainer that aren't included here (e.g., ipfs/go-car) which are still useful to IPFS implementations. It's expected and fine for new IPFS functionality to be developed that won't be part of go-libipfs.
+No. Not everything related to IPFS is intended to be in Boxo. View it as a starter toolbox (potentially among multiple). If you’d like to build an IPFS implementation with Go, here are some tools you might want that are maintained by a group that has long term commitments to the IPFS project. There are certainly repos that others maintain that aren't included here (e.g., ipfs/go-car) which are still useful to IPFS implementations. It's expected and fine for new IPFS functionality to be developed that won't be part of Boxo.
## Getting started
See [examples](./examples/README.md).
-## Should I add my IPFS component to go-libipfs?
-We happily accept external contributions! However, go-libipfs maintains a high quality bar, so code accepted into go-libipfs must meet some minimum maintenance criteria:
+## Should I add my IPFS component to Boxo?
+We happily accept external contributions! However, Boxo maintains a high quality bar, so code accepted into Boxo must meet some minimum maintenance criteria:
* Actively maintained
* Must be actively used by, or will be included in software that is actively used by, a significant number of users or production systems. Code that is not actively used cannot be properly maintained.
- * Must have multiple engineers who are willing and able to maintain the relevant code in go-libipfs for a long period of time.
- * If either of these changes, go-libipfs maintainers will consider removing the component from go-libipfs.
+ * Must have multiple engineers who are willing and able to maintain the relevant code in Boxo for a long period of time.
+ * If either of these changes, Boxo maintainers will consider removing the component from Boxo.
* Adequately tested
* At least with unit tests
* Ideally also including integration tests with other components
* Adequately documented
* Godocs at minimum
* Complex components should have their own doc.go or README.md describing the component, its use cases, tradeoffs, design rationale, etc.
-* If the maintainers are not go-libipfs maintainers, then the component must include a CODEOWNERS file with at least two code owners who can commit to reviewing PRs
+* If the maintainers are not Boxo maintainers, then the component must include a CODEOWNERS file with at least two code owners who can commit to reviewing PRs
-If you have some experimental component that you think would benefit the IPFS community, we suggest you build the component in your own repository until it's clear that there's community demand for it, and then open an issue/PR in this repository to discuss including it in go-libipfs.
+If you have some experimental component that you think would benefit the IPFS community, we suggest you build the component in your own repository until it's clear that there's community demand for it, and then open an issue/PR in this repository to discuss including it in Boxo.
## Help
-If you have questions, feel free to open an issue. You can also find the go-libipfs maintainers in [Filecoin Slack](https://filecoin.io/slack/) at #go-libipfs-maintainers. (If you would like to engage via IPFS Discord or ipfs.io Matrix, please drop into the #ipfs-implementers channel/room or file an issue, and we'll get bridging from #go-libipfs-maintainers to these other chat platforms.)
+If you have questions, feel free to open an issue. You can also find the Boxo maintainers in [Filecoin Slack](https://filecoin.io/slack/) at #boxo-maintainers. (If you would like to engage via IPFS Discord or ipfs.io Matrix, please drop into the #ipfs-implementers channel/room or file an issue, and we'll get bridging from #boxo-maintainers to these other chat platforms.)
## Governance and Access
See [CODEOWNERS](./docs/CODEOWNERS) for the current maintainers list. Governance for graduating additional maintainers hasn't been established. Repo permissions are all managed through [ipfs/github-mgmt](https://github.com/ipfs/github-mgmt).
## Release Process
-To be documented: https://github.com/ipfs/go-libipfs/issues/170
+To be documented: https://github.com/ipfs/boxo/issues/170
## Related Items
* [Initial proposal for "Consolidate IPFS Repositories" that spawned this project](https://github.com/ipfs/kubo/issues/8543)
diff --git a/bitswap/README.md b/bitswap/README.md
index 804341ac94..c8fd819e86 100644
--- a/bitswap/README.md
+++ b/bitswap/README.md
@@ -40,8 +40,8 @@ wants those blocks.
```golang
import (
"context"
- bitswap "github.com/ipfs/go-libipfs/bitswap"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
+ bitswap "github.com/ipfs/boxo/bitswap"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
blockstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/libp2p/go-libp2p-core/routing"
"github.com/libp2p/go-libp2p-core/host"
diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go
index 99316ca81d..0878614c5f 100644
--- a/bitswap/benchmarks_test.go
+++ b/bitswap/benchmarks_test.go
@@ -12,17 +12,17 @@ import (
"testing"
"time"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- blocks "github.com/ipfs/go-libipfs/blocks"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ blocks "github.com/ipfs/go-block-format"
protocol "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/ipfs/boxo/bitswap"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
cid "github.com/ipfs/go-cid"
delay "github.com/ipfs/go-ipfs-delay"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
- "github.com/ipfs/go-libipfs/bitswap"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
- testinstance "github.com/ipfs/go-libipfs/bitswap/testinstance"
- tn "github.com/ipfs/go-libipfs/bitswap/testnet"
)
type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid)
diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go
index 73000f3424..e80a407aae 100644
--- a/bitswap/bitswap.go
+++ b/bitswap/bitswap.go
@@ -4,18 +4,18 @@ import (
"context"
"fmt"
- "github.com/ipfs/go-libipfs/bitswap/client"
- "github.com/ipfs/go-libipfs/bitswap/internal/defaults"
- "github.com/ipfs/go-libipfs/bitswap/message"
- "github.com/ipfs/go-libipfs/bitswap/network"
- "github.com/ipfs/go-libipfs/bitswap/server"
- "github.com/ipfs/go-libipfs/bitswap/tracer"
+ "github.com/ipfs/boxo/bitswap/client"
+ "github.com/ipfs/boxo/bitswap/internal/defaults"
+ "github.com/ipfs/boxo/bitswap/message"
+ "github.com/ipfs/boxo/bitswap/network"
+ "github.com/ipfs/boxo/bitswap/server"
+ "github.com/ipfs/boxo/bitswap/tracer"
"github.com/ipfs/go-metrics-interface"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ exchange "github.com/ipfs/boxo/exchange"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
- exchange "github.com/ipfs/go-ipfs-exchange-interface"
- blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go
index 16c5f41627..7d478ca73d 100644
--- a/bitswap/bitswap_test.go
+++ b/bitswap/bitswap_test.go
@@ -9,19 +9,19 @@ import (
"testing"
"time"
+ "github.com/ipfs/boxo/bitswap"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ "github.com/ipfs/boxo/bitswap/server"
+ testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ "github.com/ipfs/boxo/internal/test"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
detectrace "github.com/ipfs/go-detect-race"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
delay "github.com/ipfs/go-ipfs-delay"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
ipld "github.com/ipfs/go-ipld-format"
- "github.com/ipfs/go-libipfs/bitswap"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- "github.com/ipfs/go-libipfs/bitswap/server"
- testinstance "github.com/ipfs/go-libipfs/bitswap/testinstance"
- tn "github.com/ipfs/go-libipfs/bitswap/testnet"
- blocks "github.com/ipfs/go-libipfs/blocks"
- "github.com/ipfs/go-libipfs/internal/test"
tu "github.com/libp2p/go-libp2p-testing/etc"
p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil"
peer "github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/client/bitswap_with_sessions_test.go b/bitswap/client/bitswap_with_sessions_test.go
index 37a5786f0c..e283791137 100644
--- a/bitswap/client/bitswap_with_sessions_test.go
+++ b/bitswap/client/bitswap_with_sessions_test.go
@@ -6,16 +6,16 @@ import (
"testing"
"time"
+ "github.com/ipfs/boxo/bitswap"
+ "github.com/ipfs/boxo/bitswap/client/internal/session"
+ testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ "github.com/ipfs/boxo/internal/test"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
delay "github.com/ipfs/go-ipfs-delay"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
- "github.com/ipfs/go-libipfs/bitswap"
- "github.com/ipfs/go-libipfs/bitswap/client/internal/session"
- testinstance "github.com/ipfs/go-libipfs/bitswap/testinstance"
- tn "github.com/ipfs/go-libipfs/bitswap/testnet"
- blocks "github.com/ipfs/go-libipfs/blocks"
- "github.com/ipfs/go-libipfs/internal/test"
tu "github.com/libp2p/go-libp2p-testing/etc"
)
diff --git a/bitswap/client/client.go b/bitswap/client/client.go
index 31942ae60c..46e3a0ecc5 100644
--- a/bitswap/client/client.go
+++ b/bitswap/client/client.go
@@ -13,26 +13,26 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+ bsgetter "github.com/ipfs/boxo/bitswap/client/internal/getter"
+ bsmq "github.com/ipfs/boxo/bitswap/client/internal/messagequeue"
+ "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+ bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager"
+ bspqm "github.com/ipfs/boxo/bitswap/client/internal/providerquerymanager"
+ bssession "github.com/ipfs/boxo/bitswap/client/internal/session"
+ bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager"
+ bssm "github.com/ipfs/boxo/bitswap/client/internal/sessionmanager"
+ bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager"
+ "github.com/ipfs/boxo/bitswap/internal"
+ "github.com/ipfs/boxo/bitswap/internal/defaults"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ bmetrics "github.com/ipfs/boxo/bitswap/metrics"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ "github.com/ipfs/boxo/bitswap/tracer"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ exchange "github.com/ipfs/boxo/exchange"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
- exchange "github.com/ipfs/go-ipfs-exchange-interface"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
- bsgetter "github.com/ipfs/go-libipfs/bitswap/client/internal/getter"
- bsmq "github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue"
- "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications"
- bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager"
- bspqm "github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager"
- bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session"
- bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager"
- bssm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager"
- bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager"
- "github.com/ipfs/go-libipfs/bitswap/internal"
- "github.com/ipfs/go-libipfs/bitswap/internal/defaults"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
- "github.com/ipfs/go-libipfs/bitswap/tracer"
- blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-metrics-interface"
process "github.com/jbenet/goprocess"
diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go
index 991b0166c7..5e30073a36 100644
--- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go
+++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go
@@ -3,9 +3,9 @@ package blockpresencemanager
import (
"testing"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go
index b091a6a3b4..4b8f5b6023 100644
--- a/bitswap/client/internal/getter/getter.go
+++ b/bitswap/client/internal/getter/getter.go
@@ -4,13 +4,13 @@ import (
"context"
"errors"
- "github.com/ipfs/go-libipfs/bitswap/client/internal"
- notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications"
+ "github.com/ipfs/boxo/bitswap/client/internal"
+ notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
logging "github.com/ipfs/go-log"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
- blocks "github.com/ipfs/go-libipfs/blocks"
)
var log = logging.Logger("bitswap")
diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go
index 6cbf8d2f34..a6a28aab16 100644
--- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go
+++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go
@@ -8,9 +8,9 @@ import (
"time"
"github.com/benbjohnson/clock"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
)
diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go
index 2508ecd578..b529bde4ed 100644
--- a/bitswap/client/internal/messagequeue/messagequeue.go
+++ b/bitswap/client/internal/messagequeue/messagequeue.go
@@ -7,11 +7,11 @@ import (
"time"
"github.com/benbjohnson/clock"
+ bswl "github.com/ipfs/boxo/bitswap/client/wantlist"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
cid "github.com/ipfs/go-cid"
- bswl "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go
index ac3c523a6e..59788f50b6 100644
--- a/bitswap/client/internal/messagequeue/messagequeue_test.go
+++ b/bitswap/client/internal/messagequeue/messagequeue_test.go
@@ -10,12 +10,12 @@ import (
"time"
"github.com/benbjohnson/clock"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
- "github.com/ipfs/go-libipfs/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
)
diff --git a/bitswap/client/internal/notifications/notifications.go b/bitswap/client/internal/notifications/notifications.go
index 92d63ee181..ed4b79f57f 100644
--- a/bitswap/client/internal/notifications/notifications.go
+++ b/bitswap/client/internal/notifications/notifications.go
@@ -5,8 +5,8 @@ import (
"sync"
pubsub "github.com/cskr/pubsub"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
- blocks "github.com/ipfs/go-libipfs/blocks"
)
const bufferSize = 16
diff --git a/bitswap/client/internal/notifications/notifications_test.go b/bitswap/client/internal/notifications/notifications_test.go
index 790c69446b..09c8eb806e 100644
--- a/bitswap/client/internal/notifications/notifications_test.go
+++ b/bitswap/client/internal/notifications/notifications_test.go
@@ -6,10 +6,10 @@ import (
"testing"
"time"
+ "github.com/ipfs/boxo/internal/test"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
- blocks "github.com/ipfs/go-libipfs/blocks"
- "github.com/ipfs/go-libipfs/internal/test"
)
func TestDuplicates(t *testing.T) {
diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go
index 9c9b9d39a0..40e1f072cc 100644
--- a/bitswap/client/internal/peermanager/peermanager_test.go
+++ b/bitswap/client/internal/peermanager/peermanager_test.go
@@ -6,9 +6,9 @@ import (
"testing"
"time"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
"github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go
index 6a351c60ba..618217d6b2 100644
--- a/bitswap/client/internal/peermanager/peerwantmanager_test.go
+++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go
@@ -3,9 +3,9 @@ package peermanager
import (
"testing"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go
index 57e0764697..6cf5fa4a2d 100644
--- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go
+++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go
@@ -8,9 +8,9 @@ import (
"testing"
"time"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
"github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go
index 0ab3cd5c09..1aed9c4ef8 100644
--- a/bitswap/client/internal/session/peerresponsetracker_test.go
+++ b/bitswap/client/internal/session/peerresponsetracker_test.go
@@ -4,8 +4,8 @@ import (
"math"
"testing"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/session/sentwantblockstracker_test.go b/bitswap/client/internal/session/sentwantblockstracker_test.go
index c4b3c8c79f..ccb920e31c 100644
--- a/bitswap/client/internal/session/sentwantblockstracker_test.go
+++ b/bitswap/client/internal/session/sentwantblockstracker_test.go
@@ -3,8 +3,8 @@ package session
import (
"testing"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
)
func TestSendWantBlocksTracker(t *testing.T) {
diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go
index 68d0306724..8acf0050e6 100644
--- a/bitswap/client/internal/session/session.go
+++ b/bitswap/client/internal/session/session.go
@@ -4,15 +4,15 @@ import (
"context"
"time"
+ "github.com/ipfs/boxo/bitswap/client/internal"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+ bsgetter "github.com/ipfs/boxo/bitswap/client/internal/getter"
+ notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+ bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager"
+ bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
delay "github.com/ipfs/go-ipfs-delay"
- "github.com/ipfs/go-libipfs/bitswap/client/internal"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
- bsgetter "github.com/ipfs/go-libipfs/bitswap/client/internal/getter"
- notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications"
- bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager"
- bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager"
- blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p/core/peer"
"go.uber.org/zap"
diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go
index 27fd17ac61..b60c7d1af8 100644
--- a/bitswap/client/internal/session/session_test.go
+++ b/bitswap/client/internal/session/session_test.go
@@ -6,16 +6,16 @@ import (
"testing"
"time"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+ notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+ bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager"
+ bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager"
+ bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
delay "github.com/ipfs/go-ipfs-delay"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
- notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications"
- bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager"
- bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager"
- bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/session/sessionwants_test.go b/bitswap/client/internal/session/sessionwants_test.go
index 1de335c336..bdb73ebd12 100644
--- a/bitswap/client/internal/session/sessionwants_test.go
+++ b/bitswap/client/internal/session/sessionwants_test.go
@@ -3,9 +3,9 @@ package session
import (
"testing"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
)
func TestEmptySessionWants(t *testing.T) {
diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go
index e06e05da35..41145fbf6d 100644
--- a/bitswap/client/internal/session/sessionwantsender.go
+++ b/bitswap/client/internal/session/sessionwantsender.go
@@ -3,7 +3,7 @@ package session
import (
"context"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go
index eb1fe06244..97ff788a9d 100644
--- a/bitswap/client/internal/session/sessionwantsender_test.go
+++ b/bitswap/client/internal/session/sessionwantsender_test.go
@@ -6,12 +6,12 @@ import (
"testing"
"time"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+ bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager"
+ bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
- bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager"
- bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/session/wantinfo_test.go b/bitswap/client/internal/session/wantinfo_test.go
index 883f1eea3b..c7348a4a1f 100644
--- a/bitswap/client/internal/session/wantinfo_test.go
+++ b/bitswap/client/internal/session/wantinfo_test.go
@@ -3,8 +3,8 @@ package session
import (
"testing"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
)
func TestEmptyWantInfo(t *testing.T) {
diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go
index 320cca7354..0ab32ed1b6 100644
--- a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go
+++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go
@@ -3,7 +3,7 @@ package sessioninterestmanager
import (
"sync"
- blocks "github.com/ipfs/go-libipfs/blocks"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
)
diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go
index 2bc79c2326..85857b9ba1 100644
--- a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go
+++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go
@@ -3,9 +3,9 @@ package sessioninterestmanager
import (
"testing"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
)
func TestEmpty(t *testing.T) {
diff --git a/bitswap/client/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go
index 690077e70a..38e490a2e1 100644
--- a/bitswap/client/internal/sessionmanager/sessionmanager.go
+++ b/bitswap/client/internal/sessionmanager/sessionmanager.go
@@ -11,12 +11,12 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- exchange "github.com/ipfs/go-ipfs-exchange-interface"
- "github.com/ipfs/go-libipfs/bitswap/client/internal"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
- notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications"
- bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session"
- bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager"
+ "github.com/ipfs/boxo/bitswap/client/internal"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+ notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+ bssession "github.com/ipfs/boxo/bitswap/client/internal/session"
+ bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager"
+ exchange "github.com/ipfs/boxo/exchange"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go
index c2bcf72a23..51dde7fda1 100644
--- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go
+++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go
@@ -7,16 +7,16 @@ import (
"testing"
"time"
+ bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager"
+ notifications "github.com/ipfs/boxo/bitswap/client/internal/notifications"
+ bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager"
+ bssession "github.com/ipfs/boxo/bitswap/client/internal/session"
+ bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
delay "github.com/ipfs/go-ipfs-delay"
- bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
- notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications"
- bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager"
- bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session"
- bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- blocks "github.com/ipfs/go-libipfs/blocks"
- "github.com/ipfs/go-libipfs/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go
index ba9b4d1652..fc1d7274d2 100644
--- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go
+++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go
@@ -4,8 +4,8 @@ import (
"sync"
"testing"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/client/wantlist/wantlist.go b/bitswap/client/wantlist/wantlist.go
index 9e40e16048..6cb71eeccc 100644
--- a/bitswap/client/wantlist/wantlist.go
+++ b/bitswap/client/wantlist/wantlist.go
@@ -5,7 +5,7 @@ package wantlist
import (
"sort"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
cid "github.com/ipfs/go-cid"
)
diff --git a/bitswap/client/wantlist/wantlist_test.go b/bitswap/client/wantlist/wantlist_test.go
index 9177ae7e6f..829af50a6f 100644
--- a/bitswap/client/wantlist/wantlist_test.go
+++ b/bitswap/client/wantlist/wantlist_test.go
@@ -3,9 +3,9 @@ package wantlist
import (
"testing"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ "github.com/ipfs/boxo/internal/test"
cid "github.com/ipfs/go-cid"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- "github.com/ipfs/go-libipfs/internal/test"
"github.com/stretchr/testify/require"
)
diff --git a/bitswap/decision/forward.go b/bitswap/decision/forward.go
index 913e3093c3..c7e7b42f43 100644
--- a/bitswap/decision/forward.go
+++ b/bitswap/decision/forward.go
@@ -1,6 +1,6 @@
package decision
-import "github.com/ipfs/go-libipfs/bitswap/server"
+import "github.com/ipfs/boxo/bitswap/server"
type (
// Deprecated: use server.Receipt instead
diff --git a/bitswap/forward.go b/bitswap/forward.go
index 59d32e525e..d6d657b2b6 100644
--- a/bitswap/forward.go
+++ b/bitswap/forward.go
@@ -1,8 +1,8 @@
package bitswap
import (
- "github.com/ipfs/go-libipfs/bitswap/server"
- "github.com/ipfs/go-libipfs/bitswap/tracer"
+ "github.com/ipfs/boxo/bitswap/server"
+ "github.com/ipfs/boxo/bitswap/tracer"
)
type (
diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go
index fadff93400..d35bdb7435 100644
--- a/bitswap/internal/testutil/testutil.go
+++ b/bitswap/internal/testutil/testutil.go
@@ -4,11 +4,11 @@ import (
"crypto/rand"
"fmt"
+ "github.com/ipfs/boxo/bitswap/client/wantlist"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
- "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- blocks "github.com/ipfs/go-libipfs/blocks"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/internal/testutil/testutil_test.go b/bitswap/internal/testutil/testutil_test.go
index 99c76f86be..c4dc1af156 100644
--- a/bitswap/internal/testutil/testutil_test.go
+++ b/bitswap/internal/testutil/testutil_test.go
@@ -3,7 +3,7 @@ package testutil
import (
"testing"
- blocks "github.com/ipfs/go-libipfs/blocks"
+ blocks "github.com/ipfs/go-block-format"
)
func TestGenerateBlocksOfSize(t *testing.T) {
diff --git a/bitswap/message/message.go b/bitswap/message/message.go
index 42c0a46a29..6b9d787e72 100644
--- a/bitswap/message/message.go
+++ b/bitswap/message/message.go
@@ -5,15 +5,15 @@ import (
"errors"
"io"
- "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
+ "github.com/ipfs/boxo/bitswap/client/wantlist"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
- blocks "github.com/ipfs/go-libipfs/blocks"
pool "github.com/libp2p/go-buffer-pool"
msgio "github.com/libp2p/go-msgio"
- u "github.com/ipfs/go-ipfs-util"
+ u "github.com/ipfs/boxo/util"
"github.com/libp2p/go-libp2p/core/network"
)
diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go
index e4559f8b48..520abd22b6 100644
--- a/bitswap/message/message_test.go
+++ b/bitswap/message/message_test.go
@@ -4,13 +4,13 @@ import (
"bytes"
"testing"
+ "github.com/ipfs/boxo/bitswap/client/wantlist"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
- "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
+ u "github.com/ipfs/boxo/util"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
- u "github.com/ipfs/go-ipfs-util"
- blocks "github.com/ipfs/go-libipfs/blocks"
)
func mkFakeCid(s string) cid.Cid {
diff --git a/bitswap/message/pb/cid_test.go b/bitswap/message/pb/cid_test.go
index d0d5d2b23c..490e6b9970 100644
--- a/bitswap/message/pb/cid_test.go
+++ b/bitswap/message/pb/cid_test.go
@@ -4,10 +4,10 @@ import (
"bytes"
"testing"
+ u "github.com/ipfs/boxo/util"
"github.com/ipfs/go-cid"
- u "github.com/ipfs/go-ipfs-util"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
)
func TestCID(t *testing.T) {
diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go
index 77bbe33dcd..e3904ee555 100644
--- a/bitswap/network/connecteventmanager_test.go
+++ b/bitswap/network/connecteventmanager_test.go
@@ -5,8 +5,8 @@ import (
"testing"
"time"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
)
diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go
index 7c6eeecd4b..962bc25882 100644
--- a/bitswap/network/interface.go
+++ b/bitswap/network/interface.go
@@ -4,8 +4,8 @@ import (
"context"
"time"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- "github.com/ipfs/go-libipfs/bitswap/network/internal"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ "github.com/ipfs/boxo/bitswap/network/internal"
cid "github.com/ipfs/go-cid"
diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go
index c796a4731e..00eb76ba4e 100644
--- a/bitswap/network/ipfs_impl.go
+++ b/bitswap/network/ipfs_impl.go
@@ -8,8 +8,8 @@ import (
"sync/atomic"
"time"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- "github.com/ipfs/go-libipfs/bitswap/network/internal"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ "github.com/ipfs/boxo/bitswap/network/internal"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go
index 62b3ac98e9..61b00baa35 100644
--- a/bitswap/network/ipfs_impl_test.go
+++ b/bitswap/network/ipfs_impl_test.go
@@ -7,15 +7,15 @@ import (
"testing"
"time"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ "github.com/ipfs/boxo/bitswap/network/internal"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ "github.com/ipfs/boxo/internal/test"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
ds "github.com/ipfs/go-datastore"
blocksutil "github.com/ipfs/go-ipfs-blocksutil"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
- "github.com/ipfs/go-libipfs/bitswap/network/internal"
- tn "github.com/ipfs/go-libipfs/bitswap/testnet"
- "github.com/ipfs/go-libipfs/internal/test"
tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
diff --git a/bitswap/network/ipfs_impl_timeout_test.go b/bitswap/network/ipfs_impl_timeout_test.go
index 2543075d5c..178c2fb69d 100644
--- a/bitswap/network/ipfs_impl_timeout_test.go
+++ b/bitswap/network/ipfs_impl_timeout_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/internal/test"
"github.com/stretchr/testify/require"
)
diff --git a/bitswap/options.go b/bitswap/options.go
index 9ccf3c4d60..da759dfe28 100644
--- a/bitswap/options.go
+++ b/bitswap/options.go
@@ -3,10 +3,10 @@ package bitswap
import (
"time"
+ "github.com/ipfs/boxo/bitswap/client"
+ "github.com/ipfs/boxo/bitswap/server"
+ "github.com/ipfs/boxo/bitswap/tracer"
delay "github.com/ipfs/go-ipfs-delay"
- "github.com/ipfs/go-libipfs/bitswap/client"
- "github.com/ipfs/go-libipfs/bitswap/server"
- "github.com/ipfs/go-libipfs/bitswap/tracer"
)
type option func(*Bitswap)
diff --git a/bitswap/sendOnlyTracer.go b/bitswap/sendOnlyTracer.go
index 9570b4390c..ad03e2922b 100644
--- a/bitswap/sendOnlyTracer.go
+++ b/bitswap/sendOnlyTracer.go
@@ -1,8 +1,8 @@
package bitswap
import (
- "github.com/ipfs/go-libipfs/bitswap/message"
- "github.com/ipfs/go-libipfs/bitswap/tracer"
+ "github.com/ipfs/boxo/bitswap/message"
+ "github.com/ipfs/boxo/bitswap/tracer"
"github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/server/forward.go b/bitswap/server/forward.go
index bb8395229b..ee353da191 100644
--- a/bitswap/server/forward.go
+++ b/bitswap/server/forward.go
@@ -1,7 +1,7 @@
package server
import (
- "github.com/ipfs/go-libipfs/bitswap/server/internal/decision"
+ "github.com/ipfs/boxo/bitswap/server/internal/decision"
)
type (
diff --git a/bitswap/server/internal/decision/blockstoremanager.go b/bitswap/server/internal/decision/blockstoremanager.go
index 4171c038db..fe438b4d90 100644
--- a/bitswap/server/internal/decision/blockstoremanager.go
+++ b/bitswap/server/internal/decision/blockstoremanager.go
@@ -5,10 +5,10 @@ import (
"fmt"
"sync"
+ bstore "github.com/ipfs/boxo/blockstore"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
- bstore "github.com/ipfs/go-ipfs-blockstore"
ipld "github.com/ipfs/go-ipld-format"
- blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/ipfs/go-metrics-interface"
)
diff --git a/bitswap/server/internal/decision/blockstoremanager_test.go b/bitswap/server/internal/decision/blockstoremanager_test.go
index 06c5ec56d1..3d4a8ea377 100644
--- a/bitswap/server/internal/decision/blockstoremanager_test.go
+++ b/bitswap/server/internal/decision/blockstoremanager_test.go
@@ -7,15 +7,15 @@ import (
"testing"
"time"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ "github.com/ipfs/boxo/internal/test"
+ blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/delayed"
ds_sync "github.com/ipfs/go-datastore/sync"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
delay "github.com/ipfs/go-ipfs-delay"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- blocks "github.com/ipfs/go-libipfs/blocks"
- "github.com/ipfs/go-libipfs/internal/test"
"github.com/ipfs/go-metrics-interface"
)
diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go
index 208fef7403..8cd3c7ef78 100644
--- a/bitswap/server/internal/decision/engine.go
+++ b/bitswap/server/internal/decision/engine.go
@@ -10,14 +10,14 @@ import (
"github.com/google/uuid"
+ wl "github.com/ipfs/boxo/bitswap/client/wantlist"
+ "github.com/ipfs/boxo/bitswap/internal/defaults"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ bmetrics "github.com/ipfs/boxo/bitswap/metrics"
+ bstore "github.com/ipfs/boxo/blockstore"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
- bstore "github.com/ipfs/go-ipfs-blockstore"
- wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
- "github.com/ipfs/go-libipfs/bitswap/internal/defaults"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics"
- blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-metrics-interface"
"github.com/ipfs/go-peertaskqueue"
diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go
index 5d93ad83f5..9b4dbc2def 100644
--- a/bitswap/server/internal/decision/engine_test.go
+++ b/bitswap/server/internal/decision/engine_test.go
@@ -13,15 +13,15 @@ import (
"time"
"github.com/benbjohnson/clock"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ message "github.com/ipfs/boxo/bitswap/message"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ "github.com/ipfs/boxo/internal/test"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- message "github.com/ipfs/go-libipfs/bitswap/message"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- blocks "github.com/ipfs/go-libipfs/blocks"
- "github.com/ipfs/go-libipfs/internal/test"
process "github.com/jbenet/goprocess"
peer "github.com/libp2p/go-libp2p/core/peer"
libp2ptest "github.com/libp2p/go-libp2p/core/test"
diff --git a/bitswap/server/internal/decision/peer_ledger.go b/bitswap/server/internal/decision/peer_ledger.go
index 102dad4c42..cc7a5e1ac2 100644
--- a/bitswap/server/internal/decision/peer_ledger.go
+++ b/bitswap/server/internal/decision/peer_ledger.go
@@ -1,8 +1,8 @@
package decision
import (
- wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
+ wl "github.com/ipfs/boxo/bitswap/client/wantlist"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/server/internal/decision/taskmerger_test.go b/bitswap/server/internal/decision/taskmerger_test.go
index 2a0b2dab1b..e0ce46ed6f 100644
--- a/bitswap/server/internal/decision/taskmerger_test.go
+++ b/bitswap/server/internal/decision/taskmerger_test.go
@@ -3,8 +3,8 @@ package decision
import (
"testing"
- "github.com/ipfs/go-libipfs/bitswap/internal/testutil"
- "github.com/ipfs/go-libipfs/internal/test"
+ "github.com/ipfs/boxo/bitswap/internal/testutil"
+ "github.com/ipfs/boxo/internal/test"
"github.com/ipfs/go-peertaskqueue"
"github.com/ipfs/go-peertaskqueue/peertask"
)
diff --git a/bitswap/server/server.go b/bitswap/server/server.go
index 7918b73d78..a3378d6c44 100644
--- a/bitswap/server/server.go
+++ b/bitswap/server/server.go
@@ -8,16 +8,16 @@ import (
"sync"
"time"
+ "github.com/ipfs/boxo/bitswap/internal/defaults"
+ "github.com/ipfs/boxo/bitswap/message"
+ pb "github.com/ipfs/boxo/bitswap/message/pb"
+ bmetrics "github.com/ipfs/boxo/bitswap/metrics"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ "github.com/ipfs/boxo/bitswap/server/internal/decision"
+ "github.com/ipfs/boxo/bitswap/tracer"
+ blockstore "github.com/ipfs/boxo/blockstore"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
- "github.com/ipfs/go-libipfs/bitswap/internal/defaults"
- "github.com/ipfs/go-libipfs/bitswap/message"
- pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
- bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
- "github.com/ipfs/go-libipfs/bitswap/server/internal/decision"
- "github.com/ipfs/go-libipfs/bitswap/tracer"
- blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-metrics-interface"
process "github.com/jbenet/goprocess"
diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go
index 47e22b7a2e..5a052b8314 100644
--- a/bitswap/testinstance/testinstance.go
+++ b/bitswap/testinstance/testinstance.go
@@ -4,14 +4,14 @@ import (
"context"
"time"
+ "github.com/ipfs/boxo/bitswap"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ blockstore "github.com/ipfs/boxo/blockstore"
ds "github.com/ipfs/go-datastore"
delayed "github.com/ipfs/go-datastore/delayed"
ds_sync "github.com/ipfs/go-datastore/sync"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
delay "github.com/ipfs/go-ipfs-delay"
- "github.com/ipfs/go-libipfs/bitswap"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
- tn "github.com/ipfs/go-libipfs/bitswap/testnet"
tnet "github.com/libp2p/go-libp2p-testing/net"
p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil"
peer "github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go
index 62f3ca6256..ec28185185 100644
--- a/bitswap/testnet/interface.go
+++ b/bitswap/testnet/interface.go
@@ -1,7 +1,7 @@
package bitswap
import (
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go
index 388b9c9c39..fc055a2d1c 100644
--- a/bitswap/testnet/network_test.go
+++ b/bitswap/testnet/network_test.go
@@ -5,12 +5,12 @@ import (
"sync"
"testing"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
+ blocks "github.com/ipfs/go-block-format"
delay "github.com/ipfs/go-ipfs-delay"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
- blocks "github.com/ipfs/go-libipfs/blocks"
tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go
index ea98b98f45..e4df19699f 100644
--- a/bitswap/testnet/peernet.go
+++ b/bitswap/testnet/peernet.go
@@ -3,10 +3,10 @@ package bitswap
import (
"context"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
ds "github.com/ipfs/go-datastore"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p/core/peer"
diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go
index b743489662..0deb1b1ab0 100644
--- a/bitswap/testnet/virtual.go
+++ b/bitswap/testnet/virtual.go
@@ -8,12 +8,12 @@ import (
"sync/atomic"
"time"
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
- bsnet "github.com/ipfs/go-libipfs/bitswap/network"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
+ bsnet "github.com/ipfs/boxo/bitswap/network"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
cid "github.com/ipfs/go-cid"
delay "github.com/ipfs/go-ipfs-delay"
- mockrouting "github.com/ipfs/go-ipfs-routing/mock"
tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p/core/connmgr"
diff --git a/bitswap/tracer/tracer.go b/bitswap/tracer/tracer.go
index 91ff25cf85..421212adf3 100644
--- a/bitswap/tracer/tracer.go
+++ b/bitswap/tracer/tracer.go
@@ -1,7 +1,7 @@
package tracer
import (
- bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
+ bsmsg "github.com/ipfs/boxo/bitswap/message"
peer "github.com/libp2p/go-libp2p/core/peer"
)
diff --git a/bitswap/wantlist/forward.go b/bitswap/wantlist/forward.go
index 75a2555970..077ef9a590 100644
--- a/bitswap/wantlist/forward.go
+++ b/bitswap/wantlist/forward.go
@@ -1,8 +1,8 @@
package wantlist
import (
+ "github.com/ipfs/boxo/bitswap/client/wantlist"
"github.com/ipfs/go-cid"
- "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
)
type (
diff --git a/blocks/blocks.go b/blocks/blocks.go
deleted file mode 100644
index 3d3894b3f3..0000000000
--- a/blocks/blocks.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Package blocks contains the lowest level of IPLD data structures.
-// A block is raw data accompanied by a CID. The CID contains the multihash
-// corresponding to the block.
-package blocks
-
-import (
- "errors"
- "fmt"
-
- cid "github.com/ipfs/go-cid"
- u "github.com/ipfs/go-ipfs-util"
- mh "github.com/multiformats/go-multihash"
-)
-
-// ErrWrongHash is returned when the Cid of a block is not the expected
-// according to the contents. It is currently used only when debugging.
-var ErrWrongHash = errors.New("data did not match given hash")
-
-// Block provides abstraction for blocks implementations.
-type Block interface {
- RawData() []byte
- Cid() cid.Cid
- String() string
- Loggable() map[string]interface{}
-}
-
-// A BasicBlock is a singular block of data in ipfs. It implements the Block
-// interface.
-type BasicBlock struct {
- cid cid.Cid
- data []byte
-}
-
-// NewBlock creates a Block object from opaque data. It will hash the data.
-func NewBlock(data []byte) *BasicBlock {
- // TODO: fix assumptions
- return &BasicBlock{data: data, cid: cid.NewCidV0(u.Hash(data))}
-}
-
-// NewBlockWithCid creates a new block when the hash of the data
-// is already known, this is used to save time in situations where
-// we are able to be confident that the data is correct.
-func NewBlockWithCid(data []byte, c cid.Cid) (*BasicBlock, error) {
- if u.Debug {
- chkc, err := c.Prefix().Sum(data)
- if err != nil {
- return nil, err
- }
-
- if !chkc.Equals(c) {
- return nil, ErrWrongHash
- }
- }
- return &BasicBlock{data: data, cid: c}, nil
-}
-
-// Multihash returns the hash contained in the block CID.
-func (b *BasicBlock) Multihash() mh.Multihash {
- return b.cid.Hash()
-}
-
-// RawData returns the block raw contents as a byte slice.
-func (b *BasicBlock) RawData() []byte {
- return b.data
-}
-
-// Cid returns the content identifier of the block.
-func (b *BasicBlock) Cid() cid.Cid {
- return b.cid
-}
-
-// String provides a human-readable representation of the block CID.
-func (b *BasicBlock) String() string {
- return fmt.Sprintf("[Block %s]", b.Cid())
-}
-
-// Loggable returns a go-log loggable item.
-func (b *BasicBlock) Loggable() map[string]interface{} {
- return map[string]interface{}{
- "block": b.Cid().String(),
- }
-}
diff --git a/blocks/blocks_test.go b/blocks/blocks_test.go
deleted file mode 100644
index 18f0c1f38c..0000000000
--- a/blocks/blocks_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package blocks
-
-import (
- "bytes"
- "testing"
-
- cid "github.com/ipfs/go-cid"
- u "github.com/ipfs/go-ipfs-util"
- mh "github.com/multiformats/go-multihash"
-)
-
-func TestBlocksBasic(t *testing.T) {
-
- // Test empty data
- empty := []byte{}
- NewBlock(empty)
-
- // Test nil case
- NewBlock(nil)
-
- // Test some data
- NewBlock([]byte("Hello world!"))
-}
-
-func TestData(t *testing.T) {
- data := []byte("some data")
- block := NewBlock(data)
-
- if !bytes.Equal(block.RawData(), data) {
- t.Error("data is wrong")
- }
-}
-
-func TestHash(t *testing.T) {
- data := []byte("some other data")
- block := NewBlock(data)
-
- hash, err := mh.Sum(data, mh.SHA2_256, -1)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(block.Multihash(), hash) {
- t.Error("wrong multihash")
- }
-}
-
-func TestCid(t *testing.T) {
- data := []byte("yet another data")
- block := NewBlock(data)
- c := block.Cid()
-
- if !bytes.Equal(block.Multihash(), c.Hash()) {
- t.Error("key contains wrong data")
- }
-}
-
-func TestManualHash(t *testing.T) {
- oldDebugState := u.Debug
- defer (func() {
- u.Debug = oldDebugState
- })()
-
- data := []byte("I can't figure out more names .. data")
- hash, err := mh.Sum(data, mh.SHA2_256, -1)
- if err != nil {
- t.Fatal(err)
- }
-
- c := cid.NewCidV0(hash)
-
- u.Debug = false
- block, err := NewBlockWithCid(data, c)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(block.Multihash(), hash) {
- t.Error("wrong multihash")
- }
-
- data[5] = byte((uint32(data[5]) + 5) % 256) // Transfrom hash to be different
- block, err = NewBlockWithCid(data, c)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(block.Multihash(), hash) {
- t.Error("wrong multihash")
- }
-
- u.Debug = true
-
- _, err = NewBlockWithCid(data, c)
- if err != ErrWrongHash {
- t.Fatal(err)
- }
-}
diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go
new file mode 100644
index 0000000000..6691b805c4
--- /dev/null
+++ b/blockservice/blockservice.go
@@ -0,0 +1,470 @@
+// Package blockservice implements a BlockService interface that provides
+// a single GetBlock/AddBlock interface that seamlessly retrieves data either
+// locally or from a remote peer through the exchange.
+package blockservice
+
+import (
+ "context"
+ "io"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+
+ blockstore "github.com/ipfs/boxo/blockstore"
+ exchange "github.com/ipfs/boxo/exchange"
+ "github.com/ipfs/boxo/verifcid"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ipld "github.com/ipfs/go-ipld-format"
+ logging "github.com/ipfs/go-log/v2"
+
+ "github.com/ipfs/boxo/blockservice/internal"
+)
+
+var logger = logging.Logger("blockservice")
+
+// BlockGetter is the common interface shared between blockservice sessions and
+// the blockservice.
+type BlockGetter interface {
+ // GetBlock gets the requested block.
+ GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error)
+
+ // GetBlocks does a batch request for the given cids, returning blocks as
+ // they are found, in no particular order.
+ //
+ // It may not be able to find all requested blocks (or the context may
+ // be canceled). In that case, it will close the channel early. It is up
+ // to the consumer to detect this situation and keep track which blocks
+ // it has received and which it hasn't.
+ GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block
+}
+
+// BlockService is a hybrid block datastore. It stores data in a local
+// datastore and may retrieve data from a remote Exchange.
+// It uses an internal `datastore.Datastore` instance to store values.
+type BlockService interface {
+ io.Closer
+ BlockGetter
+
+ // Blockstore returns a reference to the underlying blockstore
+ Blockstore() blockstore.Blockstore
+
+ // Exchange returns a reference to the underlying exchange (usually bitswap)
+ Exchange() exchange.Interface
+
+ // AddBlock puts a given block to the underlying datastore
+ AddBlock(ctx context.Context, o blocks.Block) error
+
+ // AddBlocks adds a slice of blocks at the same time using batching
+ // capabilities of the underlying datastore whenever possible.
+ AddBlocks(ctx context.Context, bs []blocks.Block) error
+
+ // DeleteBlock deletes the given block from the blockservice.
+ DeleteBlock(ctx context.Context, o cid.Cid) error
+}
+
+type blockService struct {
+ blockstore blockstore.Blockstore
+ exchange exchange.Interface
+ // If checkFirst is true then first check that a block doesn't
+ // already exist to avoid republishing the block on the exchange.
+ checkFirst bool
+}
+
+// NewBlockService creates a BlockService with given datastore instance.
+func New(bs blockstore.Blockstore, rem exchange.Interface) BlockService {
+ if rem == nil {
+ logger.Debug("blockservice running in local (offline) mode.")
+ }
+
+ return &blockService{
+ blockstore: bs,
+ exchange: rem,
+ checkFirst: true,
+ }
+}
+
+// NewWriteThrough creates a BlockService that guarantees writes will go
+// through to the blockstore and are not skipped by cache checks.
+func NewWriteThrough(bs blockstore.Blockstore, rem exchange.Interface) BlockService {
+ if rem == nil {
+ logger.Debug("blockservice running in local (offline) mode.")
+ }
+
+ return &blockService{
+ blockstore: bs,
+ exchange: rem,
+ checkFirst: false,
+ }
+}
+
+// Blockstore returns the blockstore behind this blockservice.
+func (s *blockService) Blockstore() blockstore.Blockstore {
+ return s.blockstore
+}
+
+// Exchange returns the exchange behind this blockservice.
+func (s *blockService) Exchange() exchange.Interface {
+ return s.exchange
+}
+
+// NewSession creates a new session that allows for
+// controlled exchange of wantlists to decrease the bandwidth overhead.
+// If the current exchange is a SessionExchange, a new exchange
+// session will be created. Otherwise, the current exchange will be used
+// directly.
+func NewSession(ctx context.Context, bs BlockService) *Session {
+ exch := bs.Exchange()
+ if sessEx, ok := exch.(exchange.SessionExchange); ok {
+ return &Session{
+ sessCtx: ctx,
+ ses: nil,
+ sessEx: sessEx,
+ bs: bs.Blockstore(),
+ notifier: exch,
+ }
+ }
+ return &Session{
+ ses: exch,
+ sessCtx: ctx,
+ bs: bs.Blockstore(),
+ notifier: exch,
+ }
+}
+
+// AddBlock adds a particular block to the service, Putting it into the datastore.
+func (s *blockService) AddBlock(ctx context.Context, o blocks.Block) error {
+ ctx, span := internal.StartSpan(ctx, "blockService.AddBlock")
+ defer span.End()
+
+ c := o.Cid()
+ // hash security
+ err := verifcid.ValidateCid(c)
+ if err != nil {
+ return err
+ }
+ if s.checkFirst {
+ if has, err := s.blockstore.Has(ctx, c); has || err != nil {
+ return err
+ }
+ }
+
+ if err := s.blockstore.Put(ctx, o); err != nil {
+ return err
+ }
+
+ logger.Debugf("BlockService.BlockAdded %s", c)
+
+ if s.exchange != nil {
+ if err := s.exchange.NotifyNewBlocks(ctx, o); err != nil {
+ logger.Errorf("NotifyNewBlocks: %s", err.Error())
+ }
+ }
+
+ return nil
+}
+
+func (s *blockService) AddBlocks(ctx context.Context, bs []blocks.Block) error {
+ ctx, span := internal.StartSpan(ctx, "blockService.AddBlocks")
+ defer span.End()
+
+ // hash security
+ for _, b := range bs {
+ err := verifcid.ValidateCid(b.Cid())
+ if err != nil {
+ return err
+ }
+ }
+ var toput []blocks.Block
+ if s.checkFirst {
+ toput = make([]blocks.Block, 0, len(bs))
+ for _, b := range bs {
+ has, err := s.blockstore.Has(ctx, b.Cid())
+ if err != nil {
+ return err
+ }
+ if !has {
+ toput = append(toput, b)
+ }
+ }
+ } else {
+ toput = bs
+ }
+
+ if len(toput) == 0 {
+ return nil
+ }
+
+ err := s.blockstore.PutMany(ctx, toput)
+ if err != nil {
+ return err
+ }
+
+ if s.exchange != nil {
+ logger.Debugf("BlockService.BlockAdded %d blocks", len(toput))
+ if err := s.exchange.NotifyNewBlocks(ctx, toput...); err != nil {
+ logger.Errorf("NotifyNewBlocks: %s", err.Error())
+ }
+ }
+ return nil
+}
+
+// GetBlock retrieves a particular block from the service,
+// Getting it from the datastore using the key (hash).
+func (s *blockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+ ctx, span := internal.StartSpan(ctx, "blockService.GetBlock", trace.WithAttributes(attribute.Stringer("CID", c)))
+ defer span.End()
+
+ var f func() notifiableFetcher
+ if s.exchange != nil {
+ f = s.getExchange
+ }
+
+ return getBlock(ctx, c, s.blockstore, f) // hash security
+}
+
+func (s *blockService) getExchange() notifiableFetcher {
+ return s.exchange
+}
+
+func getBlock(ctx context.Context, c cid.Cid, bs blockstore.Blockstore, fget func() notifiableFetcher) (blocks.Block, error) {
+ err := verifcid.ValidateCid(c) // hash security
+ if err != nil {
+ return nil, err
+ }
+
+ block, err := bs.Get(ctx, c)
+ if err == nil {
+ return block, nil
+ }
+
+ if ipld.IsNotFound(err) && fget != nil {
+ f := fget() // Don't load the exchange until we have to
+
+ // TODO be careful checking ErrNotFound. If the underlying
+ // implementation changes, this will break.
+ logger.Debug("Blockservice: Searching bitswap")
+ blk, err := f.GetBlock(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+ // also write in the blockstore for caching, inform the exchange that the block is available
+ err = bs.Put(ctx, blk)
+ if err != nil {
+ return nil, err
+ }
+ err = f.NotifyNewBlocks(ctx, blk)
+ if err != nil {
+ return nil, err
+ }
+ logger.Debugf("BlockService.BlockFetched %s", c)
+ return blk, nil
+ }
+
+ logger.Debug("Blockservice GetBlock: Not found")
+ return nil, err
+}
+
+// GetBlocks gets a list of blocks asynchronously and returns through
+// the returned channel.
+// NB: No guarantees are made about order.
+func (s *blockService) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block {
+ ctx, span := internal.StartSpan(ctx, "blockService.GetBlocks")
+ defer span.End()
+
+ var f func() notifiableFetcher
+ if s.exchange != nil {
+ f = s.getExchange
+ }
+
+ return getBlocks(ctx, ks, s.blockstore, f) // hash security
+}
+
+func getBlocks(ctx context.Context, ks []cid.Cid, bs blockstore.Blockstore, fget func() notifiableFetcher) <-chan blocks.Block {
+ out := make(chan blocks.Block)
+
+ go func() {
+ defer close(out)
+
+ allValid := true
+ for _, c := range ks {
+ if err := verifcid.ValidateCid(c); err != nil {
+ allValid = false
+ break
+ }
+ }
+
+ if !allValid {
+ ks2 := make([]cid.Cid, 0, len(ks))
+ for _, c := range ks {
+ // hash security
+ if err := verifcid.ValidateCid(c); err == nil {
+ ks2 = append(ks2, c)
+ } else {
+ logger.Errorf("unsafe CID (%s) passed to blockService.GetBlocks: %s", c, err)
+ }
+ }
+ ks = ks2
+ }
+
+ var misses []cid.Cid
+ for _, c := range ks {
+ hit, err := bs.Get(ctx, c)
+ if err != nil {
+ misses = append(misses, c)
+ continue
+ }
+ select {
+ case out <- hit:
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ if len(misses) == 0 || fget == nil {
+ return
+ }
+
+ f := fget() // don't load exchange unless we have to
+ rblocks, err := f.GetBlocks(ctx, misses)
+ if err != nil {
+ logger.Debugf("Error with GetBlocks: %s", err)
+ return
+ }
+
+ // batch available blocks together
+ const batchSize = 32
+ batch := make([]blocks.Block, 0, batchSize)
+ for {
+ var noMoreBlocks bool
+ batchLoop:
+ for len(batch) < batchSize {
+ select {
+ case b, ok := <-rblocks:
+ if !ok {
+ noMoreBlocks = true
+ break batchLoop
+ }
+
+ logger.Debugf("BlockService.BlockFetched %s", b.Cid())
+ batch = append(batch, b)
+ case <-ctx.Done():
+ return
+ default:
+ break batchLoop
+ }
+ }
+
+ // also write in the blockstore for caching, inform the exchange that the blocks are available
+ err = bs.PutMany(ctx, batch)
+ if err != nil {
+ logger.Errorf("could not write blocks from the network to the blockstore: %s", err)
+ return
+ }
+
+ err = f.NotifyNewBlocks(ctx, batch...)
+ if err != nil {
+ logger.Errorf("could not tell the exchange about new blocks: %s", err)
+ return
+ }
+
+ for _, b := range batch {
+ select {
+ case out <- b:
+ case <-ctx.Done():
+ return
+ }
+ }
+ batch = batch[:0]
+ if noMoreBlocks {
+ break
+ }
+ }
+ }()
+ return out
+}
+
+// DeleteBlock deletes a block in the blockservice from the datastore
+func (s *blockService) DeleteBlock(ctx context.Context, c cid.Cid) error {
+ ctx, span := internal.StartSpan(ctx, "blockService.DeleteBlock", trace.WithAttributes(attribute.Stringer("CID", c)))
+ defer span.End()
+
+ err := s.blockstore.DeleteBlock(ctx, c)
+ if err == nil {
+ logger.Debugf("BlockService.BlockDeleted %s", c)
+ }
+ return err
+}
+
+func (s *blockService) Close() error {
+ logger.Debug("blockservice is shutting down...")
+ return s.exchange.Close()
+}
+
+type notifier interface {
+ NotifyNewBlocks(context.Context, ...blocks.Block) error
+}
+
+// Session is a helper type to provide higher level access to bitswap sessions
+type Session struct {
+ bs blockstore.Blockstore
+ ses exchange.Fetcher
+ sessEx exchange.SessionExchange
+ sessCtx context.Context
+ notifier notifier
+ lk sync.Mutex
+}
+
+type notifiableFetcher interface {
+ exchange.Fetcher
+ notifier
+}
+
+type notifiableFetcherWrapper struct {
+ exchange.Fetcher
+ notifier
+}
+
+func (s *Session) getSession() notifiableFetcher {
+ s.lk.Lock()
+ defer s.lk.Unlock()
+ if s.ses == nil {
+ s.ses = s.sessEx.NewSession(s.sessCtx)
+ }
+
+ return notifiableFetcherWrapper{s.ses, s.notifier}
+}
+
+func (s *Session) getExchange() notifiableFetcher {
+ return notifiableFetcherWrapper{s.ses, s.notifier}
+}
+
+func (s *Session) getFetcherFactory() func() notifiableFetcher {
+ if s.sessEx != nil {
+ return s.getSession
+ }
+ if s.ses != nil {
+ // Our exchange isn't session compatible, let's fall back to non-session fetches
+ return s.getExchange
+ }
+ return nil
+}
+
+// GetBlock gets a block in the context of a request session
+func (s *Session) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+ ctx, span := internal.StartSpan(ctx, "Session.GetBlock", trace.WithAttributes(attribute.Stringer("CID", c)))
+ defer span.End()
+
+ return getBlock(ctx, c, s.bs, s.getFetcherFactory()) // hash security
+}
+
+// GetBlocks gets blocks in the context of a request session
+func (s *Session) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block {
+ ctx, span := internal.StartSpan(ctx, "Session.GetBlocks")
+ defer span.End()
+
+ return getBlocks(ctx, ks, s.bs, s.getFetcherFactory()) // hash security
+}
+
+var _ BlockGetter = (*Session)(nil)
diff --git a/blockservice/blockservice_test.go b/blockservice/blockservice_test.go
new file mode 100644
index 0000000000..14396c8a1e
--- /dev/null
+++ b/blockservice/blockservice_test.go
@@ -0,0 +1,243 @@
+package blockservice
+
+import (
+ "context"
+ "testing"
+
+ blockstore "github.com/ipfs/boxo/blockstore"
+ exchange "github.com/ipfs/boxo/exchange"
+ offline "github.com/ipfs/boxo/exchange/offline"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ dssync "github.com/ipfs/go-datastore/sync"
+ butil "github.com/ipfs/go-ipfs-blocksutil"
+ ipld "github.com/ipfs/go-ipld-format"
+)
+
+func TestWriteThroughWorks(t *testing.T) {
+ bstore := &PutCountingBlockstore{
+ blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())),
+ 0,
+ }
+ exchbstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ exch := offline.Exchange(exchbstore)
+ bserv := NewWriteThrough(bstore, exch)
+ bgen := butil.NewBlockGenerator()
+
+ block := bgen.Next()
+
+ t.Logf("PutCounter: %d", bstore.PutCounter)
+ err := bserv.AddBlock(context.Background(), block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bstore.PutCounter != 1 {
+ t.Fatalf("expected just one Put call, have: %d", bstore.PutCounter)
+ }
+
+ err = bserv.AddBlock(context.Background(), block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bstore.PutCounter != 2 {
+ t.Fatalf("Put should have called again, should be 2 is: %d", bstore.PutCounter)
+ }
+}
+
+func TestExchangeWrite(t *testing.T) {
+ bstore := &PutCountingBlockstore{
+ blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())),
+ 0,
+ }
+ exchbstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ exch := ¬ifyCountingExchange{
+ offline.Exchange(exchbstore),
+ 0,
+ }
+ bserv := NewWriteThrough(bstore, exch)
+ bgen := butil.NewBlockGenerator()
+
+ for name, fetcher := range map[string]BlockGetter{
+ "blockservice": bserv,
+ "session": NewSession(context.Background(), bserv),
+ } {
+ t.Run(name, func(t *testing.T) {
+ // GetBlock
+ block := bgen.Next()
+ err := exchbstore.Put(context.Background(), block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := fetcher.GetBlock(context.Background(), block.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.Cid() != block.Cid() {
+ t.Fatalf("GetBlock returned unexpected block")
+ }
+ if bstore.PutCounter != 1 {
+ t.Fatalf("expected one Put call, have: %d", bstore.PutCounter)
+ }
+ if exch.notifyCount != 1 {
+ t.Fatalf("expected one NotifyNewBlocks call, have: %d", exch.notifyCount)
+ }
+
+ // GetBlocks
+ b1 := bgen.Next()
+ err = exchbstore.Put(context.Background(), b1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b2 := bgen.Next()
+ err = exchbstore.Put(context.Background(), b2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bchan := fetcher.GetBlocks(context.Background(), []cid.Cid{b1.Cid(), b2.Cid()})
+ var gotBlocks []blocks.Block
+ for b := range bchan {
+ gotBlocks = append(gotBlocks, b)
+ }
+ if len(gotBlocks) != 2 {
+ t.Fatalf("expected to retrieve 2 blocks, got %d", len(gotBlocks))
+ }
+ if bstore.PutCounter != 3 {
+ t.Fatalf("expected 3 Put call, have: %d", bstore.PutCounter)
+ }
+ if exch.notifyCount != 3 {
+ t.Fatalf("expected one NotifyNewBlocks call, have: %d", exch.notifyCount)
+ }
+
+ // reset counts
+ bstore.PutCounter = 0
+ exch.notifyCount = 0
+ })
+ }
+}
+
+func TestLazySessionInitialization(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ bstore2 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ bstore3 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ session := offline.Exchange(bstore2)
+ exch := offline.Exchange(bstore3)
+ sessionExch := &fakeSessionExchange{Interface: exch, session: session}
+ bservSessEx := NewWriteThrough(bstore, sessionExch)
+ bgen := butil.NewBlockGenerator()
+
+ block := bgen.Next()
+ err := bstore.Put(ctx, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ block2 := bgen.Next()
+ err = bstore2.Put(ctx, block2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = session.NotifyNewBlocks(ctx, block2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bsession := NewSession(ctx, bservSessEx)
+ if bsession.ses != nil {
+ t.Fatal("Session exchange should not instantiated session immediately")
+ }
+ returnedBlock, err := bsession.GetBlock(ctx, block.Cid())
+ if err != nil {
+ t.Fatal("Should have fetched block locally")
+ }
+ if returnedBlock.Cid() != block.Cid() {
+ t.Fatal("Got incorrect block")
+ }
+ if bsession.ses != nil {
+ t.Fatal("Session exchange should not instantiated session if local store had block")
+ }
+ returnedBlock, err = bsession.GetBlock(ctx, block2.Cid())
+ if err != nil {
+ t.Fatal("Should have fetched block remotely")
+ }
+ if returnedBlock.Cid() != block2.Cid() {
+ t.Fatal("Got incorrect block")
+ }
+ if bsession.ses != session {
+ t.Fatal("Should have initialized session to fetch block")
+ }
+}
+
+var _ blockstore.Blockstore = (*PutCountingBlockstore)(nil)
+
+type PutCountingBlockstore struct {
+ blockstore.Blockstore
+ PutCounter int
+}
+
+func (bs *PutCountingBlockstore) Put(ctx context.Context, block blocks.Block) error {
+ bs.PutCounter++
+ return bs.Blockstore.Put(ctx, block)
+}
+
+func (bs *PutCountingBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
+ bs.PutCounter += len(blocks)
+ return bs.Blockstore.PutMany(ctx, blocks)
+}
+
+var _ exchange.Interface = (*notifyCountingExchange)(nil)
+
+type notifyCountingExchange struct {
+ exchange.Interface
+ notifyCount int
+}
+
+func (n *notifyCountingExchange) NotifyNewBlocks(ctx context.Context, blocks ...blocks.Block) error {
+ n.notifyCount += len(blocks)
+ return n.Interface.NotifyNewBlocks(ctx, blocks...)
+}
+
+var _ exchange.SessionExchange = (*fakeSessionExchange)(nil)
+
+type fakeSessionExchange struct {
+ exchange.Interface
+ session exchange.Fetcher
+}
+
+func (fe *fakeSessionExchange) NewSession(ctx context.Context) exchange.Fetcher {
+ if ctx == nil {
+ panic("nil context")
+ }
+ return fe.session
+}
+
+func TestNilExchange(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ bgen := butil.NewBlockGenerator()
+ block := bgen.Next()
+
+ bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ bserv := NewWriteThrough(bs, nil)
+ sess := NewSession(ctx, bserv)
+ _, err := sess.GetBlock(ctx, block.Cid())
+ if !ipld.IsNotFound(err) {
+ t.Fatal("expected block to not be found")
+ }
+ err = bs.Put(ctx, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := sess.GetBlock(ctx, block.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b.Cid() != block.Cid() {
+ t.Fatal("got the wrong block")
+ }
+}
diff --git a/blockservice/internal/tracing.go b/blockservice/internal/tracing.go
new file mode 100644
index 0000000000..96a61ff423
--- /dev/null
+++ b/blockservice/internal/tracing.go
@@ -0,0 +1,13 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ return otel.Tracer("go-blockservice").Start(ctx, fmt.Sprintf("Blockservice.%s", name), opts...)
+}
diff --git a/blockservice/test/blocks_test.go b/blockservice/test/blocks_test.go
new file mode 100644
index 0000000000..6bfe603e4f
--- /dev/null
+++ b/blockservice/test/blocks_test.go
@@ -0,0 +1,100 @@
+package bstest
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ . "github.com/ipfs/boxo/blockservice"
+
+ blockstore "github.com/ipfs/boxo/blockstore"
+ offline "github.com/ipfs/boxo/exchange/offline"
+ u "github.com/ipfs/boxo/util"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ dssync "github.com/ipfs/go-datastore/sync"
+)
+
+func newObject(data []byte) blocks.Block {
+ return blocks.NewBlock(data)
+}
+
+func TestBlocks(t *testing.T) {
+ bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
+ bs := New(bstore, offline.Exchange(bstore))
+ defer bs.Close()
+
+ o := newObject([]byte("beep boop"))
+ h := cid.NewCidV0(u.Hash([]byte("beep boop")))
+ if !o.Cid().Equals(h) {
+ t.Error("Block key and data multihash key not equal")
+ }
+
+ err := bs.AddBlock(context.Background(), o)
+ if err != nil {
+ t.Error("failed to add block to BlockService", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ b2, err := bs.GetBlock(ctx, o.Cid())
+ if err != nil {
+ t.Error("failed to retrieve block from BlockService", err)
+ return
+ }
+
+ if !o.Cid().Equals(b2.Cid()) {
+ t.Error("Block keys not equal.")
+ }
+
+ if !bytes.Equal(o.RawData(), b2.RawData()) {
+ t.Error("Block data is not equal.")
+ }
+}
+
+func makeObjects(n int) []blocks.Block {
+ var out []blocks.Block
+ for i := 0; i < n; i++ {
+ out = append(out, newObject([]byte(fmt.Sprintf("object %d", i))))
+ }
+ return out
+}
+
+func TestGetBlocksSequential(t *testing.T) {
+ var servs = Mocks(4)
+ for _, s := range servs {
+ defer s.Close()
+ }
+ objs := makeObjects(50)
+
+ var cids []cid.Cid
+ for _, o := range objs {
+ cids = append(cids, o.Cid())
+ err := servs[0].AddBlock(context.Background(), o)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ t.Log("one instance at a time, get blocks concurrently")
+
+ for i := 1; i < len(servs); i++ {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
+ defer cancel()
+ out := servs[i].GetBlocks(ctx, cids)
+ gotten := make(map[string]blocks.Block)
+ for blk := range out {
+ if _, ok := gotten[blk.Cid().KeyString()]; ok {
+ t.Fatal("Got duplicate block!")
+ }
+ gotten[blk.Cid().KeyString()] = blk
+ }
+ if len(gotten) != len(objs) {
+ t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(objs))
+ }
+ }
+}
diff --git a/blockservice/test/mock.go b/blockservice/test/mock.go
new file mode 100644
index 0000000000..fa6469fb6d
--- /dev/null
+++ b/blockservice/test/mock.go
@@ -0,0 +1,23 @@
+package bstest
+
+import (
+ testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+ tn "github.com/ipfs/boxo/bitswap/testnet"
+ "github.com/ipfs/boxo/blockservice"
+ mockrouting "github.com/ipfs/boxo/routing/mock"
+ delay "github.com/ipfs/go-ipfs-delay"
+)
+
+// Mocks returns |n| connected mock Blockservices
+func Mocks(n int) []blockservice.BlockService {
+ net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
+ sg := testinstance.NewTestInstanceGenerator(net, nil, nil)
+
+ instances := sg.Instances(n)
+
+ var servs []blockservice.BlockService
+ for _, i := range instances {
+ servs = append(servs, blockservice.New(i.Blockstore(), i.Exchange))
+ }
+ return servs
+}
diff --git a/blockstore/arc_cache.go b/blockstore/arc_cache.go
new file mode 100644
index 0000000000..ba8ecbb637
--- /dev/null
+++ b/blockstore/arc_cache.go
@@ -0,0 +1,408 @@
+package blockstore
+
+import (
+ "context"
+ "sort"
+ "sync"
+
+ lru "github.com/hashicorp/golang-lru"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ipld "github.com/ipfs/go-ipld-format"
+ metrics "github.com/ipfs/go-metrics-interface"
+)
+
+type cacheHave bool
+type cacheSize int
+
+type lock struct {
+ mu sync.RWMutex
+ refcnt int
+}
+
+// arccache wraps a BlockStore with an Adaptive Replacement Cache (ARC) that
+// does not store the actual blocks, just metadata about them: existence and
+// size. This provides block access-time improvements, allowing
+// to short-cut many searches without querying the underlying datastore.
+type arccache struct {
+ lklk sync.Mutex
+ lks map[string]*lock
+
+ cache *lru.TwoQueueCache
+
+ blockstore Blockstore
+ viewer Viewer
+
+ hits metrics.Counter
+ total metrics.Counter
+}
+
+var _ Blockstore = (*arccache)(nil)
+var _ Viewer = (*arccache)(nil)
+
+func newARCCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*arccache, error) {
+ cache, err := lru.New2Q(lruSize)
+ if err != nil {
+ return nil, err
+ }
+ c := &arccache{cache: cache, blockstore: bs, lks: make(map[string]*lock)}
+ c.hits = metrics.NewCtx(ctx, "arc.hits_total", "Number of ARC cache hits").Counter()
+ c.total = metrics.NewCtx(ctx, "arc_total", "Total number of ARC cache requests").Counter()
+ if v, ok := bs.(Viewer); ok {
+ c.viewer = v
+ }
+ return c, nil
+}
+
+func (b *arccache) lock(k string, write bool) {
+ b.lklk.Lock()
+ lk, ok := b.lks[k]
+ if !ok {
+ lk = new(lock)
+ b.lks[k] = lk
+ }
+ lk.refcnt++
+ b.lklk.Unlock()
+ if write {
+ lk.mu.Lock()
+ } else {
+ lk.mu.RLock()
+ }
+}
+
+func (b *arccache) unlock(key string, write bool) {
+ b.lklk.Lock()
+ lk := b.lks[key]
+ lk.refcnt--
+ if lk.refcnt == 0 {
+ delete(b.lks, key)
+ }
+ b.lklk.Unlock()
+ if write {
+ lk.mu.Unlock()
+ } else {
+ lk.mu.RUnlock()
+ }
+}
+
+func cacheKey(k cid.Cid) string {
+ return string(k.Hash())
+}
+
+func (b *arccache) DeleteBlock(ctx context.Context, k cid.Cid) error {
+ if !k.Defined() {
+ return nil
+ }
+
+ key := cacheKey(k)
+
+ if has, _, ok := b.queryCache(key); ok && !has {
+ return nil
+ }
+
+ b.lock(key, true)
+ defer b.unlock(key, true)
+
+ err := b.blockstore.DeleteBlock(ctx, k)
+ if err == nil {
+ b.cacheHave(key, false)
+ } else {
+ b.cacheInvalidate(key)
+ }
+ return err
+}
+
+func (b *arccache) Has(ctx context.Context, k cid.Cid) (bool, error) {
+ if !k.Defined() {
+ logger.Error("undefined cid in arccache")
+ // Return cache invalid so the call to blockstore happens
+ // in case of invalid key and correct error is created.
+ return false, nil
+ }
+
+ key := cacheKey(k)
+
+ if has, _, ok := b.queryCache(key); ok {
+ return has, nil
+ }
+
+ b.lock(key, false)
+ defer b.unlock(key, false)
+
+ has, err := b.blockstore.Has(ctx, k)
+ if err != nil {
+ return false, err
+ }
+ b.cacheHave(key, has)
+ return has, nil
+}
+
+func (b *arccache) GetSize(ctx context.Context, k cid.Cid) (int, error) {
+ if !k.Defined() {
+ return -1, ipld.ErrNotFound{Cid: k}
+ }
+
+ key := cacheKey(k)
+
+ if has, blockSize, ok := b.queryCache(key); ok {
+ if !has {
+ // don't have it, return
+ return -1, ipld.ErrNotFound{Cid: k}
+ }
+ if blockSize >= 0 {
+ // have it and we know the size
+ return blockSize, nil
+ }
+ // we have it but don't know the size, ask the datastore.
+ }
+
+ b.lock(key, false)
+ defer b.unlock(key, false)
+
+ blockSize, err := b.blockstore.GetSize(ctx, k)
+ if ipld.IsNotFound(err) {
+ b.cacheHave(key, false)
+ } else if err == nil {
+ b.cacheSize(key, blockSize)
+ }
+ return blockSize, err
+}
+
+func (b *arccache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
+ // shortcircuit and fall back to Get if the underlying store
+ // doesn't support Viewer.
+ if b.viewer == nil {
+ blk, err := b.Get(ctx, k)
+ if err != nil {
+ return err
+ }
+ return callback(blk.RawData())
+ }
+
+ if !k.Defined() {
+ return ipld.ErrNotFound{Cid: k}
+ }
+
+ key := cacheKey(k)
+
+ if has, _, ok := b.queryCache(key); ok && !has {
+ // short circuit if the cache deterministically tells us the item
+ // doesn't exist.
+ return ipld.ErrNotFound{Cid: k}
+ }
+
+ b.lock(key, false)
+ defer b.unlock(key, false)
+
+ var cberr error
+ var size int
+ if err := b.viewer.View(ctx, k, func(buf []byte) error {
+ size = len(buf)
+ cberr = callback(buf)
+ return nil
+ }); err != nil {
+ if ipld.IsNotFound(err) {
+ b.cacheHave(key, false)
+ }
+ return err
+ }
+
+ b.cacheSize(key, size)
+
+ return cberr
+}
+
+func (b *arccache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+ if !k.Defined() {
+ return nil, ipld.ErrNotFound{Cid: k}
+ }
+
+ key := cacheKey(k)
+
+ if has, _, ok := b.queryCache(key); ok && !has {
+ return nil, ipld.ErrNotFound{Cid: k}
+ }
+
+ b.lock(key, false)
+ defer b.unlock(key, false)
+
+ bl, err := b.blockstore.Get(ctx, k)
+ if bl == nil && ipld.IsNotFound(err) {
+ b.cacheHave(key, false)
+ } else if bl != nil {
+ b.cacheSize(key, len(bl.RawData()))
+ }
+ return bl, err
+}
+
+func (b *arccache) Put(ctx context.Context, bl blocks.Block) error {
+ key := cacheKey(bl.Cid())
+
+ if has, _, ok := b.queryCache(key); ok && has {
+ return nil
+ }
+
+ b.lock(key, true)
+ defer b.unlock(key, true)
+
+ err := b.blockstore.Put(ctx, bl)
+ if err == nil {
+ b.cacheSize(key, len(bl.RawData()))
+ } else {
+ b.cacheInvalidate(key)
+ }
+ return err
+}
+
+type keyedBlocks struct {
+ keys []string
+ blocks []blocks.Block
+}
+
+func (b *keyedBlocks) Len() int {
+ return len(b.keys)
+}
+
+func (b *keyedBlocks) Less(i, j int) bool {
+ return b.keys[i] < b.keys[j]
+}
+
+func (b *keyedBlocks) Swap(i, j int) {
+ b.keys[i], b.keys[j] = b.keys[j], b.keys[i]
+ b.blocks[i], b.blocks[j] = b.blocks[j], b.blocks[i]
+}
+
+func (b *keyedBlocks) append(key string, blk blocks.Block) {
+ b.keys = append(b.keys, key)
+ b.blocks = append(b.blocks, blk)
+}
+
+func (b *keyedBlocks) isEmpty() bool {
+ return len(b.keys) == 0
+}
+
+func (b *keyedBlocks) sortAndDedup() {
+ if b.isEmpty() {
+ return
+ }
+
+ sort.Sort(b)
+
+ // https://github.com/golang/go/wiki/SliceTricks#in-place-deduplicate-comparable
+ j := 0
+ for i := 1; i < len(b.keys); i++ {
+ if b.keys[j] == b.keys[i] {
+ continue
+ }
+ j++
+ b.keys[j] = b.keys[i]
+ b.blocks[j] = b.blocks[i]
+ }
+
+ b.keys = b.keys[:j+1]
+ b.blocks = b.blocks[:j+1]
+}
+
+func newKeyedBlocks(cap int) *keyedBlocks {
+ return &keyedBlocks{
+ keys: make([]string, 0, cap),
+ blocks: make([]blocks.Block, 0, cap),
+ }
+}
+
+func (b *arccache) PutMany(ctx context.Context, bs []blocks.Block) error {
+ good := newKeyedBlocks(len(bs))
+ for _, blk := range bs {
+ // call put on block if result is inconclusive or we are sure that
+ // the block isn't in storage
+ key := cacheKey(blk.Cid())
+ if has, _, ok := b.queryCache(key); !ok || (ok && !has) {
+ good.append(key, blk)
+ }
+ }
+
+ if good.isEmpty() {
+ return nil
+ }
+
+ good.sortAndDedup()
+
+ for _, key := range good.keys {
+ b.lock(key, true)
+ }
+
+ defer func() {
+ for _, key := range good.keys {
+ b.unlock(key, true)
+ }
+ }()
+
+ err := b.blockstore.PutMany(ctx, good.blocks)
+ if err != nil {
+ return err
+ }
+ for i, key := range good.keys {
+ b.cacheSize(key, len(good.blocks[i].RawData()))
+ }
+
+ return nil
+}
+
+func (b *arccache) HashOnRead(enabled bool) {
+ b.blockstore.HashOnRead(enabled)
+}
+
+func (b *arccache) cacheHave(key string, have bool) {
+ b.cache.Add(key, cacheHave(have))
+}
+
+func (b *arccache) cacheSize(key string, blockSize int) {
+ b.cache.Add(key, cacheSize(blockSize))
+}
+
+func (b *arccache) cacheInvalidate(key string) {
+ b.cache.Remove(key)
+}
+
+// queryCache checks if the CID is in the cache. If so, it returns:
+//
+// - exists (bool): whether the CID is known to exist or not.
+// - size (int): the size if cached, or -1 if not cached.
+// - ok (bool): whether present in the cache.
+//
+// When ok is false, the answer is inconclusive and the caller must ignore the
+// other two return values. Querying the underlying store is necessary.
+//
+// When ok is true, exists carries the correct answer, and size carries the
+// size, if known, or -1 if not.
+func (b *arccache) queryCache(k string) (exists bool, size int, ok bool) {
+ b.total.Inc()
+
+ h, ok := b.cache.Get(k)
+ if ok {
+ b.hits.Inc()
+ switch h := h.(type) {
+ case cacheHave:
+ return bool(h), -1, true
+ case cacheSize:
+ return true, int(h), true
+ }
+ }
+ return false, -1, false
+}
+
+func (b *arccache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return b.blockstore.AllKeysChan(ctx)
+}
+
+func (b *arccache) GCLock(ctx context.Context) Unlocker {
+ return b.blockstore.(GCBlockstore).GCLock(ctx)
+}
+
+func (b *arccache) PinLock(ctx context.Context) Unlocker {
+ return b.blockstore.(GCBlockstore).PinLock(ctx)
+}
+
+func (b *arccache) GCRequested(ctx context.Context) bool {
+ return b.blockstore.(GCBlockstore).GCRequested(ctx)
+}
diff --git a/blockstore/arc_cache_test.go b/blockstore/arc_cache_test.go
new file mode 100644
index 0000000000..164457d1bf
--- /dev/null
+++ b/blockstore/arc_cache_test.go
@@ -0,0 +1,399 @@
+package blockstore
+
+import (
+ "context"
+ "io"
+ "math/rand"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ syncds "github.com/ipfs/go-datastore/sync"
+ ipld "github.com/ipfs/go-ipld-format"
+)
+
+var exampleBlock = blocks.NewBlock([]byte("foo"))
+
+func testArcCached(ctx context.Context, bs Blockstore) (*arccache, error) {
+ if ctx == nil {
+ ctx = context.TODO()
+ }
+ opts := DefaultCacheOpts()
+ opts.HasBloomFilterSize = 0
+ opts.HasBloomFilterHashes = 0
+ bbs, err := CachedBlockstore(ctx, bs, opts)
+ if err == nil {
+ return bbs.(*arccache), nil
+ }
+ return nil, err
+}
+
+func createStores(t testing.TB) (*arccache, Blockstore, *callbackDatastore) {
+ cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+ bs := NewBlockstore(syncds.MutexWrap(cd))
+ arc, err := testArcCached(context.TODO(), bs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return arc, bs, cd
+}
+
+func trap(message string, cd *callbackDatastore, t *testing.T) {
+ cd.SetFunc(func() {
+ t.Fatal(message)
+ })
+}
+func untrap(cd *callbackDatastore) {
+ cd.SetFunc(func() {})
+}
+
+func TestRemoveCacheEntryOnDelete(t *testing.T) {
+ arc, _, cd := createStores(t)
+
+ arc.Put(bg, exampleBlock)
+
+ cd.Lock()
+ writeHitTheDatastore := false
+ cd.Unlock()
+
+ cd.SetFunc(func() {
+ writeHitTheDatastore = true
+ })
+
+ arc.DeleteBlock(bg, exampleBlock.Cid())
+ arc.Put(bg, exampleBlock)
+ if !writeHitTheDatastore {
+ t.Fail()
+ }
+}
+
+func TestElideDuplicateWrite(t *testing.T) {
+ arc, _, cd := createStores(t)
+
+ arc.Put(bg, exampleBlock)
+ trap("write hit datastore", cd, t)
+ arc.Put(bg, exampleBlock)
+}
+
+func TestHasRequestTriggersCache(t *testing.T) {
+ arc, _, cd := createStores(t)
+
+ arc.Has(bg, exampleBlock.Cid())
+ trap("has hit datastore", cd, t)
+ if has, err := arc.Has(bg, exampleBlock.Cid()); has || err != nil {
+ t.Fatal("has was true but there is no such block")
+ }
+
+ untrap(cd)
+ err := arc.Put(bg, exampleBlock)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ trap("has hit datastore", cd, t)
+
+ if has, err := arc.Has(bg, exampleBlock.Cid()); !has || err != nil {
+ t.Fatal("has returned invalid result")
+ }
+}
+
+func TestGetFillsCache(t *testing.T) {
+ arc, _, cd := createStores(t)
+
+ if bl, err := arc.Get(bg, exampleBlock.Cid()); bl != nil || err == nil {
+ t.Fatal("block was found or there was no error")
+ }
+
+ trap("has hit datastore", cd, t)
+
+ if has, err := arc.Has(bg, exampleBlock.Cid()); has || err != nil {
+ t.Fatal("has was true but there is no such block")
+ }
+ if _, err := arc.GetSize(bg, exampleBlock.Cid()); !ipld.IsNotFound(err) {
+ t.Fatal("getsize was true but there is no such block")
+ }
+
+ untrap(cd)
+
+ if err := arc.Put(bg, exampleBlock); err != nil {
+ t.Fatal(err)
+ }
+
+ trap("has hit datastore", cd, t)
+
+ if has, err := arc.Has(bg, exampleBlock.Cid()); !has || err != nil {
+ t.Fatal("has returned invalid result")
+ }
+ if blockSize, err := arc.GetSize(bg, exampleBlock.Cid()); blockSize == -1 || err != nil {
+ t.Fatal("getsize returned invalid result", blockSize, err)
+ }
+}
+
+func TestGetAndDeleteFalseShortCircuit(t *testing.T) {
+ arc, _, cd := createStores(t)
+
+ arc.Has(bg, exampleBlock.Cid())
+ arc.GetSize(bg, exampleBlock.Cid())
+
+ trap("get hit datastore", cd, t)
+
+ if bl, err := arc.Get(bg, exampleBlock.Cid()); bl != nil || !ipld.IsNotFound(err) {
+ t.Fatal("get returned invalid result")
+ }
+
+ if arc.DeleteBlock(bg, exampleBlock.Cid()) != nil {
+ t.Fatal("expected deletes to be idempotent")
+ }
+}
+
+func TestArcCreationFailure(t *testing.T) {
+ if arc, err := newARCCachedBS(context.TODO(), nil, -1); arc != nil || err == nil {
+ t.Fatal("expected error and no cache")
+ }
+}
+
+func TestInvalidKey(t *testing.T) {
+ arc, _, _ := createStores(t)
+
+ bl, err := arc.Get(bg, cid.Cid{})
+
+ if bl != nil {
+ t.Fatal("blocks should be nil")
+ }
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestHasAfterSucessfulGetIsCached(t *testing.T) {
+ arc, bs, cd := createStores(t)
+
+ bs.Put(bg, exampleBlock)
+
+ arc.Get(bg, exampleBlock.Cid())
+
+ trap("has hit datastore", cd, t)
+ arc.Has(bg, exampleBlock.Cid())
+}
+
+func TestGetSizeAfterSucessfulGetIsCached(t *testing.T) {
+ arc, bs, cd := createStores(t)
+
+ bs.Put(bg, exampleBlock)
+
+ arc.Get(bg, exampleBlock.Cid())
+
+ trap("has hit datastore", cd, t)
+ arc.GetSize(bg, exampleBlock.Cid())
+}
+
+func TestGetSizeAfterSucessfulHas(t *testing.T) {
+ arc, bs, _ := createStores(t)
+
+ bs.Put(bg, exampleBlock)
+ has, err := arc.Has(bg, exampleBlock.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !has {
+ t.Fatal("expected to have block")
+ }
+
+ if size, err := arc.GetSize(bg, exampleBlock.Cid()); err != nil {
+ t.Fatal(err)
+ } else if size != len(exampleBlock.RawData()) {
+ t.Fatalf("expected size %d, got %d", len(exampleBlock.RawData()), size)
+ }
+}
+
+func TestGetSizeMissingZeroSizeBlock(t *testing.T) {
+ arc, bs, cd := createStores(t)
+ emptyBlock := blocks.NewBlock([]byte{})
+ missingBlock := blocks.NewBlock([]byte("missingBlock"))
+
+ bs.Put(bg, emptyBlock)
+
+ arc.Get(bg, emptyBlock.Cid())
+
+ trap("has hit datastore", cd, t)
+ if blockSize, err := arc.GetSize(bg, emptyBlock.Cid()); blockSize != 0 || err != nil {
+ t.Fatal("getsize returned invalid result")
+ }
+ untrap(cd)
+
+ arc.Get(bg, missingBlock.Cid())
+
+ trap("has hit datastore", cd, t)
+ if _, err := arc.GetSize(bg, missingBlock.Cid()); !ipld.IsNotFound(err) {
+ t.Fatal("getsize returned invalid result")
+ }
+}
+
+func TestDifferentKeyObjectsWork(t *testing.T) {
+ arc, bs, cd := createStores(t)
+
+ bs.Put(bg, exampleBlock)
+
+ arc.Get(bg, exampleBlock.Cid())
+
+ trap("has hit datastore", cd, t)
+ cidstr := exampleBlock.Cid().String()
+
+ ncid, err := cid.Decode(cidstr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ arc.Has(bg, ncid)
+}
+
+func TestPutManyCaches(t *testing.T) {
+ t.Run("happy path PutMany", func(t *testing.T) {
+ arc, _, cd := createStores(t)
+ arc.PutMany(bg, []blocks.Block{exampleBlock})
+
+ trap("has hit datastore", cd, t)
+ arc.Has(bg, exampleBlock.Cid())
+ arc.GetSize(bg, exampleBlock.Cid())
+ untrap(cd)
+ arc.DeleteBlock(bg, exampleBlock.Cid())
+
+ arc.Put(bg, exampleBlock)
+		trap("PutMany has hit datastore", cd, t)
+ arc.PutMany(bg, []blocks.Block{exampleBlock})
+ })
+
+ t.Run("PutMany with duplicates", func(t *testing.T) {
+ arc, _, cd := createStores(t)
+ arc.PutMany(bg, []blocks.Block{exampleBlock, exampleBlock})
+
+ trap("has hit datastore", cd, t)
+ arc.Has(bg, exampleBlock.Cid())
+ arc.GetSize(bg, exampleBlock.Cid())
+ untrap(cd)
+ arc.DeleteBlock(bg, exampleBlock.Cid())
+
+ arc.Put(bg, exampleBlock)
+		trap("PutMany has hit datastore", cd, t)
+ arc.PutMany(bg, []blocks.Block{exampleBlock})
+ })
+}
+
+func BenchmarkARCCacheConcurrentOps(b *testing.B) {
+ // ~4k blocks seems high enough to be realistic,
+ // but low enough to cause collisions.
+ // Keep it as a power of 2, to simplify code below.
+ const numBlocks = 4 << 10
+
+ dummyBlocks := make([]blocks.Block, numBlocks)
+
+ {
+ // scope dummyRand to prevent its unsafe concurrent use below
+ dummyRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for i := range dummyBlocks {
+ dummy := make([]byte, 32)
+ if _, err := io.ReadFull(dummyRand, dummy); err != nil {
+ b.Fatal(err)
+ }
+ dummyBlocks[i] = blocks.NewBlock(dummy)
+ }
+ }
+
+ // Each test begins with half the blocks present in the cache.
+ // This allows test cases to have both hits and misses,
+ // regardless of whether or not they do Puts.
+ putHalfBlocks := func(arc *arccache) {
+ for i, block := range dummyBlocks {
+ if i%2 == 0 {
+ if err := arc.Put(bg, block); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+ }
+
+ // We always mix just two operations at a time.
+ const numOps = 2
+ var testOps = []struct {
+ name string
+ ops [numOps]func(*arccache, blocks.Block)
+ }{
+ {"PutDelete", [...]func(*arccache, blocks.Block){
+ func(arc *arccache, block blocks.Block) {
+ arc.Put(bg, block)
+ },
+ func(arc *arccache, block blocks.Block) {
+ arc.DeleteBlock(bg, block.Cid())
+ },
+ }},
+ {"GetDelete", [...]func(*arccache, blocks.Block){
+ func(arc *arccache, block blocks.Block) {
+ arc.Get(bg, block.Cid())
+ },
+ func(arc *arccache, block blocks.Block) {
+ arc.DeleteBlock(bg, block.Cid())
+ },
+ }},
+ {"GetPut", [...]func(*arccache, blocks.Block){
+ func(arc *arccache, block blocks.Block) {
+ arc.Get(bg, block.Cid())
+ },
+ func(arc *arccache, block blocks.Block) {
+ arc.Put(bg, block)
+ },
+ }},
+ }
+
+ for _, test := range testOps {
+ test := test // prevent reuse of the range var
+ b.Run(test.name, func(b *testing.B) {
+ arc, _, _ := createStores(b)
+ putHalfBlocks(arc)
+ var opCounts [numOps]uint64
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ b.RunParallel(func(pb *testing.PB) {
+ rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for pb.Next() {
+ n := rnd.Int63()
+ blockIdx := n % numBlocks // lower bits decide the block
+ opIdx := (n / numBlocks) % numOps // higher bits decide what operation
+
+ block := dummyBlocks[blockIdx]
+ op := test.ops[opIdx]
+ op(arc, block)
+
+ atomic.AddUint64(&opCounts[opIdx], 1)
+ }
+ })
+
+ // We expect each op to fire roughly an equal amount of times.
+ // Error otherwise, as that likely means the logic is wrong.
+ var minIdx, maxIdx int
+ var minCount, maxCount uint64
+ for opIdx, count := range opCounts {
+ if minCount == 0 || count < minCount {
+ minIdx = opIdx
+ minCount = count
+ }
+ if maxCount == 0 || count > maxCount {
+ maxIdx = opIdx
+ maxCount = count
+ }
+ }
+ // Skip this check if we ran few times, to avoid false positives.
+ if maxCount > 100 {
+ ratio := float64(maxCount) / float64(minCount)
+ if maxRatio := 2.0; ratio > maxRatio {
+ b.Fatalf("op %d ran %fx as many times as %d", maxIdx, ratio, minIdx)
+ }
+ }
+
+ })
+ }
+}
diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go
new file mode 100644
index 0000000000..22f198df1e
--- /dev/null
+++ b/blockstore/blockstore.go
@@ -0,0 +1,346 @@
+// Package blockstore implements a thin wrapper over a datastore, giving a
+// clean interface for Getting and Putting block objects.
+package blockstore
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "sync/atomic"
+
+ dshelp "github.com/ipfs/boxo/datastore/dshelp"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ dsns "github.com/ipfs/go-datastore/namespace"
+ dsq "github.com/ipfs/go-datastore/query"
+ ipld "github.com/ipfs/go-ipld-format"
+ logging "github.com/ipfs/go-log"
+ uatomic "go.uber.org/atomic"
+)
+
+var logger = logging.Logger("blockstore")
+
+// BlockPrefix namespaces blockstore datastores
+var BlockPrefix = ds.NewKey("blocks")
+
+// ErrHashMismatch is an error returned when the hash of a block
+// is different than expected.
+var ErrHashMismatch = errors.New("block in storage has different hash than requested")
+
+// Blockstore wraps a Datastore with block-centered methods and provides a
+// layer of abstraction that allows different caching strategies to be added.
+type Blockstore interface {
+ DeleteBlock(context.Context, cid.Cid) error
+ Has(context.Context, cid.Cid) (bool, error)
+ Get(context.Context, cid.Cid) (blocks.Block, error)
+
+ // GetSize returns the CIDs mapped BlockSize
+ GetSize(context.Context, cid.Cid) (int, error)
+
+ // Put puts a given block to the underlying datastore
+ Put(context.Context, blocks.Block) error
+
+ // PutMany puts a slice of blocks at the same time using batching
+ // capabilities of the underlying datastore whenever possible.
+ PutMany(context.Context, []blocks.Block) error
+
+ // AllKeysChan returns a channel from which
+ // the CIDs in the Blockstore can be read. It should respect
+ // the given context, closing the channel if it becomes Done.
+ AllKeysChan(ctx context.Context) (<-chan cid.Cid, error)
+
+ // HashOnRead specifies if every read block should be
+ // rehashed to make sure it matches its CID.
+ HashOnRead(enabled bool)
+}
+
+// Viewer can be implemented by blockstores that offer zero-copy access to
+// values.
+//
+// Callers of View must not mutate or retain the byte slice, as it could be
+// an mmapped memory region, or a pooled byte buffer.
+//
+// View is especially suitable for deserialising in place.
+//
+// The callback will only be called if the query operation is successful (and
+// the block is found); otherwise, the error will be propagated. Errors returned
+// by the callback will be propagated as well.
+type Viewer interface {
+ View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error
+}
+
+// GCLocker abstract functionality to lock a blockstore when performing
+// garbage-collection operations.
+type GCLocker interface {
+ // GCLock locks the blockstore for garbage collection. No operations
+	// that expect to finish with a pin should occur simultaneously.
+ // Reading during GC is safe, and requires no lock.
+ GCLock(context.Context) Unlocker
+
+ // PinLock locks the blockstore for sequences of puts expected to finish
+ // with a pin (before GC). Multiple put->pin sequences can write through
+	// at the same time, but no GC should happen simultaneously.
+ // Reading during Pinning is safe, and requires no lock.
+ PinLock(context.Context) Unlocker
+
+	// GCRequested returns true if GCLock has been called and is waiting to
+	// take the lock.
+ GCRequested(context.Context) bool
+}
+
+// GCBlockstore is a blockstore that can safely run garbage-collection
+// operations.
+type GCBlockstore interface {
+ Blockstore
+ GCLocker
+}
+
+// NewGCBlockstore returns a default implementation of GCBlockstore
+// using the given Blockstore and GCLocker.
+func NewGCBlockstore(bs Blockstore, gcl GCLocker) GCBlockstore {
+ return gcBlockstore{bs, gcl}
+}
+
+type gcBlockstore struct {
+ Blockstore
+ GCLocker
+}
+
+// Option is a default implementation Blockstore option
+type Option struct {
+ f func(bs *blockstore)
+}
+
+// WriteThrough skips checking if the blockstore already has a block before
+// writing it.
+func WriteThrough() Option {
+ return Option{
+ func(bs *blockstore) {
+ bs.writeThrough = true
+ },
+ }
+}
+
+// NoPrefix avoids wrapping the blockstore into the BlockPrefix namespace
+// ("/blocks"), so keys will not be modified in any way.
+func NoPrefix() Option {
+ return Option{
+ func(bs *blockstore) {
+ bs.noPrefix = true
+ },
+ }
+}
+
+// NewBlockstore returns a default Blockstore implementation
+// using the provided datastore.Batching backend.
+func NewBlockstore(d ds.Batching, opts ...Option) Blockstore {
+ bs := &blockstore{
+ datastore: d,
+ rehash: uatomic.NewBool(false),
+ }
+
+ for _, o := range opts {
+ o.f(bs)
+ }
+
+ if !bs.noPrefix {
+ bs.datastore = dsns.Wrap(bs.datastore, BlockPrefix)
+ }
+ return bs
+}
+
+// NewBlockstoreNoPrefix returns a default Blockstore implementation
+// using the provided datastore.Batching backend.
+// This constructor does not modify input keys in any way
+//
+// Deprecated: Use NewBlockstore with the NoPrefix option instead.
+func NewBlockstoreNoPrefix(d ds.Batching) Blockstore {
+ return NewBlockstore(d, NoPrefix())
+}
+
+type blockstore struct {
+ datastore ds.Batching
+
+ rehash *uatomic.Bool
+ writeThrough bool
+ noPrefix bool
+}
+
+func (bs *blockstore) HashOnRead(enabled bool) {
+ bs.rehash.Store(enabled)
+}
+
+func (bs *blockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+ if !k.Defined() {
+ logger.Error("undefined cid in blockstore")
+ return nil, ipld.ErrNotFound{Cid: k}
+ }
+ bdata, err := bs.datastore.Get(ctx, dshelp.MultihashToDsKey(k.Hash()))
+ if err == ds.ErrNotFound {
+ return nil, ipld.ErrNotFound{Cid: k}
+ }
+ if err != nil {
+ return nil, err
+ }
+ if bs.rehash.Load() {
+ rbcid, err := k.Prefix().Sum(bdata)
+ if err != nil {
+ return nil, err
+ }
+
+ if !rbcid.Equals(k) {
+ return nil, ErrHashMismatch
+ }
+
+ return blocks.NewBlockWithCid(bdata, rbcid)
+ }
+ return blocks.NewBlockWithCid(bdata, k)
+}
+
+func (bs *blockstore) Put(ctx context.Context, block blocks.Block) error {
+ k := dshelp.MultihashToDsKey(block.Cid().Hash())
+
+ // Has is cheaper than Put, so see if we already have it
+ if !bs.writeThrough {
+ exists, err := bs.datastore.Has(ctx, k)
+ if err == nil && exists {
+ return nil // already stored.
+ }
+ }
+ return bs.datastore.Put(ctx, k, block.RawData())
+}
+
+func (bs *blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
+ if len(blocks) == 1 {
+ // performance fast-path
+ return bs.Put(ctx, blocks[0])
+ }
+
+ t, err := bs.datastore.Batch(ctx)
+ if err != nil {
+ return err
+ }
+ for _, b := range blocks {
+ k := dshelp.MultihashToDsKey(b.Cid().Hash())
+
+ if !bs.writeThrough {
+ exists, err := bs.datastore.Has(ctx, k)
+ if err == nil && exists {
+ continue
+ }
+ }
+
+ err = t.Put(ctx, k, b.RawData())
+ if err != nil {
+ return err
+ }
+ }
+ return t.Commit(ctx)
+}
+
+func (bs *blockstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
+ return bs.datastore.Has(ctx, dshelp.MultihashToDsKey(k.Hash()))
+}
+
+func (bs *blockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
+ size, err := bs.datastore.GetSize(ctx, dshelp.MultihashToDsKey(k.Hash()))
+ if err == ds.ErrNotFound {
+ return -1, ipld.ErrNotFound{Cid: k}
+ }
+ return size, err
+}
+
+func (bs *blockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
+ return bs.datastore.Delete(ctx, dshelp.MultihashToDsKey(k.Hash()))
+}
+
+// AllKeysChan runs a query for keys from the blockstore.
+// this is very simplistic, in the future, take dsq.Query as a param?
+//
+// AllKeysChan respects context.
+func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+
+ // KeysOnly, because that would be _a lot_ of data.
+ q := dsq.Query{KeysOnly: true}
+ res, err := bs.datastore.Query(ctx, q)
+ if err != nil {
+ return nil, err
+ }
+
+ output := make(chan cid.Cid, dsq.KeysOnlyBufSize)
+ go func() {
+ defer func() {
+ res.Close() // ensure exit (signals early exit, too)
+ close(output)
+ }()
+
+ for {
+ e, ok := res.NextSync()
+ if !ok {
+ return
+ }
+ if e.Error != nil {
+ logger.Errorf("blockstore.AllKeysChan got err: %s", e.Error)
+ return
+ }
+
+			// Parse the binary multihash back out of the datastore key.
+ bk, err := dshelp.BinaryFromDsKey(ds.RawKey(e.Key))
+ if err != nil {
+ logger.Warnf("error parsing key from binary: %s", err)
+ continue
+ }
+ k := cid.NewCidV1(cid.Raw, bk)
+ select {
+ case <-ctx.Done():
+ return
+ case output <- k:
+ }
+ }
+ }()
+
+ return output, nil
+}
+
+// NewGCLocker returns a default implementation of
+// GCLocker using standard [RW] mutexes.
+func NewGCLocker() GCLocker {
+ return &gclocker{}
+}
+
+type gclocker struct {
+ lk sync.RWMutex
+ gcreq int32
+}
+
+// Unlocker represents an object which can Unlock
+// something.
+type Unlocker interface {
+ Unlock(context.Context)
+}
+
+type unlocker struct {
+ unlock func()
+}
+
+func (u *unlocker) Unlock(_ context.Context) {
+ u.unlock()
+	u.unlock = nil // ensure it's not called twice
+}
+
+func (bs *gclocker) GCLock(_ context.Context) Unlocker {
+ atomic.AddInt32(&bs.gcreq, 1)
+ bs.lk.Lock()
+ atomic.AddInt32(&bs.gcreq, -1)
+ return &unlocker{bs.lk.Unlock}
+}
+
+func (bs *gclocker) PinLock(_ context.Context) Unlocker {
+ bs.lk.RLock()
+ return &unlocker{bs.lk.RUnlock}
+}
+
+func (bs *gclocker) GCRequested(_ context.Context) bool {
+ return atomic.LoadInt32(&bs.gcreq) > 0
+}
diff --git a/blockstore/blockstore_test.go b/blockstore/blockstore_test.go
new file mode 100644
index 0000000000..afcaec40e3
--- /dev/null
+++ b/blockstore/blockstore_test.go
@@ -0,0 +1,333 @@
+package blockstore
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "testing"
+
+ u "github.com/ipfs/boxo/util"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ dsq "github.com/ipfs/go-datastore/query"
+ ds_sync "github.com/ipfs/go-datastore/sync"
+ ipld "github.com/ipfs/go-ipld-format"
+)
+
+func TestGetWhenKeyNotPresent(t *testing.T) {
+ bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+ c := cid.NewCidV0(u.Hash([]byte("stuff")))
+ bl, err := bs.Get(bg, c)
+
+ if bl != nil {
+ t.Error("nil block expected")
+ }
+ if err == nil {
+ t.Error("error expected, got nil")
+ }
+}
+
+func TestGetWhenKeyIsNil(t *testing.T) {
+ bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+ _, err := bs.Get(bg, cid.Cid{})
+ if !ipld.IsNotFound(err) {
+ t.Fail()
+ }
+}
+
+func TestPutThenGetBlock(t *testing.T) {
+ bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+ block := blocks.NewBlock([]byte("some data"))
+
+ err := bs.Put(bg, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ blockFromBlockstore, err := bs.Get(bg, block.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(block.RawData(), blockFromBlockstore.RawData()) {
+ t.Fail()
+ }
+}
+
+func TestCidv0v1(t *testing.T) {
+ bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+ block := blocks.NewBlock([]byte("some data"))
+
+ err := bs.Put(bg, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ blockFromBlockstore, err := bs.Get(bg, cid.NewCidV1(cid.DagProtobuf, block.Cid().Hash()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(block.RawData(), blockFromBlockstore.RawData()) {
+ t.Fail()
+ }
+}
+
+func TestPutThenGetSizeBlock(t *testing.T) {
+ bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+ block := blocks.NewBlock([]byte("some data"))
+ missingBlock := blocks.NewBlock([]byte("missingBlock"))
+ emptyBlock := blocks.NewBlock([]byte{})
+
+ err := bs.Put(bg, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ blockSize, err := bs.GetSize(bg, block.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(block.RawData()) != blockSize {
+ t.Fail()
+ }
+
+ err = bs.Put(bg, emptyBlock)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if blockSize, err := bs.GetSize(bg, emptyBlock.Cid()); blockSize != 0 || err != nil {
+ t.Fatal(err)
+ }
+
+ if blockSize, err := bs.GetSize(bg, missingBlock.Cid()); blockSize != -1 || err == nil {
+ t.Fatal("getsize returned invalid result")
+ }
+}
+
+type countHasDS struct {
+ ds.Datastore
+ hasCount int
+}
+
+func (ds *countHasDS) Has(ctx context.Context, key ds.Key) (exists bool, err error) {
+ ds.hasCount += 1
+ return ds.Datastore.Has(ctx, key)
+}
+
+func TestPutUsesHas(t *testing.T) {
+ // Some datastores rely on the implementation detail that Put checks Has
+ // first, to avoid overriding existing objects' metadata. This test ensures
+ // that Blockstore continues to behave this way.
+ // Please ping https://github.com/ipfs/boxo/blockstore/pull/47 if this
+ // behavior is being removed.
+ ds := &countHasDS{
+ Datastore: ds.NewMapDatastore(),
+ }
+ bs := NewBlockstore(ds_sync.MutexWrap(ds))
+ bl := blocks.NewBlock([]byte("some data"))
+ if err := bs.Put(bg, bl); err != nil {
+ t.Fatal(err)
+ }
+ if err := bs.Put(bg, bl); err != nil {
+ t.Fatal(err)
+ }
+ if ds.hasCount != 2 {
+ t.Errorf("Blockstore did not call Has before attempting Put, this breaks compatibility")
+ }
+}
+
+func TestHashOnRead(t *testing.T) {
+ orginalDebug := u.Debug
+ defer (func() {
+ u.Debug = orginalDebug
+ })()
+ u.Debug = false
+
+ bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
+ bl := blocks.NewBlock([]byte("some data"))
+ blBad, err := blocks.NewBlockWithCid([]byte("some other data"), bl.Cid())
+ if err != nil {
+ t.Fatal("debug is off, still got an error")
+ }
+ bl2 := blocks.NewBlock([]byte("some other data"))
+ bs.Put(bg, blBad)
+ bs.Put(bg, bl2)
+ bs.HashOnRead(true)
+
+ if _, err := bs.Get(bg, bl.Cid()); err != ErrHashMismatch {
+ t.Fatalf("expected '%v' got '%v'\n", ErrHashMismatch, err)
+ }
+
+ if b, err := bs.Get(bg, bl2.Cid()); err != nil || b.String() != bl2.String() {
+ t.Fatal("got wrong blocks")
+ }
+}
+
+func newBlockStoreWithKeys(t *testing.T, d ds.Datastore, N int) (Blockstore, []cid.Cid) {
+ if d == nil {
+ d = ds.NewMapDatastore()
+ }
+ bs := NewBlockstore(ds_sync.MutexWrap(d))
+
+ keys := make([]cid.Cid, N)
+ for i := 0; i < N; i++ {
+ block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
+ err := bs.Put(bg, block)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keys[i] = block.Cid()
+ }
+ return bs, keys
+}
+
+func collect(ch <-chan cid.Cid) []cid.Cid {
+ var keys []cid.Cid
+ for k := range ch {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func TestAllKeysSimple(t *testing.T) {
+ bs, keys := newBlockStoreWithKeys(t, nil, 100)
+
+ ctx := context.Background()
+ ch, err := bs.AllKeysChan(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keys2 := collect(ch)
+
+ // for _, k2 := range keys2 {
+ // t.Log("found ", k2.B58String())
+ // }
+
+ expectMatches(t, keys, keys2)
+}
+
+func TestAllKeysRespectsContext(t *testing.T) {
+ N := 100
+
+ d := &queryTestDS{ds: ds.NewMapDatastore()}
+ bs, _ := newBlockStoreWithKeys(t, d, N)
+
+ started := make(chan struct{}, 1)
+ done := make(chan struct{}, 1)
+ errors := make(chan error, 100)
+
+ getKeys := func(ctx context.Context) {
+ started <- struct{}{}
+ ch, err := bs.AllKeysChan(ctx) // once without cancelling
+ if err != nil {
+ errors <- err
+ }
+ _ = collect(ch)
+ done <- struct{}{}
+ errors <- nil // a nil one to signal break
+ }
+
+ var results dsq.Results
+ var resultsmu = make(chan struct{})
+ resultChan := make(chan dsq.Result)
+ d.SetFunc(func(q dsq.Query) (dsq.Results, error) {
+ results = dsq.ResultsWithChan(q, resultChan)
+ resultsmu <- struct{}{}
+ return results, nil
+ })
+
+ go getKeys(context.Background())
+
+ // make sure it's waiting.
+ <-started
+ <-resultsmu
+ select {
+ case <-done:
+ t.Fatal("sync is wrong")
+ case <-results.Process().Closing():
+ t.Fatal("should not be closing")
+ case <-results.Process().Closed():
+ t.Fatal("should not be closed")
+ default:
+ }
+
+ e := dsq.Entry{Key: BlockPrefix.ChildString("foo").String()}
+ resultChan <- dsq.Result{Entry: e} // let it go.
+ close(resultChan)
+ <-done // should be done now.
+ <-results.Process().Closed() // should be closed now
+
+ // print any errors
+ for err := range errors {
+ if err == nil {
+ break
+ }
+ t.Error(err)
+ }
+
+}
+
+func expectMatches(t *testing.T, expect, actual []cid.Cid) {
+ t.Helper()
+
+ if len(expect) != len(actual) {
+ t.Errorf("expect and actual differ: %d != %d", len(expect), len(actual))
+ }
+
+ actualSet := make(map[string]bool, len(actual))
+ for _, k := range actual {
+ actualSet[string(k.Hash())] = true
+ }
+
+ for _, ek := range expect {
+ if !actualSet[string(ek.Hash())] {
+ t.Error("expected key not found: ", ek)
+ }
+ }
+}
+
+type queryTestDS struct {
+ cb func(q dsq.Query) (dsq.Results, error)
+ ds ds.Datastore
+}
+
+func (c *queryTestDS) SetFunc(f func(dsq.Query) (dsq.Results, error)) { c.cb = f }
+
+func (c *queryTestDS) Put(ctx context.Context, key ds.Key, value []byte) (err error) {
+ return c.ds.Put(ctx, key, value)
+}
+
+func (c *queryTestDS) Get(ctx context.Context, key ds.Key) (value []byte, err error) {
+ return c.ds.Get(ctx, key)
+}
+
+func (c *queryTestDS) Has(ctx context.Context, key ds.Key) (exists bool, err error) {
+ return c.ds.Has(ctx, key)
+}
+
+func (c *queryTestDS) GetSize(ctx context.Context, key ds.Key) (size int, err error) {
+ return c.ds.GetSize(ctx, key)
+}
+
+func (c *queryTestDS) Delete(ctx context.Context, key ds.Key) (err error) {
+ return c.ds.Delete(ctx, key)
+}
+
+func (c *queryTestDS) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
+ if c.cb != nil {
+ return c.cb(q)
+ }
+ return c.ds.Query(ctx, q)
+}
+
+func (c *queryTestDS) Sync(ctx context.Context, key ds.Key) error {
+ return c.ds.Sync(ctx, key)
+}
+
+func (c *queryTestDS) Batch(_ context.Context) (ds.Batch, error) {
+ return ds.NewBasicBatch(c), nil
+}
+func (c *queryTestDS) Close() error {
+ return nil
+}
diff --git a/blockstore/bloom_cache.go b/blockstore/bloom_cache.go
new file mode 100644
index 0000000000..fddab1e53d
--- /dev/null
+++ b/blockstore/bloom_cache.go
@@ -0,0 +1,234 @@
+package blockstore
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ bloom "github.com/ipfs/bbloom"
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ipld "github.com/ipfs/go-ipld-format"
+ metrics "github.com/ipfs/go-metrics-interface"
+)
+
+// bloomCached returns a Blockstore that caches Has requests using a Bloom
+// filter. bloomSize is size of bloom filter in bytes. hashCount specifies the
+// number of hashing functions in the bloom filter (usually known as k).
+func bloomCached(ctx context.Context, bs Blockstore, bloomSize, hashCount int) (*bloomcache, error) {
+ bl, err := bloom.New(float64(bloomSize), float64(hashCount))
+ if err != nil {
+ return nil, err
+ }
+ bc := &bloomcache{
+ blockstore: bs,
+ bloom: bl,
+ hits: metrics.NewCtx(ctx, "bloom.hits_total",
+ "Number of cache hits in bloom cache").Counter(),
+ total: metrics.NewCtx(ctx, "bloom_total",
+ "Total number of requests to bloom cache").Counter(),
+ buildChan: make(chan struct{}),
+ }
+ if v, ok := bs.(Viewer); ok {
+ bc.viewer = v
+ }
+ go func() {
+ err := bc.build(ctx)
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ logger.Warn("Cache rebuild closed by context finishing: ", err)
+ default:
+ logger.Error(err)
+ }
+ return
+ }
+ if metrics.Active() {
+ fill := metrics.NewCtx(ctx, "bloom_fill_ratio",
+ "Ratio of bloom filter fullnes, (updated once a minute)").Gauge()
+
+ t := time.NewTicker(1 * time.Minute)
+ defer t.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-t.C:
+ fill.Set(bc.bloom.FillRatioTS())
+ }
+ }
+ }
+ }()
+ return bc, nil
+}
+
+type bloomcache struct {
+ active int32
+
+ bloom *bloom.Bloom
+ buildErr error
+
+ buildChan chan struct{}
+ blockstore Blockstore
+ viewer Viewer
+
+ // Statistics
+ hits metrics.Counter
+ total metrics.Counter
+}
+
+var _ Blockstore = (*bloomcache)(nil)
+var _ Viewer = (*bloomcache)(nil)
+
+func (b *bloomcache) BloomActive() bool {
+ return atomic.LoadInt32(&b.active) != 0
+}
+
+func (b *bloomcache) Wait(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-b.buildChan:
+ return b.buildErr
+ }
+}
+
+func (b *bloomcache) build(ctx context.Context) error {
+ logger.Debug("begin building bloomcache")
+ start := time.Now()
+ defer func() {
+ logger.Debugf("bloomcache build finished in %s", time.Since(start))
+ }()
+ defer close(b.buildChan)
+
+ ch, err := b.blockstore.AllKeysChan(ctx)
+ if err != nil {
+ b.buildErr = fmt.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err)
+ return b.buildErr
+ }
+ for {
+ select {
+ case key, ok := <-ch:
+ if !ok {
+ atomic.StoreInt32(&b.active, 1)
+ return nil
+ }
+ b.bloom.AddTS(key.Hash()) // Use binary key, the more compact the better
+ case <-ctx.Done():
+ b.buildErr = ctx.Err()
+ return b.buildErr
+ }
+ }
+}
+
+func (b *bloomcache) DeleteBlock(ctx context.Context, k cid.Cid) error {
+ if has, ok := b.hasCached(k); ok && !has {
+ return nil
+ }
+
+ return b.blockstore.DeleteBlock(ctx, k)
+}
+
+// if ok == false, has is inconclusive
+// if ok == true, then has responds to the question: is the key contained
+func (b *bloomcache) hasCached(k cid.Cid) (has bool, ok bool) {
+ b.total.Inc()
+ if !k.Defined() {
+ logger.Error("undefined in bloom cache")
+ // Return cache invalid so call to blockstore
+ // in case of invalid key is forwarded deeper
+ return false, false
+ }
+ if b.BloomActive() {
+ blr := b.bloom.HasTS(k.Hash())
+ if !blr { // not contained in bloom is only conclusive answer bloom gives
+ b.hits.Inc()
+ return false, true
+ }
+ }
+ return false, false
+}
+
+func (b *bloomcache) Has(ctx context.Context, k cid.Cid) (bool, error) {
+ if has, ok := b.hasCached(k); ok {
+ return has, nil
+ }
+
+ return b.blockstore.Has(ctx, k)
+}
+
+func (b *bloomcache) GetSize(ctx context.Context, k cid.Cid) (int, error) {
+ if has, ok := b.hasCached(k); ok && !has {
+ return -1, ipld.ErrNotFound{Cid: k}
+ }
+
+ return b.blockstore.GetSize(ctx, k)
+}
+
+func (b *bloomcache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
+ if b.viewer == nil {
+ blk, err := b.Get(ctx, k)
+ if err != nil {
+ return err
+ }
+ return callback(blk.RawData())
+ }
+
+ if has, ok := b.hasCached(k); ok && !has {
+ return ipld.ErrNotFound{Cid: k}
+ }
+ return b.viewer.View(ctx, k, callback)
+}
+
+func (b *bloomcache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+ if has, ok := b.hasCached(k); ok && !has {
+ return nil, ipld.ErrNotFound{Cid: k}
+ }
+
+ return b.blockstore.Get(ctx, k)
+}
+
+func (b *bloomcache) Put(ctx context.Context, bl blocks.Block) error {
+ // See comment in PutMany
+ err := b.blockstore.Put(ctx, bl)
+ if err == nil {
+ b.bloom.AddTS(bl.Cid().Hash())
+ }
+ return err
+}
+
+func (b *bloomcache) PutMany(ctx context.Context, bs []blocks.Block) error {
+	// The bloom cache gives a conclusive result only if a key is NOT contained;
+	// to reduce the number of puts we would need conclusive information that a
+	// block IS contained. This means PutMany can't be improved with the bloom
+	// cache, so we just do a passthrough.
+ err := b.blockstore.PutMany(ctx, bs)
+ if err != nil {
+ return err
+ }
+ for _, bl := range bs {
+ b.bloom.AddTS(bl.Cid().Hash())
+ }
+ return nil
+}
+
+func (b *bloomcache) HashOnRead(enabled bool) {
+ b.blockstore.HashOnRead(enabled)
+}
+
+func (b *bloomcache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return b.blockstore.AllKeysChan(ctx)
+}
+
+func (b *bloomcache) GCLock(ctx context.Context) Unlocker {
+ return b.blockstore.(GCBlockstore).GCLock(ctx)
+}
+
+func (b *bloomcache) PinLock(ctx context.Context) Unlocker {
+ return b.blockstore.(GCBlockstore).PinLock(ctx)
+}
+
+func (b *bloomcache) GCRequested(ctx context.Context) bool {
+ return b.blockstore.(GCBlockstore).GCRequested(ctx)
+}
diff --git a/blockstore/bloom_cache_test.go b/blockstore/bloom_cache_test.go
new file mode 100644
index 0000000000..3c998c5512
--- /dev/null
+++ b/blockstore/bloom_cache_test.go
@@ -0,0 +1,215 @@
+package blockstore
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ blocks "github.com/ipfs/go-block-format"
+ ds "github.com/ipfs/go-datastore"
+ dsq "github.com/ipfs/go-datastore/query"
+ syncds "github.com/ipfs/go-datastore/sync"
+ ipld "github.com/ipfs/go-ipld-format"
+)
+
+// bg is a shared background context for tests that don't need cancellation.
+var bg = context.Background()
+
+// testBloomCached builds a bloom-only cached blockstore (ARC layer
+// disabled) around bs for use in tests.
+func testBloomCached(ctx context.Context, bs Blockstore) (*bloomcache, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	opts := DefaultCacheOpts()
+	opts.HasARCCacheSize = 0
+	bbs, err := CachedBlockstore(ctx, bs, opts)
+	if err == nil {
+		return bbs.(*bloomcache), nil
+	}
+	return nil, err
+}
+
+// TestPutManyAddsToBloom verifies that blocks written through PutMany
+// (including an empty block) are visible via Has/GetSize, and that a
+// never-written block is reported absent.
+func TestPutManyAddsToBloom(t *testing.T) {
+	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	cachedbs, err := testBloomCached(ctx, bs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cachedbs.Wait(ctx); err != nil {
+		t.Fatalf("Failed while waiting for the filter to build: %d", cachedbs.bloom.ElementsAdded())
+	}
+
+	block1 := blocks.NewBlock([]byte("foo"))
+	block2 := blocks.NewBlock([]byte("bar")) // intentionally never stored
+	emptyBlock := blocks.NewBlock([]byte{})
+
+	cachedbs.PutMany(bg, []blocks.Block{block1, emptyBlock})
+	has, err := cachedbs.Has(bg, block1.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	blockSize, err := cachedbs.GetSize(bg, block1.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if blockSize == -1 || !has {
+		t.Fatal("added block is reported missing")
+	}
+
+	has, err = cachedbs.Has(bg, block2.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	blockSize, err = cachedbs.GetSize(bg, block2.Cid())
+	if err != nil && !ipld.IsNotFound(err) {
+		t.Fatal(err)
+	}
+	if blockSize > -1 || has {
+		t.Fatal("not added block is reported to be in blockstore")
+	}
+
+	// The empty block must still round-trip: present, size zero.
+	has, err = cachedbs.Has(bg, emptyBlock.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	blockSize, err = cachedbs.GetSize(bg, emptyBlock.Cid())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if blockSize != 0 || !has {
+		t.Fatal("added block is reported missing")
+	}
+}
+
+// TestReturnsErrorWhenSizeNegative checks that bloomCached rejects a
+// negative bloom-filter size.
+func TestReturnsErrorWhenSizeNegative(t *testing.T) {
+	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
+	_, err := bloomCached(context.Background(), bs, -1, 1)
+	if err == nil {
+		t.Fail()
+	}
+}
+// TestHasIsBloomCached checks that Has on absent keys is mostly answered
+// by the bloom filter (≤5% of lookups reach the datastore), and that
+// writes still pass through to the datastore.
+func TestHasIsBloomCached(t *testing.T) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	bs := NewBlockstore(syncds.MutexWrap(cd))
+
+	for i := 0; i < 1000; i++ {
+		bs.Put(bg, blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	cachedbs, err := testBloomCached(ctx, bs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cachedbs.Wait(ctx); err != nil {
+		t.Fatalf("Failed while waiting for the filter to build: %d", cachedbs.bloom.ElementsAdded())
+	}
+
+	// Count how often a lookup falls through to the underlying datastore.
+	cacheFails := 0
+	cd.SetFunc(func() {
+		cacheFails++
+	})
+
+	// Keys offset by 2000 were never stored, so each datastore hit here is
+	// a bloom false positive.
+	for i := 0; i < 1000; i++ {
+		cachedbs.Has(bg, blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Cid())
+	}
+
+	if float64(cacheFails)/float64(1000) > float64(0.05) {
+		t.Fatalf("Bloom filter has cache miss rate of more than 5%%")
+	}
+
+	cacheFails = 0
+	block := blocks.NewBlock([]byte("newBlock"))
+
+	cachedbs.PutMany(bg, []blocks.Block{block})
+	if cacheFails != 2 {
+		t.Fatalf("expected two datastore hits: %d", cacheFails)
+	}
+	cachedbs.Put(bg, block)
+	if cacheFails != 3 {
+		t.Fatalf("expected datastore hit: %d", cacheFails)
+	}
+
+	if has, err := cachedbs.Has(bg, block.Cid()); !has || err != nil {
+		t.Fatal("has gave wrong response")
+	}
+
+	bl, err := cachedbs.Get(bg, block.Cid())
+	if bl.String() != block.String() {
+		t.Fatal("block data doesn't match")
+	}
+
+	if err != nil {
+		t.Fatal("there should't be an error")
+	}
+}
+
+var _ ds.Batching = (*callbackDatastore)(nil)
+
+// callbackDatastore wraps a datastore and invokes f before every
+// operation, letting tests count (or fail on) datastore accesses.
+type callbackDatastore struct {
+	sync.Mutex
+	f  func()
+	ds ds.Datastore
+}
+
+// SetFunc swaps the per-operation callback under the lock.
+func (c *callbackDatastore) SetFunc(f func()) {
+	c.Lock()
+	defer c.Unlock()
+	c.f = f
+}
+
+// CallF invokes the current callback under the lock.
+func (c *callbackDatastore) CallF() {
+	c.Lock()
+	defer c.Unlock()
+	c.f()
+}
+
+func (c *callbackDatastore) Put(ctx context.Context, key ds.Key, value []byte) (err error) {
+	c.CallF()
+	return c.ds.Put(ctx, key, value)
+}
+
+func (c *callbackDatastore) Get(ctx context.Context, key ds.Key) (value []byte, err error) {
+	c.CallF()
+	return c.ds.Get(ctx, key)
+}
+
+func (c *callbackDatastore) Has(ctx context.Context, key ds.Key) (exists bool, err error) {
+	c.CallF()
+	return c.ds.Has(ctx, key)
+}
+
+func (c *callbackDatastore) GetSize(ctx context.Context, key ds.Key) (size int, err error) {
+	c.CallF()
+	return c.ds.GetSize(ctx, key)
+}
+
+func (c *callbackDatastore) Close() error {
+	return nil
+}
+
+func (c *callbackDatastore) Delete(ctx context.Context, key ds.Key) (err error) {
+	c.CallF()
+	return c.ds.Delete(ctx, key)
+}
+
+func (c *callbackDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
+	c.CallF()
+	return c.ds.Query(ctx, q)
+}
+
+func (c *callbackDatastore) Sync(ctx context.Context, key ds.Key) error {
+	c.CallF()
+	return c.ds.Sync(ctx, key)
+}
+
+// Batch routes batched writes back through this wrapper so each batched
+// operation still triggers the callback.
+func (c *callbackDatastore) Batch(_ context.Context) (ds.Batch, error) {
+	return ds.NewBasicBatch(c), nil
+}
diff --git a/blockstore/caching.go b/blockstore/caching.go
new file mode 100644
index 0000000000..798b84ce2b
--- /dev/null
+++ b/blockstore/caching.go
@@ -0,0 +1,55 @@
+package blockstore
+
+import (
+ "context"
+ "errors"
+
+ metrics "github.com/ipfs/go-metrics-interface"
+)
+
+// CacheOpts wraps options for CachedBlockStore().
+// Next to each option is its approximate memory usage per unit.
+type CacheOpts struct {
+	HasBloomFilterSize   int // 1 byte
+	HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
+	HasARCCacheSize      int // 32 bytes
+}
+
+// DefaultCacheOpts returns a CacheOpts initialized with default values.
+func DefaultCacheOpts() CacheOpts {
+	return CacheOpts{
+		HasBloomFilterSize:   512 << 10,
+		HasBloomFilterHashes: 7,
+		HasARCCacheSize:      64 << 10,
+	}
+}
+
+// CachedBlockstore returns a blockstore wrapped in an ARCCache and
+// then in a bloom filter cache, if the options indicate it.
+// Negative option values are rejected; a non-zero bloom size requires a
+// non-zero hash count.
+func CachedBlockstore(
+	ctx context.Context,
+	bs Blockstore,
+	opts CacheOpts) (cbs Blockstore, err error) {
+	cbs = bs
+
+	if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 ||
+		opts.HasARCCacheSize < 0 {
+		return nil, errors.New("all options for cache need to be greater than zero")
+	}
+
+	if opts.HasBloomFilterSize != 0 && opts.HasBloomFilterHashes == 0 {
+		return nil, errors.New("bloom filter hash count can't be 0 when there is size set")
+	}
+
+	ctx = metrics.CtxSubScope(ctx, "bs.cache")
+
+	if opts.HasARCCacheSize > 0 {
+		cbs, err = newARCCachedBS(ctx, cbs, opts.HasARCCacheSize)
+		// Return immediately: previously a failure here was masked by the
+		// bloomCached call below, which would then wrap a nil blockstore.
+		if err != nil {
+			return nil, err
+		}
+	}
+	if opts.HasBloomFilterSize != 0 {
+		// *8 because of bytes to bits conversion
+		cbs, err = bloomCached(ctx, cbs, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cbs, nil
+}
diff --git a/blockstore/caching_test.go b/blockstore/caching_test.go
new file mode 100644
index 0000000000..16066ad18c
--- /dev/null
+++ b/blockstore/caching_test.go
@@ -0,0 +1,38 @@
+package blockstore
+
+import (
+ "context"
+ "testing"
+)
+
+// TestCachingOptsLessThanZero checks that each negative cache option is
+// rejected by CachedBlockstore.
+func TestCachingOptsLessThanZero(t *testing.T) {
+	opts := DefaultCacheOpts()
+	opts.HasARCCacheSize = -1
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("wrong ARC setting was not detected")
+	}
+
+	opts = DefaultCacheOpts()
+	opts.HasBloomFilterSize = -1
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("negative bloom size was not detected")
+	}
+
+	opts = DefaultCacheOpts()
+	opts.HasBloomFilterHashes = -1
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("negative hashes setting was not detected")
+	}
+}
+
+// TestBloomHashesAtZero checks that a positive bloom size with a zero
+// hash count is rejected.
+func TestBloomHashesAtZero(t *testing.T) {
+	opts := DefaultCacheOpts()
+	opts.HasBloomFilterHashes = 0
+
+	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
+		t.Error("zero hashes setting with positive size was not detected")
+	}
+}
diff --git a/blockstore/idstore.go b/blockstore/idstore.go
new file mode 100644
index 0000000000..25a6284c89
--- /dev/null
+++ b/blockstore/idstore.go
@@ -0,0 +1,123 @@
+package blockstore
+
+import (
+ "context"
+ "io"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ mh "github.com/multiformats/go-multihash"
+)
+
+// idstore wraps a BlockStore to add support for identity hashes: blocks
+// whose CID embeds the data itself are answered inline and never stored.
+type idstore struct {
+	bs     Blockstore
+	viewer Viewer // non-nil only when bs implements Viewer
+}
+
+var _ Blockstore = (*idstore)(nil)
+var _ Viewer = (*idstore)(nil)
+var _ io.Closer = (*idstore)(nil)
+
+// NewIdStore wraps bs with identity-hash handling, forwarding View
+// support when the underlying store provides it.
+func NewIdStore(bs Blockstore) Blockstore {
+	ids := &idstore{bs: bs}
+	if v, ok := bs.(Viewer); ok {
+		ids.viewer = v
+	}
+	return ids
+}
+
+// extractContents reports whether k is an identity-hash CID and, if so,
+// returns the data embedded in its digest.
+func extractContents(k cid.Cid) (bool, []byte) {
+	// Pre-check by calling Prefix(); this is much faster than extracting
+	// the full hash.
+	if k.Prefix().MhType != mh.IDENTITY {
+		return false, nil
+	}
+
+	dmh, err := mh.Decode(k.Hash())
+	if err != nil || dmh.Code != mh.IDENTITY {
+		return false, nil
+	}
+	return true, dmh.Digest
+}
+
+// DeleteBlock is a no-op for identity CIDs (nothing is stored for them);
+// otherwise it delegates to the wrapped blockstore.
+func (b *idstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
+	isId, _ := extractContents(k)
+	if isId {
+		return nil
+	}
+	return b.bs.DeleteBlock(ctx, k)
+}
+
+// Has always reports true for identity CIDs; otherwise it delegates.
+func (b *idstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
+	isId, _ := extractContents(k)
+	if isId {
+		return true, nil
+	}
+	return b.bs.Has(ctx, k)
+}
+
+// View invokes callback with the block's raw data, serving identity CIDs
+// straight from the digest. Falls back to Get when the wrapped store
+// does not implement Viewer.
+func (b *idstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
+	if b.viewer == nil {
+		blk, err := b.Get(ctx, k)
+		if err != nil {
+			return err
+		}
+		return callback(blk.RawData())
+	}
+	isId, bdata := extractContents(k)
+	if isId {
+		return callback(bdata)
+	}
+	return b.viewer.View(ctx, k, callback)
+}
+
+// GetSize returns the digest length for identity CIDs; otherwise it
+// delegates to the wrapped blockstore.
+func (b *idstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
+	isId, bdata := extractContents(k)
+	if isId {
+		return len(bdata), nil
+	}
+	return b.bs.GetSize(ctx, k)
+}
+
+// Get synthesizes a block from the digest for identity CIDs; otherwise
+// it delegates to the wrapped blockstore.
+func (b *idstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
+	isId, bdata := extractContents(k)
+	if isId {
+		return blocks.NewBlockWithCid(bdata, k)
+	}
+	return b.bs.Get(ctx, k)
+}
+
+// Put skips identity-hashed blocks (their data lives in the CID);
+// everything else is stored in the wrapped blockstore.
+func (b *idstore) Put(ctx context.Context, bl blocks.Block) error {
+	isId, _ := extractContents(bl.Cid())
+	if isId {
+		return nil
+	}
+	return b.bs.Put(ctx, bl)
+}
+
+// PutMany stores bs, filtering out identity-hashed blocks first.
+func (b *idstore) PutMany(ctx context.Context, bs []blocks.Block) error {
+	toPut := make([]blocks.Block, 0, len(bs))
+	for _, bl := range bs {
+		isId, _ := extractContents(bl.Cid())
+		if isId {
+			continue
+		}
+		toPut = append(toPut, bl)
+	}
+	return b.bs.PutMany(ctx, toPut)
+}
+
+// HashOnRead delegates to the wrapped blockstore.
+func (b *idstore) HashOnRead(enabled bool) {
+	b.bs.HashOnRead(enabled)
+}
+
+// AllKeysChan delegates to the wrapped blockstore; identity CIDs never
+// appear since they are never stored.
+func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return b.bs.AllKeysChan(ctx)
+}
+
+// Close closes the wrapped blockstore if it implements io.Closer.
+func (b *idstore) Close() error {
+	if c, ok := b.bs.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
diff --git a/blockstore/idstore_test.go b/blockstore/idstore_test.go
new file mode 100644
index 0000000000..5f96bc9031
--- /dev/null
+++ b/blockstore/idstore_test.go
@@ -0,0 +1,162 @@
+package blockstore
+
+import (
+ "context"
+ "testing"
+
+ blk "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ mh "github.com/multiformats/go-multihash"
+)
+
+// createTestStores builds an idstore over a callback-instrumented map
+// datastore so tests can observe datastore accesses.
+func createTestStores() (Blockstore, *callbackDatastore) {
+	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
+	ids := NewIdStore(NewBlockstore(cd))
+	return ids, cd
+}
+
+// TestIdStore exercises the idstore wrapper end to end: identity CIDs
+// are answered without touching the datastore, normal blocks pass
+// through, and AllKeysChan never yields identity keys.
+func TestIdStore(t *testing.T) {
+	idhash1, _ := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte("idhash1"))
+	idblock1, _ := blk.NewBlockWithCid([]byte("idhash1"), idhash1)
+	hash1, _ := cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Sum([]byte("hash1"))
+	block1, _ := blk.NewBlockWithCid([]byte("hash1"), hash1)
+	emptyHash, _ := cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Sum([]byte("emptyHash"))
+	emptyBlock, _ := blk.NewBlockWithCid([]byte{}, emptyHash)
+
+	ids, cb := createTestStores()
+
+	have, _ := ids.Has(bg, idhash1)
+	if !have {
+		t.Fatal("Has() failed on idhash")
+	}
+
+	_, err := ids.Get(bg, idhash1)
+	if err != nil {
+		t.Fatalf("Get() failed on idhash: %v", err)
+	}
+
+	noop := func() {}
+	// Any datastore access while this callback is installed is a failure:
+	// identity-hash operations must be handled entirely in memory.
+	failIfPassThough := func() {
+		t.Fatal("operation on identity hash passed though to datastore")
+	}
+
+	cb.f = failIfPassThough
+	err = ids.Put(bg, idblock1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cb.f = noop
+	err = ids.Put(bg, block1)
+	if err != nil {
+		t.Fatalf("Put() failed on normal block: %v", err)
+	}
+
+	have, _ = ids.Has(bg, hash1)
+	if !have {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	blockSize, _ := ids.GetSize(bg, hash1)
+	if blockSize == -1 {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	_, err = ids.Get(bg, hash1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ids.Put(bg, emptyBlock)
+	if err != nil {
+		t.Fatalf("Put() failed on normal block: %v", err)
+	}
+
+	have, _ = ids.Has(bg, emptyHash)
+	if !have {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	blockSize, _ = ids.GetSize(bg, emptyHash)
+	if blockSize != 0 {
+		t.Fatal("normal block not added to datastore")
+	}
+
+	cb.f = failIfPassThough
+	err = ids.DeleteBlock(bg, idhash1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cb.f = noop
+	err = ids.DeleteBlock(bg, hash1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	have, _ = ids.Has(bg, hash1)
+	if have {
+		t.Fatal("normal block not deleted from datastore")
+	}
+
+	blockSize, _ = ids.GetSize(bg, hash1)
+	if blockSize > -1 {
+		t.Fatal("normal block not deleted from datastore")
+	}
+
+	err = ids.DeleteBlock(bg, emptyHash)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	idhash2, _ := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte("idhash2"))
+	idblock2, _ := blk.NewBlockWithCid([]byte("idhash2"), idhash2)
+	hash2, _ := cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Sum([]byte("hash2"))
+	block2, _ := blk.NewBlockWithCid([]byte("hash2"), hash2)
+
+	cb.f = failIfPassThough
+	err = ids.PutMany(bg, []blk.Block{idblock1, idblock2})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	opCount := 0
+	cb.f = func() {
+		opCount++
+	}
+
+	err = ids.PutMany(bg, []blk.Block{block1, block2})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if opCount != 4 {
+		// one call to Has and Put for each Cid
+		t.Fatalf("expected exactly 4 operations got %d", opCount)
+	}
+
+	opCount = 0
+	err = ids.PutMany(bg, []blk.Block{idblock1, block1})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if opCount != 1 {
+		// just one call to Put from the normal (non-id) block
+		t.Fatalf("expected exactly 1 operations got %d", opCount)
+	}
+
+	ch, err := ids.AllKeysChan(context.TODO())
+	if err != nil {
+		t.Fatal(err)
+	}
+	cnt := 0
+	for c := range ch {
+		cnt++
+		if c.Prefix().MhType == mh.IDENTITY {
+			t.Fatalf("block with identity hash found in blockstore")
+		}
+	}
+	if cnt != 2 {
+		t.Fatalf("expected exactly two keys returned by AllKeysChan got %d", cnt)
+	}
+}
diff --git a/chunker/benchmark_test.go b/chunker/benchmark_test.go
new file mode 100644
index 0000000000..5069b06536
--- /dev/null
+++ b/chunker/benchmark_test.go
@@ -0,0 +1,59 @@
+package chunk
+
+import (
+ "bytes"
+ "io"
+ "math/rand"
+ "testing"
+)
+
+// newSplitter constructs a Splitter over a reader; used to parameterize
+// the shared chunker benchmarks.
+type newSplitter func(io.Reader) Splitter
+
+// bencSpec names one benchmark input size.
+type bencSpec struct {
+	size int
+	name string
+}
+
+// bSizes is the set of input sizes every chunker benchmark runs over.
+var bSizes = []bencSpec{
+	{1 << 10, "1K"},
+	{1 << 20, "1M"},
+	{16 << 20, "16M"},
+	{100 << 20, "100M"},
+}
+
+// benchmarkChunker runs one sub-benchmark per entry in bSizes.
+func benchmarkChunker(b *testing.B, ns newSplitter) {
+	for _, s := range bSizes {
+		s := s
+		b.Run(s.name, func(b *testing.B) {
+			benchmarkChunkerSize(b, ns, s.size)
+		})
+	}
+}
+
+// benchmarkChunkerSize chunks a fixed pseudo-random buffer of the given
+// size b.N times, reporting bytes/op and allocations.
+func benchmarkChunkerSize(b *testing.B, ns newSplitter, size int) {
+	// Deterministic input so runs are comparable.
+	rng := rand.New(rand.NewSource(1))
+	data := make([]byte, size)
+	rng.Read(data)
+
+	b.SetBytes(int64(size))
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	var res uint64
+
+	for i := 0; i < b.N; i++ {
+		r := ns(bytes.NewReader(data))
+
+		for {
+			chunk, err := r.NextBytes()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				b.Fatal(err)
+			}
+			res = res + uint64(len(chunk))
+		}
+	}
+	// Sink into the package-level Res so the compiler can't eliminate the
+	// benchmark body as dead code.
+	Res = Res + res
+}
diff --git a/chunker/buzhash.go b/chunker/buzhash.go
new file mode 100644
index 0000000000..83ab019dd4
--- /dev/null
+++ b/chunker/buzhash.go
@@ -0,0 +1,151 @@
+package chunk
+
+import (
+ "io"
+ "math/bits"
+
+ pool "github.com/libp2p/go-buffer-pool"
+)
+
+const (
+	buzMin  = 128 << 10  // minimum chunk size (bytes)
+	buzMax  = 512 << 10  // maximum chunk size and read-buffer size (bytes)
+	buzMask = 1<<17 - 1  // cut-point mask: a zero hash under this mask ends a chunk
+)
+
+// Buzhash is a content-defined chunker using a rolling buzhash over a
+// 32-byte window. It implements the Splitter interface.
+type Buzhash struct {
+	r   io.Reader
+	buf []byte // pooled read buffer of size buzMax; nil after EOF/error
+	n   int    // bytes carried over in buf from the previous NextBytes call
+
+	err error // sticky error returned by subsequent NextBytes calls
+}
+
+// NewBuzhash returns a Buzhash splitter reading from r.
+func NewBuzhash(r io.Reader) *Buzhash {
+	return &Buzhash{
+		r:   r,
+		buf: pool.Get(buzMax),
+	}
+}
+
+// Reader returns the io.Reader associated to this Splitter.
+func (b *Buzhash) Reader() io.Reader {
+	return b.r
+}
+
+// NextBytes returns the next content-defined chunk, or io.EOF when the
+// input is exhausted. Chunks are at least buzMin and at most buzMax
+// bytes, except possibly the final chunk, which may be shorter.
+func (b *Buzhash) NextBytes() ([]byte, error) {
+	if b.err != nil {
+		return nil, b.err
+	}
+
+	// Top up the buffer past any bytes carried over from the last call.
+	n, err := io.ReadFull(b.r, b.buf[b.n:])
+	if err != nil {
+		if err == io.ErrUnexpectedEOF || err == io.EOF {
+			buffered := b.n + n
+			if buffered < buzMin {
+				// Less than one minimum chunk left: emit it (if non-empty)
+				// and release the pooled buffer.
+				b.err = io.EOF
+				// Read nothing? Don't return an empty block.
+				if buffered == 0 {
+					pool.Put(b.buf)
+					b.buf = nil
+					return nil, b.err
+				}
+				res := make([]byte, buffered)
+				copy(res, b.buf)
+
+				pool.Put(b.buf)
+				b.buf = nil
+				return res, nil
+			}
+		} else {
+			b.err = err
+			pool.Put(b.buf)
+			b.buf = nil
+			return nil, err
+		}
+	}
+
+	i := buzMin - 32
+
+	var state uint32 = 0
+
+	if buzMin > len(b.buf) {
+		panic("this is impossible")
+	}
+
+	// Prime the rolling hash over the 32 bytes ending at buzMin.
+	for ; i < buzMin; i++ {
+		state = bits.RotateLeft32(state, 1)
+		state = state ^ bytehash[b.buf[i]]
+	}
+
+	{
+		max := b.n + n - 32 - 1
+
+		buf := b.buf
+		bufshf := b.buf[32:]
+		i = buzMin - 32
+		// Single up-front bounds check lets the compiler elide per-
+		// iteration checks in the hot loop below.
+		_ = buf[max]
+		_ = bufshf[max]
+
+		// Roll the window forward until the hash hits the cut mask
+		// (chunk boundary) or we run out of buffered data (buzMax cap).
+		for ; i <= max; i++ {
+			if state&buzMask == 0 {
+				break
+			}
+			state = bits.RotateLeft32(state, 1) ^
+				bytehash[buf[i]] ^
+				bytehash[bufshf[i]]
+		}
+		i += 32
+	}
+
+	res := make([]byte, i)
+	copy(res, b.buf)
+
+	// Shift the unconsumed tail to the front for the next call.
+	b.n = copy(b.buf, b.buf[i:b.n+n])
+
+	return res, nil
+}
+
+// bytehash is the byte-to-hash lookup table for the rolling buzhash.
+// It is bit-balanced (each of the 32 bit positions is set in exactly 128
+// entries; see TestBuzhashBitsHashBias) and was produced by chunker/gen.
+var bytehash = [256]uint32{
+	0x6236e7d5, 0x10279b0b, 0x72818182, 0xdc526514, 0x2fd41e3d, 0x777ef8c8,
+	0x83ee5285, 0x2c8f3637, 0x2f049c1a, 0x57df9791, 0x9207151f, 0x9b544818,
+	0x74eef658, 0x2028ca60, 0x0271d91a, 0x27ae587e, 0xecf9fa5f, 0x236e71cd,
+	0xf43a8a2e, 0xbb13380, 0x9e57912c, 0x89a26cdb, 0x9fcf3d71, 0xa86da6f1,
+	0x9c49f376, 0x346aecc7, 0xf094a9ee, 0xea99e9cb, 0xb01713c6, 0x88acffb,
+	0x2960a0fb, 0x344a626c, 0x7ff22a46, 0x6d7a1aa5, 0x6a714916, 0x41d454ca,
+	0x8325b830, 0xb65f563, 0x447fecca, 0xf9d0ea5e, 0xc1d9d3d4, 0xcb5ec574,
+	0x55aae902, 0x86edc0e7, 0xd3a9e33, 0xe70dc1e1, 0xe3c5f639, 0x9b43140a,
+	0xc6490ac5, 0x5e4030fb, 0x8e976dd5, 0xa87468ea, 0xf830ef6f, 0xcc1ed5a5,
+	0x611f4e78, 0xddd11905, 0xf2613904, 0x566c67b9, 0x905a5ccc, 0x7b37b3a4,
+	0x4b53898a, 0x6b8fd29d, 0xaad81575, 0x511be414, 0x3cfac1e7, 0x8029a179,
+	0xd40efeda, 0x7380e02, 0xdc9beffd, 0x2d049082, 0x99bc7831, 0xff5002a8,
+	0x21ce7646, 0x1cd049b, 0xf43994f, 0xc3c6c5a5, 0xbbda5f50, 0xec15ec7,
+	0x9adb19b6, 0xc1e80b9, 0xb9b52968, 0xae162419, 0x2542b405, 0x91a42e9d,
+	0x6be0f668, 0x6ed7a6b9, 0xbc2777b4, 0xe162ce56, 0x4266aad5, 0x60fdb704,
+	0x66f832a5, 0x9595f6ca, 0xfee83ced, 0x55228d99, 0x12bf0e28, 0x66896459,
+	0x789afda, 0x282baa8, 0x2367a343, 0x591491b0, 0x2ff1a4b1, 0x410739b6,
+	0x9b7055a0, 0x2e0eb229, 0x24fc8252, 0x3327d3df, 0xb0782669, 0x1c62e069,
+	0x7f503101, 0xf50593ae, 0xd9eb275d, 0xe00eb678, 0x5917ccde, 0x97b9660a,
+	0xdd06202d, 0xed229e22, 0xa9c735bf, 0xd6316fe6, 0x6fc72e4c, 0x206dfa2,
+	0xd6b15c5a, 0x69d87b49, 0x9c97745, 0x13445d61, 0x35a975aa, 0x859aa9b9,
+	0x65380013, 0xd1fb6391, 0xc29255fd, 0x784a3b91, 0xb9e74c26, 0x63ce4d40,
+	0xc07cbe9e, 0xe6e4529e, 0xfb3632f, 0x9438d9c9, 0x682f94a8, 0xf8fd4611,
+	0x257ec1ed, 0x475ce3d6, 0x60ee2db1, 0x2afab002, 0x2b9e4878, 0x86b340de,
+	0x1482fdca, 0xfe41b3bf, 0xd4a412b0, 0xe09db98c, 0xc1af5d53, 0x7e55e25f,
+	0xd3346b38, 0xb7a12cbd, 0x9c6827ba, 0x71f78bee, 0x8c3a0f52, 0x150491b0,
+	0xf26de912, 0x233e3a4e, 0xd309ebba, 0xa0a9e0ff, 0xca2b5921, 0xeeb9893c,
+	0x33829e88, 0x9870cc2a, 0x23c4b9d0, 0xeba32ea3, 0xbdac4d22, 0x3bc8c44c,
+	0x1e8d0397, 0xf9327735, 0x783b009f, 0xeb83742, 0x2621dc71, 0xed017d03,
+	0x5c760aa1, 0x5a69814b, 0x96e3047f, 0xa93c9cde, 0x615c86f5, 0xb4322aa5,
+	0x4225534d, 0xd2e2de3, 0xccfccc4b, 0xbac2a57, 0xf0a06d04, 0xbc78d737,
+	0xf2d1f766, 0xf5a7953c, 0xbcdfda85, 0x5213b7d5, 0xbce8a328, 0xd38f5f18,
+	0xdb094244, 0xfe571253, 0x317fa7ee, 0x4a324f43, 0x3ffc39d9, 0x51b3fa8e,
+	0x7a4bee9f, 0x78bbc682, 0x9f5c0350, 0x2fe286c, 0x245ab686, 0xed6bf7d7,
+	0xac4988a, 0x3fe010fa, 0xc65fe369, 0xa45749cb, 0x2b84e537, 0xde9ff363,
+	0x20540f9a, 0xaa8c9b34, 0x5bc476b3, 0x1d574bd7, 0x929100ad, 0x4721de4d,
+	0x27df1b05, 0x58b18546, 0xb7e76764, 0xdf904e58, 0x97af57a1, 0xbd4dc433,
+	0xa6256dfd, 0xf63998f3, 0xf1e05833, 0xe20acf26, 0xf57fd9d6, 0x90300b4d,
+	0x89df4290, 0x68d01cbc, 0xcf893ee3, 0xcc42a046, 0x778e181b, 0x67265c76,
+	0xe981a4c4, 0x82991da1, 0x708f7294, 0xe6e2ae62, 0xfc441870, 0x95e1b0b6,
+	0x445f825, 0x5a93b47f, 0x5e9cf4be, 0x84da71e7, 0x9d9582b0, 0x9bf835ef,
+	0x591f61e2, 0x43325985, 0x5d2de32e, 0x8d8fbf0f, 0x95b30f38, 0x7ad5b6e,
+	0x4e934edf, 0x3cd4990e, 0x9053e259, 0x5c41857d}
diff --git a/chunker/buzhash_norace_test.go b/chunker/buzhash_norace_test.go
new file mode 100644
index 0000000000..50dc0e5ce2
--- /dev/null
+++ b/chunker/buzhash_norace_test.go
@@ -0,0 +1,14 @@
+//go:build !race
+
+package chunk
+
+import (
+ "testing"
+)
+
+// TestFuzzBuzhashChunking re-runs the chunking invariants over many
+// random 16 MiB inputs; excluded from -race builds for speed.
+func TestFuzzBuzhashChunking(t *testing.T) {
+	buf := make([]byte, 1024*1024*16)
+	for i := 0; i < 100; i++ {
+		testBuzhashChunking(t, buf)
+	}
+}
diff --git a/chunker/buzhash_test.go b/chunker/buzhash_test.go
new file mode 100644
index 0000000000..fe6de4434e
--- /dev/null
+++ b/chunker/buzhash_test.go
@@ -0,0 +1,91 @@
+package chunk
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ util "github.com/ipfs/boxo/util"
+)
+
+// testBuzhashChunking fills buf with random data, chunks it, and checks
+// the invariants: no empty chunks, every chunk except the last at least
+// buzMin bytes, and the concatenation reproducing the input. It returns
+// the number of chunks produced.
+func testBuzhashChunking(t *testing.T, buf []byte) (chunkCount int) {
+	n, err := util.NewTimeSeededRand().Read(buf)
+	if n < len(buf) {
+		t.Fatalf("expected %d bytes, got %d", len(buf), n)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	r := NewBuzhash(bytes.NewReader(buf))
+
+	var chunks [][]byte
+
+	for {
+		chunk, err := r.NextBytes()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatal(err)
+		}
+
+		chunks = append(chunks, chunk)
+	}
+	chunkCount += len(chunks)
+
+	for i, chunk := range chunks {
+		if len(chunk) == 0 {
+			t.Fatalf("chunk %d/%d is empty", i+1, len(chunks))
+		}
+	}
+
+	// Only the final chunk may be shorter than the minimum.
+	for i, chunk := range chunks[:len(chunks)-1] {
+		if len(chunk) < buzMin {
+			t.Fatalf("chunk %d/%d is less than the minimum size", i+1, len(chunks))
+		}
+	}
+
+	unchunked := bytes.Join(chunks, nil)
+	if !bytes.Equal(unchunked, buf) {
+		t.Fatal("data was chunked incorrectly")
+	}
+
+	return chunkCount
+}
+
+// TestBuzhashChunking runs the invariants once over a 16 MiB input and
+// logs the resulting average chunk size.
+func TestBuzhashChunking(t *testing.T) {
+	buf := make([]byte, 1024*1024*16)
+	count := testBuzhashChunking(t, buf)
+	t.Logf("average block size: %d\n", len(buf)/count)
+}
+
+// TestBuzhashChunkReuse checks that chunking overlapping inputs reuses
+// most chunks (content-defined boundary stability).
+func TestBuzhashChunkReuse(t *testing.T) {
+	newBuzhash := func(r io.Reader) Splitter {
+		return NewBuzhash(r)
+	}
+	testReuse(t, newBuzhash)
+}
+
+func BenchmarkBuzhash2(b *testing.B) {
+	benchmarkChunker(b, func(r io.Reader) Splitter {
+		return NewBuzhash(r)
+	})
+}
+
+// TestBuzhashBitsHashBias verifies the bytehash LUT is bit-balanced:
+// each of the 32 bit positions is set in exactly half (128) of the 256
+// entries.
+func TestBuzhashBitsHashBias(t *testing.T) {
+	counts := make([]byte, 32)
+	for _, h := range bytehash {
+		for i := 0; i < 32; i++ {
+			if h&1 == 1 {
+				counts[i]++
+			}
+			h = h >> 1
+		}
+	}
+	for i, c := range counts {
+		if c != 128 {
+			t.Errorf("Bit balance in position %d broken, %d ones", i, c)
+		}
+	}
+}
diff --git a/chunker/gen/main.go b/chunker/gen/main.go
new file mode 100644
index 0000000000..9d908544b8
--- /dev/null
+++ b/chunker/gen/main.go
@@ -0,0 +1,33 @@
+// This file generates bytehash LUT
+package main
+
+import (
+ "fmt"
+ "math/rand"
+)
+
+const nRounds = 200
+
+// main generates a bit-balanced 256-entry LUT: it starts with half the
+// entries all-ones and half all-zeros (so every bit position has exactly
+// 128 ones), then repeatedly swaps each bit position between randomly
+// paired entries — shuffling which entries hold each bit while keeping
+// the per-position count at 128.
+func main() {
+	rnd := rand.New(rand.NewSource(0))
+
+	lut := make([]uint32, 256)
+	for i := 0; i < 256/2; i++ {
+		lut[i] = 1<<32 - 1
+	}
+
+	for r := 0; r < nRounds; r++ {
+		for b := uint32(0); b < 32; b++ {
+			mask := uint32(1) << b
+			nmask := ^mask
+			// Exchange bit b between entries i and j.
+			for i, j := range rnd.Perm(256) {
+				li := lut[i]
+				lj := lut[j]
+				lut[i] = li&nmask | (lj & mask)
+				lut[j] = lj&nmask | (li & mask)
+			}
+		}
+	}
+
+	fmt.Printf("%#v", lut)
+}
diff --git a/chunker/parse.go b/chunker/parse.go
new file mode 100644
index 0000000000..486cd14adc
--- /dev/null
+++ b/chunker/parse.go
@@ -0,0 +1,114 @@
+package chunk
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const (
+	// DefaultBlockSize is the chunk size that splitters produce (or aim to).
+	DefaultBlockSize int64 = 1024 * 256
+
+	// No leaf block should contain more than 1MiB of payload data ( wrapping overhead aside )
+	// This effectively mandates the maximum chunk size
+	// See discussion at https://github.com/ipfs/boxo/chunker/pull/21#discussion_r369124879 for background
+	ChunkSizeLimit int = 1048576
+)
+
+// Sentinel errors returned by FromString / parseRabinString.
+var (
+	ErrRabinMin = errors.New("rabin min must be greater than 16")
+	ErrSize     = errors.New("chunker size must be greater than 0")
+	ErrSizeMax  = fmt.Errorf("chunker parameters may not exceed the maximum chunk size of %d", ChunkSizeLimit)
+)
+
+// FromString returns a Splitter depending on the given string:
+// it supports "default" (""), "size-{size}", "rabin", "rabin-{blocksize}",
+// "rabin-{min}-{avg}-{max}" and "buzhash".
+func FromString(r io.Reader, chunker string) (Splitter, error) {
+	switch {
+	case chunker == "" || chunker == "default":
+		return DefaultSplitter(r), nil
+
+	case strings.HasPrefix(chunker, "size-"):
+		sizeStr := strings.Split(chunker, "-")[1]
+		size, err := strconv.Atoi(sizeStr)
+		if err != nil {
+			return nil, err
+		} else if size <= 0 {
+			return nil, ErrSize
+		} else if size > ChunkSizeLimit {
+			return nil, ErrSizeMax
+		}
+		return NewSizeSplitter(r, int64(size)), nil
+
+	case strings.HasPrefix(chunker, "rabin"):
+		return parseRabinString(r, chunker)
+
+	case chunker == "buzhash":
+		return NewBuzhash(r), nil
+
+	default:
+		return nil, fmt.Errorf("unrecognized chunker option: %s", chunker)
+	}
+}
+
+// parseRabinString parses the "rabin", "rabin-{avg}" and
+// "rabin-[min:]{min}-[avg:]{avg}-[max:]{max}" spellings and returns the
+// corresponding Rabin splitter. Optional labels, when present, must
+// appear in min/avg/max order.
+func parseRabinString(r io.Reader, chunker string) (Splitter, error) {
+	parts := strings.Split(chunker, "-")
+	switch len(parts) {
+	case 1:
+		return NewRabin(r, uint64(DefaultBlockSize)), nil
+	case 2:
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			return nil, err
+		} else if int(float32(size)*1.5) > ChunkSizeLimit { // FIXME - this will be addressed in a subsequent PR
+			return nil, ErrSizeMax
+		}
+		return NewRabin(r, uint64(size)), nil
+	case 4:
+		sub := strings.Split(parts[1], ":")
+		if len(sub) > 1 && sub[0] != "min" {
+			return nil, errors.New("first label must be min")
+		}
+		min, err := strconv.Atoi(sub[len(sub)-1])
+		if err != nil {
+			return nil, err
+		}
+		if min < 16 {
+			return nil, ErrRabinMin
+		}
+		sub = strings.Split(parts[2], ":")
+		if len(sub) > 1 && sub[0] != "avg" {
+			// Fix: dropped a leftover debug log.Error call that fired on
+			// this user-input validation path.
+			return nil, errors.New("second label must be avg")
+		}
+		avg, err := strconv.Atoi(sub[len(sub)-1])
+		if err != nil {
+			return nil, err
+		}
+
+		sub = strings.Split(parts[3], ":")
+		if len(sub) > 1 && sub[0] != "max" {
+			return nil, errors.New("final label must be max")
+		}
+		max, err := strconv.Atoi(sub[len(sub)-1])
+		if err != nil {
+			return nil, err
+		}
+
+		// Enforce min < avg < max <= ChunkSizeLimit.
+		if min >= avg {
+			return nil, errors.New("incorrect format: rabin-min must be smaller than rabin-avg")
+		} else if avg >= max {
+			return nil, errors.New("incorrect format: rabin-avg must be smaller than rabin-max")
+		} else if max > ChunkSizeLimit {
+			return nil, ErrSizeMax
+		}
+
+		return NewRabinMinMax(r, uint64(min), uint64(avg), uint64(max)), nil
+	default:
+		// Fix: error string was missing its closing parenthesis.
+		return nil, errors.New("incorrect format (expected 'rabin' 'rabin-[avg]' or 'rabin-[min]-[avg]-[max]')")
+	}
+}
diff --git a/chunker/parse_test.go b/chunker/parse_test.go
new file mode 100644
index 0000000000..237a2b439a
--- /dev/null
+++ b/chunker/parse_test.go
@@ -0,0 +1,80 @@
+package chunk
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+)
+
+const (
+	// testTwoThirdsOfChunkLimit probes "rabin-{avg}" near the size cutoff:
+	// parseRabinString rejects an avg whose derived max (1.5×avg) exceeds
+	// ChunkSizeLimit, i.e. any avg above two thirds of the limit.
+	testTwoThirdsOfChunkLimit = 2 * (float32(ChunkSizeLimit) / float32(3))
+)
+
+// TestParseRabin exercises the rabin chunker-string parser: valid specs,
+// ordering violations, and the maximum-chunk-size cutoffs.
+func TestParseRabin(t *testing.T) {
+	r := bytes.NewReader(randBuf(t, 1000))
+
+	_, err := FromString(r, "rabin-18-25-32")
+	if err != nil {
+		// Fix: was t.Errorf(err.Error()) — a non-constant format string
+		// (go vet printf); report the error value directly instead.
+		t.Error(err)
+	}
+
+	_, err = FromString(r, "rabin-15-23-31")
+	if err != ErrRabinMin {
+		t.Fatalf("Expected an 'ErrRabinMin' error, got: %#v", err)
+	}
+
+	_, err = FromString(r, "rabin-20-20-21")
+	if err == nil || err.Error() != "incorrect format: rabin-min must be smaller than rabin-avg" {
+		t.Fatalf("Expected an arg-out-of-order error, got: %#v", err)
+	}
+
+	_, err = FromString(r, "rabin-19-21-21")
+	if err == nil || err.Error() != "incorrect format: rabin-avg must be smaller than rabin-max" {
+		t.Fatalf("Expected an arg-out-of-order error, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-19-21-%d", ChunkSizeLimit))
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-19-21-%d", 1+ChunkSizeLimit))
+	if err != ErrSizeMax {
+		t.Fatalf("Expected 'ErrSizeMax', got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-%.0f", testTwoThirdsOfChunkLimit))
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("rabin-%.0f", 1+testTwoThirdsOfChunkLimit))
+	if err != ErrSizeMax {
+		t.Fatalf("Expected 'ErrSizeMax', got: %#v", err)
+	}
+
+}
+
+// TestParseSize exercises the "size-{n}" parser: zero, valid sizes, and
+// the maximum-chunk-size boundary.
+func TestParseSize(t *testing.T) {
+	r := bytes.NewReader(randBuf(t, 1000))
+
+	_, err := FromString(r, "size-0")
+	if err != ErrSize {
+		t.Fatalf("Expected an 'ErrSize' error, got: %#v", err)
+	}
+
+	_, err = FromString(r, "size-32")
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("size-%d", ChunkSizeLimit))
+	if err != nil {
+		t.Fatalf("Expected success, got: %#v", err)
+	}
+
+	_, err = FromString(r, fmt.Sprintf("size-%d", 1+ChunkSizeLimit))
+	if err != ErrSizeMax {
+		t.Fatalf("Expected 'ErrSizeMax', got: %#v", err)
+	}
+}
diff --git a/chunker/rabin.go b/chunker/rabin.go
new file mode 100644
index 0000000000..4247057b2f
--- /dev/null
+++ b/chunker/rabin.go
@@ -0,0 +1,54 @@
+package chunk
+
+import (
+ "hash/fnv"
+ "io"
+
+ "github.com/whyrusleeping/chunker"
+)
+
+// IpfsRabinPoly is the irreducible polynomial of degree 53 used for Rabin
+// fingerprinting.
+var IpfsRabinPoly = chunker.Pol(17437180132763653)
+
+// Rabin implements the Splitter interface and splits content with Rabin
+// fingerprints.
+type Rabin struct {
+	r      *chunker.Chunker
+	reader io.Reader
+}
+
+// NewRabin creates a new Rabin splitter with the given
+// average block size. Min and max are derived as avg/3 and 1.5*avg.
+func NewRabin(r io.Reader, avgBlkSize uint64) *Rabin {
+	min := avgBlkSize / 3
+	max := avgBlkSize + (avgBlkSize / 2)
+
+	return NewRabinMinMax(r, min, avgBlkSize, max)
+}
+
+// NewRabinMinMax returns a new Rabin splitter which uses
+// the given min, average and max block sizes.
+func NewRabinMinMax(r io.Reader, min, avg, max uint64) *Rabin {
+	h := fnv.New32a()
+	ch := chunker.New(r, IpfsRabinPoly, h, avg, min, max)
+
+	return &Rabin{
+		r:      ch,
+		reader: r,
+	}
+}
+
+// NextBytes reads the next chunk from the underlying Rabin chunker and
+// returns its data, or the chunker's error (io.EOF at end of input).
+func (r *Rabin) NextBytes() ([]byte, error) {
+	ch, err := r.r.Next()
+	if err != nil {
+		return nil, err
+	}
+
+	return ch.Data, nil
+}
+
+// Reader returns the io.Reader associated to this Splitter.
+func (r *Rabin) Reader() io.Reader {
+	return r.reader
+}
diff --git a/chunker/rabin_test.go b/chunker/rabin_test.go
new file mode 100644
index 0000000000..79699e3249
--- /dev/null
+++ b/chunker/rabin_test.go
@@ -0,0 +1,108 @@
+package chunk
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "testing"
+
+ util "github.com/ipfs/boxo/util"
+ blocks "github.com/ipfs/go-block-format"
+)
+
+func TestRabinChunking(t *testing.T) {
+ data := make([]byte, 1024*1024*16)
+ n, err := util.NewTimeSeededRand().Read(data)
+ if n < len(data) {
+ t.Fatalf("expected %d bytes, got %d", len(data), n)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r := NewRabin(bytes.NewReader(data), 1024*256)
+
+ var chunks [][]byte
+
+ for {
+ chunk, err := r.NextBytes()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Fatal(err)
+ }
+
+ chunks = append(chunks, chunk)
+ }
+
+ fmt.Printf("average block size: %d\n", len(data)/len(chunks))
+
+ unchunked := bytes.Join(chunks, nil)
+ if !bytes.Equal(unchunked, data) {
+ fmt.Printf("%d %d\n", len(unchunked), len(data))
+ t.Fatal("data was chunked incorrectly")
+ }
+}
+
+func chunkData(t *testing.T, newC newSplitter, data []byte) map[string]blocks.Block {
+ r := newC(bytes.NewReader(data))
+
+ blkmap := make(map[string]blocks.Block)
+
+ for {
+ blk, err := r.NextBytes()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Fatal(err)
+ }
+
+ b := blocks.NewBlock(blk)
+ blkmap[b.Cid().KeyString()] = b
+ }
+
+ return blkmap
+}
+
+func testReuse(t *testing.T, cr newSplitter) {
+ data := make([]byte, 1024*1024*16)
+ n, err := util.NewTimeSeededRand().Read(data)
+ if n < len(data) {
+ t.Fatalf("expected %d bytes, got %d", len(data), n)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch1 := chunkData(t, cr, data[1000:])
+ ch2 := chunkData(t, cr, data)
+
+ var extra int
+ for k := range ch2 {
+ _, ok := ch1[k]
+ if !ok {
+ extra++
+ }
+ }
+
+ if extra > 2 {
+ t.Logf("too many spare chunks made: %d", extra)
+ }
+}
+
+func TestRabinChunkReuse(t *testing.T) {
+ newRabin := func(r io.Reader) Splitter {
+ return NewRabin(r, 256*1024)
+ }
+ testReuse(t, newRabin)
+}
+
+var Res uint64
+
+func BenchmarkRabin(b *testing.B) {
+ benchmarkChunker(b, func(r io.Reader) Splitter {
+ return NewRabin(r, 256<<10)
+ })
+}
diff --git a/chunker/splitting.go b/chunker/splitting.go
new file mode 100644
index 0000000000..a137820ab1
--- /dev/null
+++ b/chunker/splitting.go
@@ -0,0 +1,102 @@
+// Package chunk implements streaming block splitters.
+// Splitters read data from a reader and provide byte slices (chunks)
+// The size and contents of these slices depend on the splitting method
+// used.
+package chunk
+
+import (
+ "io"
+
+ logging "github.com/ipfs/go-log"
+ pool "github.com/libp2p/go-buffer-pool"
+)
+
+var log = logging.Logger("chunk")
+
+// A Splitter reads bytes from a Reader and creates "chunks" (byte slices)
+// that can be used to build DAG nodes.
+type Splitter interface {
+ Reader() io.Reader
+ NextBytes() ([]byte, error)
+}
+
+// SplitterGen is a splitter generator, given a reader.
+type SplitterGen func(r io.Reader) Splitter
+
+// DefaultSplitter returns a SizeSplitter with the DefaultBlockSize.
+func DefaultSplitter(r io.Reader) Splitter {
+ return NewSizeSplitter(r, DefaultBlockSize)
+}
+
+// SizeSplitterGen returns a SplitterGen function which will create
+// a splitter with the given size when called.
+func SizeSplitterGen(size int64) SplitterGen {
+ return func(r io.Reader) Splitter {
+ return NewSizeSplitter(r, size)
+ }
+}
+
+// Chan returns a channel that receives each of the chunks produced
+// by a splitter, along with another one for errors.
+func Chan(s Splitter) (<-chan []byte, <-chan error) {
+ out := make(chan []byte)
+ errs := make(chan error, 1)
+ go func() {
+ defer close(out)
+ defer close(errs)
+
+ // all-chunks loop (keep creating chunks)
+ for {
+ b, err := s.NextBytes()
+ if err != nil {
+ errs <- err
+ return
+ }
+
+ out <- b
+ }
+ }()
+ return out, errs
+}
+
+type sizeSplitterv2 struct {
+ r io.Reader
+ size uint32
+ err error
+}
+
+// NewSizeSplitter returns a new size-based Splitter with the given block size.
+func NewSizeSplitter(r io.Reader, size int64) Splitter {
+ return &sizeSplitterv2{
+ r: r,
+ size: uint32(size),
+ }
+}
+
+// NextBytes produces a new chunk.
+func (ss *sizeSplitterv2) NextBytes() ([]byte, error) {
+ if ss.err != nil {
+ return nil, ss.err
+ }
+
+ full := pool.Get(int(ss.size))
+ n, err := io.ReadFull(ss.r, full)
+ switch err {
+ case io.ErrUnexpectedEOF:
+ ss.err = io.EOF
+ small := make([]byte, n)
+ copy(small, full)
+ pool.Put(full)
+ return small, nil
+ case nil:
+ return full, nil
+ default:
+ pool.Put(full)
+ return nil, err
+ }
+}
+
+// Reader returns the io.Reader associated to this Splitter.
+func (ss *sizeSplitterv2) Reader() io.Reader {
+ return ss.r
+}
diff --git a/chunker/splitting_test.go b/chunker/splitting_test.go
new file mode 100644
index 0000000000..c53dfb4a75
--- /dev/null
+++ b/chunker/splitting_test.go
@@ -0,0 +1,126 @@
+package chunk
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ u "github.com/ipfs/boxo/util"
+)
+
+func randBuf(t *testing.T, size int) []byte {
+ buf := make([]byte, size)
+ if _, err := u.NewTimeSeededRand().Read(buf); err != nil {
+ t.Fatal("failed to read enough randomness")
+ }
+ return buf
+}
+
+func copyBuf(buf []byte) []byte {
+ cpy := make([]byte, len(buf))
+ copy(cpy, buf)
+ return cpy
+}
+
+func TestSizeSplitterOverAllocate(t *testing.T) {
+ max := 1000
+ r := bytes.NewReader(randBuf(t, max))
+ chunksize := int64(1024 * 256)
+ splitter := NewSizeSplitter(r, chunksize)
+ chunk, err := splitter.NextBytes()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cap(chunk) > len(chunk) {
+ t.Fatal("chunk capacity too large")
+ }
+}
+
+func TestSizeSplitterIsDeterministic(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow()
+ }
+
+ test := func() {
+ bufR := randBuf(t, 10000000) // crank this up to satisfy yourself.
+ bufA := copyBuf(bufR)
+ bufB := copyBuf(bufR)
+
+ chunksA, _ := Chan(DefaultSplitter(bytes.NewReader(bufA)))
+ chunksB, _ := Chan(DefaultSplitter(bytes.NewReader(bufB)))
+
+ for n := 0; ; n++ {
+ a, moreA := <-chunksA
+ b, moreB := <-chunksB
+
+ if !moreA {
+ if moreB {
+ t.Fatal("A ended, B didnt.")
+ }
+ return
+ }
+
+ if !bytes.Equal(a, b) {
+ t.Fatalf("chunk %d not equal", n)
+ }
+ }
+ }
+
+ for run := 0; run < 1; run++ { // crank this up to satisfy yourself.
+ test()
+ }
+}
+
+func TestSizeSplitterFillsChunks(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow()
+ }
+
+ max := 10000000
+ b := randBuf(t, max)
+ r := &clipReader{r: bytes.NewReader(b), size: 4000}
+ chunksize := int64(1024 * 256)
+ c, _ := Chan(NewSizeSplitter(r, chunksize))
+
+ sofar := 0
+ whole := make([]byte, max)
+ for chunk := range c {
+
+ bc := b[sofar : sofar+len(chunk)]
+ if !bytes.Equal(bc, chunk) {
+ t.Fatalf("chunk not correct: (sofar: %d) %d != %d, %v != %v", sofar, len(bc), len(chunk), bc[:100], chunk[:100])
+ }
+
+ copy(whole[sofar:], chunk)
+
+ sofar += len(chunk)
+ if sofar != max && len(chunk) < int(chunksize) {
+ t.Fatal("sizesplitter split at a smaller size")
+ }
+ }
+
+ if !bytes.Equal(b, whole) {
+ t.Fatal("splitter did not split right")
+ }
+}
+
+type clipReader struct {
+ size int
+ r io.Reader
+}
+
+func (s *clipReader) Read(buf []byte) (int, error) {
+
+ // clip the incoming buffer to produce smaller chunks
+ if len(buf) > s.size {
+ buf = buf[:s.size]
+ }
+
+ return s.r.Read(buf)
+}
+
+func BenchmarkDefault(b *testing.B) {
+ benchmarkChunker(b, func(r io.Reader) Splitter {
+ return DefaultSplitter(r)
+ })
+}
diff --git a/cmd/car/README.md b/cmd/car/README.md
new file mode 100644
index 0000000000..ea415022a7
--- /dev/null
+++ b/cmd/car/README.md
@@ -0,0 +1,36 @@
+car - The CLI tool
+==================
+
+[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
+
+> A CLI for interacting with car files
+
+## Usage
+
+```
+USAGE:
+ car [global options] command [command options] [arguments...]
+
+COMMANDS:
+ compile compile a car file from a debug patch
+ create, c Create a car file
+ debug debug a car file
+ detach-index Detach an index to a detached file
+ extract, x Extract the contents of a car when the car encodes UnixFS data
+ filter, f Filter the CIDs in a car
+ get-block, gb Get a block out of a car
+ get-dag, gd Get a dag out of a car
+ index, i write out the car with an index
+ inspect verifies a car and prints a basic report about its contents
+ list, l, ls List the CIDs in a car
+ root Get the root CID of a car
+ verify, v Verify a CAR is wellformed
+ help, h Shows a list of commands or help for one command
+```
+
+## Install
+
+To install the latest version of `car`, run:
+```shell script
+go install github.com/ipfs/boxo/cmd/car@latest
+```
diff --git a/cmd/car/car.go b/cmd/car/car.go
new file mode 100644
index 0000000000..d2356484cc
--- /dev/null
+++ b/cmd/car/car.go
@@ -0,0 +1,218 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/multiformats/go-multicodec"
+ "github.com/urfave/cli/v2"
+)
+
+func main() { os.Exit(main1()) }
+
+func main1() int {
+ app := &cli.App{
+ Name: "car",
+ Usage: "Utility for working with car files",
+ Commands: []*cli.Command{
+ {
+ Name: "compile",
+ Usage: "compile a car file from a debug patch",
+ Action: CompileCar,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "output",
+ Aliases: []string{"o", "f"},
+ Usage: "The file to write to",
+ TakesFile: true,
+ },
+ },
+ },
+ {
+ Name: "create",
+ Usage: "Create a car file",
+ Aliases: []string{"c"},
+ Action: CreateCar,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "file",
+ Aliases: []string{"f", "output", "o"},
+ Usage: "The car file to write to",
+ TakesFile: true,
+ },
+ &cli.IntFlag{
+ Name: "version",
+ Value: 2,
+ Usage: "Write output as a v1 or v2 format car",
+ },
+ },
+ },
+ {
+ Name: "debug",
+ Usage: "debug a car file",
+ Action: DebugCar,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "output",
+ Aliases: []string{"o", "f"},
+ Usage: "The file to write to",
+ TakesFile: true,
+ },
+ },
+ },
+ {
+ Name: "detach-index",
+ Usage: "Detach an index to a detached file",
+ Action: DetachCar,
+ Subcommands: []*cli.Command{{
+ Name: "list",
+ Usage: "List a detached index",
+ Action: DetachCarList,
+ }},
+ },
+ {
+ Name: "extract",
+ Aliases: []string{"x"},
+ Usage: "Extract the contents of a car when the car encodes UnixFS data",
+ Action: ExtractCar,
+ ArgsUsage: "[output directory|-]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "file",
+ Aliases: []string{"f"},
+ Usage: "The car file to extract from, or stdin if omitted",
+ Required: false,
+ TakesFile: true,
+ },
+ &cli.StringFlag{
+ Name: "path",
+ Aliases: []string{"p"},
+ Usage: "The unixfs path to extract",
+ Required: false,
+ },
+ &cli.BoolFlag{
+ Name: "verbose",
+ Aliases: []string{"v"},
+ Usage: "Include verbose information about extracted contents",
+ },
+ },
+ },
+ {
+ Name: "filter",
+ Aliases: []string{"f"},
+ Usage: "Filter the CIDs in a car",
+ Action: FilterCar,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "cid-file",
+ Usage: "A file to read CIDs from",
+ TakesFile: true,
+ },
+ &cli.BoolFlag{
+ Name: "append",
+ Usage: "Append cids to an existing output file",
+ },
+ },
+ },
+ {
+ Name: "get-block",
+ Aliases: []string{"gb"},
+ Usage: "Get a block out of a car",
+ Action: GetCarBlock,
+ },
+ {
+ Name: "get-dag",
+ Aliases: []string{"gd"},
+ Usage: "Get a dag out of a car",
+ Action: GetCarDag,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "selector",
+ Aliases: []string{"s"},
+ Usage: "A selector over the dag",
+ },
+ &cli.BoolFlag{
+ Name: "strict",
+ Usage: "Fail if the selector finds links to blocks not in the original car",
+ },
+ &cli.IntFlag{
+ Name: "version",
+ Value: 2,
+ Usage: "Write output as a v1 or v2 format car",
+ },
+ },
+ },
+ {
+ Name: "index",
+ Aliases: []string{"i"},
+ Usage: "write out the car with an index",
+ Action: IndexCar,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "codec",
+ Aliases: []string{"c"},
+ Usage: "The type of index to write",
+ Value: multicodec.CarMultihashIndexSorted.String(),
+ },
+ &cli.IntFlag{
+ Name: "version",
+ Value: 2,
+ Usage: "Write output as a v1 or v2 format car",
+ },
+ },
+ Subcommands: []*cli.Command{{
+ Name: "create",
+ Usage: "Write out a detached index",
+ Action: CreateIndex,
+ }},
+ },
+ {
+ Name: "inspect",
+ Usage: "verifies a car and prints a basic report about its contents",
+ Action: InspectCar,
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "full",
+ Value: false,
+ Usage: "Check that the block data hash digests match the CIDs",
+ },
+ },
+ },
+ {
+ Name: "list",
+ Aliases: []string{"l", "ls"},
+ Usage: "List the CIDs in a car",
+ Action: ListCar,
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "verbose",
+ Aliases: []string{"v"},
+ Usage: "Include verbose information about contained blocks",
+ },
+ &cli.BoolFlag{
+ Name: "unixfs",
+ Usage: "List unixfs filesystem from the root of the car",
+ },
+ },
+ },
+ {
+ Name: "root",
+ Usage: "Get the root CID of a car",
+ Action: CarRoot,
+ },
+ {
+ Name: "verify",
+ Aliases: []string{"v"},
+ Usage: "Verify a CAR is wellformed",
+ Action: VerifyCar,
+ },
+ },
+ }
+
+ err := app.Run(os.Args)
+ if err != nil {
+ log.Println(err)
+ return 1
+ }
+ return 0
+}
diff --git a/cmd/car/compile.go b/cmd/car/compile.go
new file mode 100644
index 0000000000..6fdee2eecb
--- /dev/null
+++ b/cmd/car/compile.go
@@ -0,0 +1,463 @@
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+
+ carv1 "github.com/ipfs/boxo/ipld/car"
+ "github.com/ipfs/boxo/ipld/car/util"
+ carv2 "github.com/ipfs/boxo/ipld/car/v2"
+ "github.com/ipfs/boxo/ipld/car/v2/blockstore"
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+ "github.com/ipld/go-ipld-prime"
+ "github.com/ipld/go-ipld-prime/codec"
+ "github.com/ipld/go-ipld-prime/codec/dagjson"
+ "github.com/ipld/go-ipld-prime/datamodel"
+ "github.com/ipld/go-ipld-prime/linking"
+ cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+ "github.com/ipld/go-ipld-prime/node/basicnode"
+ "github.com/ipld/go-ipld-prime/storage/memstore"
+ "github.com/polydawn/refmt/json"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/exp/slices"
+)
+
+var (
+ plusLineRegex = regexp.MustCompile(`^\+\+\+ ([\w-]+) ([\S]+ )?([\w]+)$`)
+)
+
+// CompileCar is a command to translate between a human-debuggable patch-like format and a car file.
+func CompileCar(c *cli.Context) error {
+ var err error
+ inStream := os.Stdin
+ if c.Args().Len() >= 1 {
+ inStream, err = os.Open(c.Args().First())
+ if err != nil {
+ return err
+ }
+ }
+
+ //parse headers.
+ br := bufio.NewReader(inStream)
+ header, _, err := br.ReadLine()
+ if err != nil {
+ return err
+ }
+
+ v2 := strings.HasPrefix(string(header), "car compile --v2 ")
+ rest := strings.TrimPrefix(string(header), "car compile ")
+ if v2 {
+ rest = strings.TrimPrefix(rest, "--v2 ")
+ }
+ carName := strings.TrimSpace(rest)
+
+ roots := make([]cid.Cid, 0)
+ for {
+ peek, err := br.Peek(4)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if bytes.Equal(peek, []byte("--- ")) {
+ break
+ }
+ rootLine, _, err := br.ReadLine()
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(string(rootLine), "root ") {
+ var rCidS string
+ fmt.Sscanf(string(rootLine), "root %s", &rCidS)
+ rCid, err := cid.Parse(rCidS)
+ if err != nil {
+ return err
+ }
+ roots = append(roots, rCid)
+ }
+ }
+
+ //parse blocks.
+ cidList := make([]cid.Cid, 0)
+ rawBlocks := make(map[cid.Cid][]byte)
+ rawCodecs := make(map[cid.Cid]string)
+
+ for {
+ nextCid, mode, nextBlk, err := parsePatch(br)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ rawBlocks[nextCid] = nextBlk
+ rawCodecs[nextCid] = mode
+ cidList = append(cidList, nextCid)
+ }
+
+ // Re-create the original IPLD encoded blocks, but allowing for modifications of the
+ // patch data which may generate new CIDs; so we track the DAG relationships and
+ // rewrite CIDs in other referring where they get updated.
+
+ // structure as a tree
+ childMap := make(map[cid.Cid][]cid.Cid)
+ for c := range rawBlocks {
+ if _, ok := childMap[c]; !ok {
+ childMap[c] = make([]cid.Cid, 0)
+ }
+ for d, blk := range rawBlocks {
+ if c.Equals(d) {
+ continue
+ }
+ if strings.Contains(string(blk), c.String()) {
+ if _, ok := childMap[d]; !ok {
+ childMap[d] = make([]cid.Cid, 0)
+ }
+ childMap[d] = append(childMap[d], c)
+ } else if strings.Contains(string(blk), string(c.Bytes())) {
+ if _, ok := childMap[d]; !ok {
+ childMap[d] = make([]cid.Cid, 0)
+ }
+ childMap[d] = append(childMap[d], c)
+ }
+ }
+ }
+
+ // re-parse/re-build CIDs
+ outBlocks := make(map[cid.Cid][]byte)
+ for len(childMap) > 0 {
+ for origCid, kids := range childMap {
+ if len(kids) == 0 {
+ // compile to final cid
+ blk := rawBlocks[origCid]
+ finalCid, finalBlk, err := serializeBlock(c.Context, origCid.Prefix(), rawCodecs[origCid], blk)
+ if err != nil {
+ return err
+ }
+ outBlocks[finalCid] = finalBlk
+ idx := slices.Index(cidList, origCid)
+ cidList[idx] = finalCid
+
+ // update other remaining nodes of the new cid.
+ for otherCid, otherKids := range childMap {
+ for i, otherKid := range otherKids {
+ if otherKid.Equals(origCid) {
+ if !finalCid.Equals(origCid) {
+ // update block
+ rawBlocks[otherCid] = bytes.ReplaceAll(rawBlocks[otherCid], origCid.Bytes(), finalCid.Bytes())
+ rawBlocks[otherCid] = bytes.ReplaceAll(rawBlocks[otherCid], []byte(origCid.String()), []byte(finalCid.String()))
+ }
+ // remove from childMap
+ nok := append(otherKids[0:i], otherKids[i+1:]...)
+ childMap[otherCid] = nok
+ break // to next child map entry.
+ }
+ }
+ }
+
+ delete(childMap, origCid)
+ }
+ }
+ }
+
+ if !v2 {
+ // write output
+ outStream := os.Stdout
+ if c.IsSet("output") {
+ outFileName := c.String("output")
+ if outFileName == "" {
+ outFileName = carName
+ }
+ outFile, err := os.Create(outFileName)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+ outStream = outFile
+ }
+
+ if err := carv1.WriteHeader(&carv1.CarHeader{
+ Roots: roots,
+ Version: 1,
+ }, outStream); err != nil {
+ return err
+ }
+ for c, blk := range outBlocks {
+ if err := util.LdWrite(outStream, c.Bytes(), blk); err != nil {
+ return err
+ }
+ }
+ } else {
+ outFileName := c.String("output")
+ if outFileName == "" {
+ outFileName = carName
+ }
+
+ if outFileName == "-" && !c.IsSet("output") {
+ return fmt.Errorf("cannot stream carv2's to stdout")
+ }
+ bs, err := blockstore.OpenReadWrite(outFileName, roots)
+ if err != nil {
+ return err
+ }
+ for _, bc := range cidList {
+ blk := outBlocks[bc]
+ ob, _ := blocks.NewBlockWithCid(blk, bc)
+ bs.Put(c.Context, ob)
+ }
+ return bs.Finalize()
+ }
+
+ return nil
+}
+
+func serializeBlock(ctx context.Context, codec cid.Prefix, encoding string, raw []byte) (cid.Cid, []byte, error) {
+ ls := cidlink.DefaultLinkSystem()
+ store := memstore.Store{Bag: map[string][]byte{}}
+ ls.SetReadStorage(&store)
+ ls.SetWriteStorage(&store)
+ b := basicnode.Prototype.Any.NewBuilder()
+ if encoding == "dag-json" {
+ if err := dagjson.Decode(b, bytes.NewBuffer(raw)); err != nil {
+ return cid.Undef, nil, err
+ }
+ } else if encoding == "raw" {
+ if err := b.AssignBytes(raw); err != nil {
+ return cid.Undef, nil, err
+ }
+ } else {
+ return cid.Undef, nil, fmt.Errorf("unknown encoding: %s", encoding)
+ }
+ lnk, err := ls.Store(linking.LinkContext{Ctx: ctx}, cidlink.LinkPrototype{Prefix: codec}, b.Build())
+ if err != nil {
+ return cid.Undef, nil, err
+ }
+ outCid := lnk.(cidlink.Link).Cid
+ outBytes, outErr := store.Get(ctx, outCid.KeyString())
+ return outCid, outBytes, outErr
+}
+
+// DebugCar is a command to translate between a car file, and a human-debuggable patch-like format.
+func DebugCar(c *cli.Context) error {
+ var err error
+ inStream := os.Stdin
+ inFile := "-"
+ if c.Args().Len() >= 1 {
+ inFile = c.Args().First()
+ inStream, err = os.Open(inFile)
+ if err != nil {
+ return err
+ }
+ }
+
+ rd, err := carv2.NewBlockReader(inStream)
+ if err != nil {
+ return err
+ }
+
+ // patch the header.
+ outStream := os.Stdout
+ if c.IsSet("output") {
+ outFileName := c.String("output")
+ outFile, err := os.Create(outFileName)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+ outStream = outFile
+ }
+
+ outStream.WriteString("car compile ")
+ if rd.Version == 2 {
+ outStream.WriteString("--v2 ")
+ }
+
+ outStream.WriteString(inFile + "\n")
+ for _, rt := range rd.Roots {
+ fmt.Fprintf(outStream, "root %s\n", rt.String())
+ }
+
+ // patch each block.
+ nxt, err := rd.Next()
+ if err != nil {
+ return err
+ }
+ for nxt != nil {
+ chunk, err := patch(c.Context, nxt.Cid(), nxt.RawData())
+ if err != nil {
+ return err
+ }
+ outStream.Write(chunk)
+
+ nxt, err = rd.Next()
+ if err != nil && err != io.EOF {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func patch(ctx context.Context, c cid.Cid, blk []byte) ([]byte, error) {
+ ls := cidlink.DefaultLinkSystem()
+ store := memstore.Store{Bag: map[string][]byte{}}
+ ls.SetReadStorage(&store)
+ ls.SetWriteStorage(&store)
+ store.Put(ctx, c.KeyString(), blk)
+ node, err := ls.Load(linking.LinkContext{Ctx: ctx}, cidlink.Link{Cid: c}, basicnode.Prototype.Any)
+ if err != nil {
+ return nil, fmt.Errorf("could not load block: %q", err)
+ }
+
+ outMode := "dag-json"
+ if node.Kind() == datamodel.Kind_Bytes && isPrintable(node) {
+ outMode = "raw"
+ }
+ finalBuf := bytes.NewBuffer(nil)
+
+ if outMode == "dag-json" {
+ opts := dagjson.EncodeOptions{
+ EncodeLinks: true,
+ EncodeBytes: true,
+ MapSortMode: codec.MapSortMode_Lexical,
+ }
+ if err := dagjson.Marshal(node, json.NewEncoder(finalBuf, json.EncodeOptions{Line: []byte{'\n'}, Indent: []byte{'\t'}}), opts); err != nil {
+ return nil, err
+ }
+ } else if outMode == "raw" {
+ nb, err := node.AsBytes()
+ if err != nil {
+ return nil, err
+ }
+ finalBuf.Write(nb)
+ }
+
+ // figure out number of lines.
+ lcnt := strings.Count(finalBuf.String(), "\n")
+ crStr := " (no-end-cr)"
+ if b := finalBuf.Bytes(); len(b) > 0 && b[len(b)-1] == '\n' {
+ crStr = ""
+ }
+
+ outBuf := bytes.NewBuffer(nil)
+ outBuf.WriteString("--- " + c.String() + "\n")
+ outBuf.WriteString("+++ " + outMode + crStr + " " + c.String() + "\n")
+ outBuf.WriteString(fmt.Sprintf("@@ -%d,%d +%d,%d @@\n", 0, lcnt, 0, lcnt))
+ outBuf.Write(finalBuf.Bytes())
+ outBuf.WriteString("\n")
+ return outBuf.Bytes(), nil
+}
+
+func isPrintable(n ipld.Node) bool {
+ b, err := n.AsBytes()
+ if err != nil {
+ return false
+ }
+ if !utf8.Valid(b) {
+ return false
+ }
+ if bytes.ContainsAny(b, string([]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x10, 0x11, 0x12, 0x13, 0x14, 0x16, 0x17, 0x18, 0x19, 0x1c, 0x1d, 0x1e, 0x1f})) {
+ return false
+ }
+ // check if would confuse the 'end of patch' checker.
+ if bytes.Contains(b, []byte("\n--- ")) {
+ return false
+ }
+ return true
+}
+
+func parsePatch(br *bufio.Reader) (cid.Cid, string, []byte, error) {
+ // read initial line to parse CID.
+ l1, isPrefix, err := br.ReadLine()
+ if err != nil {
+ return cid.Undef, "", nil, err
+ }
+ if isPrefix {
+ return cid.Undef, "", nil, fmt.Errorf("unexpected long header l1")
+ }
+ var cs string
+ if _, err := fmt.Sscanf(string(l1), "--- %s", &cs); err != nil {
+ return cid.Undef, "", nil, fmt.Errorf("could not parse patch cid line (%s): %q", l1, err)
+ }
+ l2, isPrefix, err := br.ReadLine()
+ if err != nil {
+ return cid.Undef, "", nil, err
+ }
+ if isPrefix {
+ return cid.Undef, "", nil, fmt.Errorf("unexpected long header l2")
+ }
+ var mode string
+ var noEndReturn bool
+ matches := plusLineRegex.FindSubmatch(l2)
+ if len(matches) >= 2 {
+ mode = string(matches[1])
+ }
+ if len(matches) < 2 || string(matches[len(matches)-1]) != cs {
+ return cid.Undef, "", nil, fmt.Errorf("mismatched cid lines: %v", string(l2))
+ }
+ if len(matches[2]) > 0 {
+ noEndReturn = (string(matches[2]) == "(no-end-cr) ")
+ }
+ c, err := cid.Parse(cs)
+ if err != nil {
+ return cid.Undef, "", nil, err
+ }
+
+ // skip over @@ line.
+ l3, isPrefix, err := br.ReadLine()
+ if err != nil {
+ return cid.Undef, "", nil, err
+ }
+ if isPrefix {
+ return cid.Undef, "", nil, fmt.Errorf("unexpected long header l3")
+ }
+ if !strings.HasPrefix(string(l3), "@@") {
+ return cid.Undef, "", nil, fmt.Errorf("unexpected missing chunk prefix")
+ }
+
+ // keep going until next chunk or end.
+ outBuf := bytes.NewBuffer(nil)
+ for {
+ peek, err := br.Peek(4)
+ if err != nil && err != io.EOF {
+ return cid.Undef, "", nil, err
+ }
+ if bytes.Equal(peek, []byte("--- ")) {
+ break
+ }
+ // accumulate to buffer.
+ l, err := br.ReadBytes('\n')
+ if l != nil {
+ outBuf.Write(l)
+ }
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return cid.Undef, "", nil, err
+ }
+ }
+
+ ob := outBuf.Bytes()
+
+ // remove the final line return
+ if len(ob) > 2 && bytes.Equal(ob[len(ob)-2:], []byte("\r\n")) {
+ ob = ob[:len(ob)-2]
+ } else if len(ob) > 1 && bytes.Equal(ob[len(ob)-1:], []byte("\n")) {
+ ob = ob[:len(ob)-1]
+ }
+
+ if noEndReturn && len(ob) > 2 && bytes.Equal(ob[len(ob)-2:], []byte("\r\n")) {
+ ob = ob[:len(ob)-2]
+ } else if noEndReturn && len(ob) > 1 && bytes.Equal(ob[len(ob)-1:], []byte("\n")) {
+ ob = ob[:len(ob)-1]
+ }
+
+ return c, mode, ob, nil
+}
diff --git a/cmd/car/create.go b/cmd/car/create.go
new file mode 100644
index 0000000000..8dfa000252
--- /dev/null
+++ b/cmd/car/create.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "path"
+
+ "github.com/ipfs/boxo/ipld/car/v2"
+ "github.com/ipfs/boxo/ipld/car/v2/blockstore"
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+ "github.com/ipfs/go-unixfsnode/data/builder"
+ dagpb "github.com/ipld/go-codec-dagpb"
+ "github.com/ipld/go-ipld-prime"
+ cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+ "github.com/multiformats/go-multicodec"
+ "github.com/multiformats/go-multihash"
+ "github.com/urfave/cli/v2"
+)
+
+// CreateCar creates a car
+func CreateCar(c *cli.Context) error {
+ var err error
+ if c.Args().Len() == 0 {
+ return fmt.Errorf("a source location to build the car from must be specified")
+ }
+
+ if !c.IsSet("file") {
+ return fmt.Errorf("a file destination must be specified")
+ }
+
+ // make a cid with the right length that we eventually will patch with the root.
+ hasher, err := multihash.GetHasher(multihash.SHA2_256)
+ if err != nil {
+ return err
+ }
+ digest := hasher.Sum([]byte{})
+ hash, err := multihash.Encode(digest, multihash.SHA2_256)
+ if err != nil {
+ return err
+ }
+ proxyRoot := cid.NewCidV1(uint64(multicodec.DagPb), hash)
+
+ options := []car.Option{}
+ switch c.Int("version") {
+ case 1:
+ options = []car.Option{blockstore.WriteAsCarV1(true)}
+ case 2:
+ // already the default
+ default:
+ return fmt.Errorf("invalid CAR version %d", c.Int("version"))
+ }
+
+ cdest, err := blockstore.OpenReadWrite(c.String("file"), []cid.Cid{proxyRoot}, options...)
+ if err != nil {
+ return err
+ }
+
+ // Write the unixfs blocks into the store.
+ root, err := writeFiles(c.Context, cdest, c.Args().Slice()...)
+ if err != nil {
+ return err
+ }
+
+ if err := cdest.Finalize(); err != nil {
+ return err
+ }
+ // re-open/finalize with the final root.
+ return car.ReplaceRootsInFile(c.String("file"), []cid.Cid{root})
+}
+
+func writeFiles(ctx context.Context, bs *blockstore.ReadWrite, paths ...string) (cid.Cid, error) {
+ ls := cidlink.DefaultLinkSystem()
+ ls.TrustedStorage = true
+ ls.StorageReadOpener = func(_ ipld.LinkContext, l ipld.Link) (io.Reader, error) {
+ cl, ok := l.(cidlink.Link)
+ if !ok {
+ return nil, fmt.Errorf("not a cidlink")
+ }
+ blk, err := bs.Get(ctx, cl.Cid)
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewBuffer(blk.RawData()), nil
+ }
+ ls.StorageWriteOpener = func(_ ipld.LinkContext) (io.Writer, ipld.BlockWriteCommitter, error) {
+ buf := bytes.NewBuffer(nil)
+ return buf, func(l ipld.Link) error {
+ cl, ok := l.(cidlink.Link)
+ if !ok {
+ return fmt.Errorf("not a cidlink")
+ }
+ blk, err := blocks.NewBlockWithCid(buf.Bytes(), cl.Cid)
+ if err != nil {
+ return err
+ }
+ err = bs.Put(ctx, blk)
+ return err
+ }, nil
+ }
+
+ topLevel := make([]dagpb.PBLink, 0, len(paths))
+ for _, p := range paths {
+ l, size, err := builder.BuildUnixFSRecursive(p, &ls)
+ if err != nil {
+ return cid.Undef, err
+ }
+ name := path.Base(p)
+ entry, err := builder.BuildUnixFSDirectoryEntry(name, int64(size), l)
+ if err != nil {
+ return cid.Undef, err
+ }
+ topLevel = append(topLevel, entry)
+ }
+
+ // make a directory for the file(s).
+
+ root, _, err := builder.BuildUnixFSDirectory(topLevel, &ls)
+ if err != nil {
+ return cid.Undef, err
+ }
+ rcl, ok := root.(cidlink.Link)
+ if !ok {
+ return cid.Undef, fmt.Errorf("could not interpret %s", root)
+ }
+
+ return rcl.Cid, nil
+}
diff --git a/cmd/car/detach.go b/cmd/car/detach.go
new file mode 100644
index 0000000000..d68593d160
--- /dev/null
+++ b/cmd/car/detach.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ carv2 "github.com/ipfs/boxo/ipld/car/v2"
+ "github.com/ipfs/boxo/ipld/car/v2/index"
+ "github.com/multiformats/go-multihash"
+ "github.com/urfave/cli/v2"
+)
+
+// DetachCar is a command to output the index part of a car.
+func DetachCar(c *cli.Context) error {
+ r, err := carv2.OpenReader(c.Args().Get(0))
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ if !r.Header.HasIndex() {
+ return fmt.Errorf("no index present")
+ }
+
+ outStream := os.Stdout
+ if c.Args().Len() >= 2 {
+ outStream, err = os.Create(c.Args().Get(1))
+ if err != nil {
+ return err
+ }
+ }
+ defer outStream.Close()
+
+ ir, err := r.IndexReader()
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(outStream, ir)
+ return err
+}
+
+// DetachCarList prints a list of what's found in a detached index.
+func DetachCarList(c *cli.Context) error {
+ var err error
+
+ inStream := os.Stdin
+ if c.Args().Len() >= 1 {
+ inStream, err = os.Open(c.Args().First())
+ if err != nil {
+ return err
+ }
+ defer inStream.Close()
+ }
+
+ idx, err := index.ReadFrom(inStream)
+ if err != nil {
+ return err
+ }
+
+ if iidx, ok := idx.(index.IterableIndex); ok {
+ err := iidx.ForEach(func(mh multihash.Multihash, offset uint64) error {
+ fmt.Printf("%s %d\n", mh, offset)
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return fmt.Errorf("index of codec %s is not iterable", idx.Codec())
+}
diff --git a/cmd/car/extract.go b/cmd/car/extract.go
new file mode 100644
index 0000000000..9a80317501
--- /dev/null
+++ b/cmd/car/extract.go
@@ -0,0 +1,443 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/ipfs/boxo/ipld/car/v2"
+ carstorage "github.com/ipfs/boxo/ipld/car/v2/storage"
+ "github.com/ipfs/go-cid"
+ "github.com/ipfs/go-unixfsnode"
+ "github.com/ipfs/go-unixfsnode/data"
+ "github.com/ipfs/go-unixfsnode/file"
+ dagpb "github.com/ipld/go-codec-dagpb"
+ "github.com/ipld/go-ipld-prime"
+ cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+ basicnode "github.com/ipld/go-ipld-prime/node/basic"
+ "github.com/ipld/go-ipld-prime/storage"
+ "github.com/urfave/cli/v2"
+)
+
+var ErrNotDir = fmt.Errorf("not a directory")
+
+// ExtractCar pulls files and directories out of a car
+func ExtractCar(c *cli.Context) error {
+ outputDir, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+ if c.Args().Present() {
+ outputDir = c.Args().First()
+ }
+
+ var store storage.ReadableStorage
+ var roots []cid.Cid
+
+ if c.String("file") == "" {
+ if f, ok := c.App.Reader.(*os.File); ok {
+ stat, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ if (stat.Mode() & os.ModeCharDevice) != 0 {
+ // Is a terminal. In reality the user is unlikely to actually paste
+ // CAR data into this terminal, but this message may serve to make
+ // them aware that they can/should pipe data into this command.
+ stopKeys := "Ctrl+D"
+ if runtime.GOOS == "windows" {
+ stopKeys = "Ctrl+Z, Enter"
+ }
+ fmt.Fprintf(c.App.ErrWriter, "Reading from stdin; use %s to end\n", stopKeys)
+ }
+ }
+ var err error
+ store, roots, err = NewStdinReadStorage(c.App.Reader)
+ if err != nil {
+ return err
+ }
+ } else {
+ carFile, err := os.Open(c.String("file"))
+ if err != nil {
+ return err
+ }
+ store, err = carstorage.OpenReadable(carFile)
+ if err != nil {
+ return err
+ }
+ roots = store.(carstorage.ReadableCar).Roots()
+ }
+
+ ls := cidlink.DefaultLinkSystem()
+ ls.TrustedStorage = true
+ ls.SetReadStorage(store)
+
+ path, err := pathSegments(c.String("path"))
+ if err != nil {
+ return err
+ }
+
+ var extractedFiles int
+ for _, root := range roots {
+ count, err := extractRoot(c, &ls, root, outputDir, path)
+ if err != nil {
+ return err
+ }
+ extractedFiles += count
+ }
+ if extractedFiles == 0 {
+ return cli.Exit("no files extracted", 1)
+ } else {
+ fmt.Fprintf(c.App.ErrWriter, "extracted %d file(s)\n", extractedFiles)
+ }
+
+ return nil
+}
+
+func extractRoot(c *cli.Context, ls *ipld.LinkSystem, root cid.Cid, outputDir string, path []string) (int, error) {
+ if root.Prefix().Codec == cid.Raw {
+ if c.IsSet("verbose") {
+ fmt.Fprintf(c.App.ErrWriter, "skipping raw root %s\n", root)
+ }
+ return 0, nil
+ }
+
+ pbn, err := ls.Load(ipld.LinkContext{}, cidlink.Link{Cid: root}, dagpb.Type.PBNode)
+ if err != nil {
+ return 0, err
+ }
+ pbnode := pbn.(dagpb.PBNode)
+
+ ufn, err := unixfsnode.Reify(ipld.LinkContext{}, pbnode, ls)
+ if err != nil {
+ return 0, err
+ }
+
+ var outputResolvedDir string
+ if outputDir != "-" {
+ outputResolvedDir, err = filepath.EvalSymlinks(outputDir)
+ if err != nil {
+ return 0, err
+ }
+ if _, err := os.Stat(outputResolvedDir); os.IsNotExist(err) {
+ if err := os.Mkdir(outputResolvedDir, 0755); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ count, err := extractDir(c, ls, ufn, outputResolvedDir, "/", path)
+ if err != nil {
+ if !errors.Is(err, ErrNotDir) {
+ return 0, fmt.Errorf("%s: %w", root, err)
+ }
+
+ // if it's not a directory, it's a file.
+ ufsData, err := pbnode.LookupByString("Data")
+ if err != nil {
+ return 0, err
+ }
+ ufsBytes, err := ufsData.AsBytes()
+ if err != nil {
+ return 0, err
+ }
+ ufsNode, err := data.DecodeUnixFSData(ufsBytes)
+ if err != nil {
+ return 0, err
+ }
+ var outputName string
+ if outputDir != "-" {
+ outputName = filepath.Join(outputResolvedDir, "unknown")
+ }
+ if ufsNode.DataType.Int() == data.Data_File || ufsNode.DataType.Int() == data.Data_Raw {
+ if err := extractFile(c, ls, pbnode, outputName); err != nil {
+ return 0, err
+ }
+ }
+ return 1, nil
+ }
+
+ return count, nil
+}
+
+func resolvePath(root, pth string) (string, error) {
+ rp, err := filepath.Rel("/", pth)
+ if err != nil {
+ return "", fmt.Errorf("couldn't check relative-ness of %s: %w", pth, err)
+ }
+ joined := path.Join(root, rp)
+
+ basename := path.Dir(joined)
+ final, err := filepath.EvalSymlinks(basename)
+ if err != nil {
+ return "", fmt.Errorf("couldn't eval symlinks in %s: %w", basename, err)
+ }
+ if final != path.Clean(basename) {
+ return "", fmt.Errorf("path attempts to redirect through symlinks")
+ }
+ return joined, nil
+}
+
+func extractDir(c *cli.Context, ls *ipld.LinkSystem, n ipld.Node, outputRoot, outputPath string, matchPath []string) (int, error) {
+ if outputRoot != "" {
+ dirPath, err := resolvePath(outputRoot, outputPath)
+ if err != nil {
+ return 0, err
+ }
+ // make the directory.
+ if err := os.MkdirAll(dirPath, 0755); err != nil {
+ return 0, err
+ }
+ }
+
+ if n.Kind() != ipld.Kind_Map {
+ return 0, ErrNotDir
+ }
+
+ subPath := matchPath
+ if len(matchPath) > 0 {
+ subPath = matchPath[1:]
+ }
+
+ extractElement := func(name string, n ipld.Node) (int, error) {
+ var nextRes string
+ if outputRoot != "" {
+ var err error
+ nextRes, err = resolvePath(outputRoot, path.Join(outputPath, name))
+ if err != nil {
+ return 0, err
+ }
+ if c.IsSet("verbose") {
+ fmt.Fprintf(c.App.Writer, "%s\n", nextRes)
+ }
+ }
+
+ if n.Kind() != ipld.Kind_Link {
+ return 0, fmt.Errorf("unexpected map value for %s at %s", name, outputPath)
+ }
+ // a directory may be represented as a map of name: if unixADL is applied
+ vl, err := n.AsLink()
+ if err != nil {
+ return 0, err
+ }
+ dest, err := ls.Load(ipld.LinkContext{}, vl, basicnode.Prototype.Any)
+ if err != nil {
+ if nf, ok := err.(interface{ NotFound() bool }); ok && nf.NotFound() {
+ fmt.Fprintf(c.App.ErrWriter, "data for entry not found: %s (skipping...)\n", path.Join(outputPath, name))
+ return 0, nil
+ }
+ return 0, err
+ }
+ // degenerate files are handled here.
+ if dest.Kind() == ipld.Kind_Bytes {
+ if err := extractFile(c, ls, dest, nextRes); err != nil {
+ return 0, err
+ }
+ return 1, nil
+ }
+
+ // dir / pbnode
+ pbb := dagpb.Type.PBNode.NewBuilder()
+ if err := pbb.AssignNode(dest); err != nil {
+ return 0, err
+ }
+ pbnode := pbb.Build().(dagpb.PBNode)
+
+ // interpret dagpb 'data' as unixfs data and look at type.
+ ufsData, err := pbnode.LookupByString("Data")
+ if err != nil {
+ return 0, err
+ }
+ ufsBytes, err := ufsData.AsBytes()
+ if err != nil {
+ return 0, err
+ }
+ ufsNode, err := data.DecodeUnixFSData(ufsBytes)
+ if err != nil {
+ return 0, err
+ }
+
+ switch ufsNode.DataType.Int() {
+ case data.Data_Directory, data.Data_HAMTShard:
+ ufn, err := unixfsnode.Reify(ipld.LinkContext{}, pbnode, ls)
+ if err != nil {
+ return 0, err
+ }
+ return extractDir(c, ls, ufn, outputRoot, path.Join(outputPath, name), subPath)
+ case data.Data_File, data.Data_Raw:
+ if err := extractFile(c, ls, pbnode, nextRes); err != nil {
+ return 0, err
+ }
+ return 1, nil
+ case data.Data_Symlink:
+ if nextRes == "" {
+ return 0, fmt.Errorf("cannot extract a symlink to stdout")
+ }
+ data := ufsNode.Data.Must().Bytes()
+ if err := os.Symlink(string(data), nextRes); err != nil {
+ return 0, err
+ }
+ return 1, nil
+ default:
+ return 0, fmt.Errorf("unknown unixfs type: %d", ufsNode.DataType.Int())
+ }
+ }
+
+ // specific path segment
+ if len(matchPath) > 0 {
+ val, err := n.LookupByString(matchPath[0])
+ if err != nil {
+ return 0, err
+ }
+ return extractElement(matchPath[0], val)
+ }
+
+	if outputRoot == "" && len(matchPath) == 0 {
+ return 0, fmt.Errorf("cannot extract a directory to stdout, use a path to extract a specific file")
+ }
+
+ // everything
+ var count int
+ var shardSkip int
+ mi := n.MapIterator()
+ for !mi.Done() {
+ key, val, err := mi.Next()
+ if err != nil {
+ if nf, ok := err.(interface{ NotFound() bool }); ok && nf.NotFound() {
+ shardSkip++
+ continue
+ }
+ return 0, err
+ }
+ ks, err := key.AsString()
+ if err != nil {
+ return 0, err
+ }
+ ecount, err := extractElement(ks, val)
+ if err != nil {
+ return 0, err
+ }
+ count += ecount
+ }
+ if shardSkip > 0 {
+ fmt.Fprintf(c.App.ErrWriter, "data for entry not found for %d unknown sharded entries (skipped...)\n", shardSkip)
+ }
+ return count, nil
+}
+
+func extractFile(c *cli.Context, ls *ipld.LinkSystem, n ipld.Node, outputName string) error {
+ node, err := file.NewUnixFSFile(c.Context, n, ls)
+ if err != nil {
+ return err
+ }
+ nlr, err := node.AsLargeBytes()
+ if err != nil {
+ return err
+ }
+ var f *os.File
+ if outputName == "" {
+ f = os.Stdout
+ } else {
+ f, err = os.Create(outputName)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ }
+ _, err = io.Copy(f, nlr)
+ return err
+}
+
+// TODO: dedupe this with lassie, probably into go-unixfsnode
+func pathSegments(path string) ([]string, error) {
+ segments := strings.Split(path, "/")
+ filtered := make([]string, 0, len(segments))
+ for i := 0; i < len(segments); i++ {
+ if segments[i] == "" {
+ // Allow one leading and one trailing '/' at most
+ if i == 0 || i == len(segments)-1 {
+ continue
+ }
+ return nil, fmt.Errorf("invalid empty path segment at position %d", i)
+ }
+ if segments[i] == "." || segments[i] == ".." {
+ return nil, fmt.Errorf("'%s' is unsupported in paths", segments[i])
+ }
+ filtered = append(filtered, segments[i])
+ }
+ return filtered, nil
+}
+
+var _ storage.ReadableStorage = (*stdinReadStorage)(nil)
+
+type stdinReadStorage struct {
+ blocks map[string][]byte
+ done bool
+ lk *sync.RWMutex
+ cond *sync.Cond
+}
+
+func NewStdinReadStorage(reader io.Reader) (*stdinReadStorage, []cid.Cid, error) {
+ var lk sync.RWMutex
+ srs := &stdinReadStorage{
+ blocks: make(map[string][]byte),
+ lk: &lk,
+ cond: sync.NewCond(&lk),
+ }
+ rdr, err := car.NewBlockReader(reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ go func() {
+ for {
+ blk, err := rdr.Next()
+ if err == io.EOF {
+ srs.lk.Lock()
+ srs.done = true
+ srs.lk.Unlock()
+ return
+ }
+ if err != nil {
+ panic(err)
+ }
+ srs.lk.Lock()
+ srs.blocks[string(blk.Cid().Hash())] = blk.RawData()
+ srs.cond.Broadcast()
+ srs.lk.Unlock()
+ }
+ }()
+ return srs, rdr.Roots, nil
+}
+
+func (srs *stdinReadStorage) Has(ctx context.Context, key string) (bool, error) {
+ _, err := srs.Get(ctx, key)
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func (srs *stdinReadStorage) Get(ctx context.Context, key string) ([]byte, error) {
+ c, err := cid.Cast([]byte(key))
+ if err != nil {
+ return nil, err
+ }
+ srs.lk.Lock()
+ defer srs.lk.Unlock()
+ for {
+ if data, ok := srs.blocks[string(c.Hash())]; ok {
+ return data, nil
+ }
+ if srs.done {
+ return nil, carstorage.ErrNotFound{Cid: c}
+ }
+ srs.cond.Wait()
+ }
+}
diff --git a/cmd/car/filter.go b/cmd/car/filter.go
new file mode 100644
index 0000000000..6d74f9fe40
--- /dev/null
+++ b/cmd/car/filter.go
@@ -0,0 +1,128 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ carv2 "github.com/ipfs/boxo/ipld/car/v2"
+ "github.com/ipfs/boxo/ipld/car/v2/blockstore"
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+)
+
+// FilterCar is a command to select a subset of a car by CID.
+func FilterCar(c *cli.Context) error {
+ if c.Args().Len() < 2 {
+ return fmt.Errorf("an output filename must be provided")
+ }
+
+ fd, err := os.Open(c.Args().First())
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+ rd, err := carv2.NewBlockReader(fd)
+ if err != nil {
+ return err
+ }
+
+ // Get the set of CIDs from stdin.
+ inStream := os.Stdin
+ if c.IsSet("cidFile") {
+ inStream, err = os.Open(c.String("cidFile"))
+ if err != nil {
+ return err
+ }
+ defer inStream.Close()
+ }
+ cidMap, err := parseCIDS(inStream)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("filtering to %d cids\n", len(cidMap))
+
+ outRoots := make([]cid.Cid, 0)
+ for _, r := range rd.Roots {
+ if _, ok := cidMap[r]; ok {
+ outRoots = append(outRoots, r)
+ }
+ }
+
+ outPath := c.Args().Get(1)
+ if !c.Bool("append") {
+ if _, err := os.Stat(outPath); err == nil || !os.IsNotExist(err) {
+ // output to an existing file.
+ if err := os.Truncate(outPath, 0); err != nil {
+ return err
+ }
+ }
+ } else {
+ // roots will need to be whatever is in the output already.
+ cv2r, err := carv2.OpenReader(outPath)
+ if err != nil {
+ return err
+ }
+ if cv2r.Version != 2 {
+ return fmt.Errorf("can only append to version 2 car files")
+ }
+ outRoots, err = cv2r.Roots()
+ if err != nil {
+ return err
+ }
+ _ = cv2r.Close()
+ }
+
+ if len(outRoots) == 0 {
+ fmt.Fprintf(os.Stderr, "warning: no roots defined after filtering\n")
+ }
+
+ bs, err := blockstore.OpenReadWrite(outPath, outRoots)
+ if err != nil {
+ return err
+ }
+
+ for {
+ blk, err := rd.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+ if _, ok := cidMap[blk.Cid()]; ok {
+ if err := bs.Put(c.Context, blk); err != nil {
+ return err
+ }
+ }
+ }
+ return bs.Finalize()
+}
+
+func parseCIDS(r io.Reader) (map[cid.Cid]struct{}, error) {
+ cids := make(map[cid.Cid]struct{})
+ br := bufio.NewReader(r)
+ for {
+ line, _, err := br.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ return cids, nil
+ }
+ return nil, err
+ }
+ trimLine := strings.TrimSpace(string(line))
+ if len(trimLine) == 0 {
+ continue
+ }
+ c, err := cid.Parse(trimLine)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := cids[c]; ok {
+ fmt.Fprintf(os.Stderr, "duplicate cid: %s\n", c)
+ }
+ cids[c] = struct{}{}
+ }
+}
diff --git a/cmd/car/get.go b/cmd/car/get.go
new file mode 100644
index 0000000000..0d249e6a5d
--- /dev/null
+++ b/cmd/car/get.go
@@ -0,0 +1,215 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "io"
+ "os"
+
+ dagpb "github.com/ipld/go-codec-dagpb"
+ "github.com/ipld/go-ipld-prime"
+ _ "github.com/ipld/go-ipld-prime/codec/cbor"
+ _ "github.com/ipld/go-ipld-prime/codec/dagcbor"
+ _ "github.com/ipld/go-ipld-prime/codec/dagjson"
+ _ "github.com/ipld/go-ipld-prime/codec/json"
+ _ "github.com/ipld/go-ipld-prime/codec/raw"
+
+ "github.com/ipfs/boxo/ipld/car"
+ "github.com/ipfs/boxo/ipld/car/v2/blockstore"
+ "github.com/ipfs/go-cid"
+ ipldfmt "github.com/ipfs/go-ipld-format"
+ "github.com/ipfs/go-unixfsnode"
+ "github.com/ipld/go-ipld-prime/datamodel"
+ "github.com/ipld/go-ipld-prime/linking"
+ cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+ "github.com/ipld/go-ipld-prime/node/basicnode"
+ "github.com/ipld/go-ipld-prime/traversal"
+ "github.com/ipld/go-ipld-prime/traversal/selector"
+ selectorParser "github.com/ipld/go-ipld-prime/traversal/selector/parse"
+ "github.com/urfave/cli/v2"
+)
+
+// GetCarBlock is a command to get a block out of a car
+func GetCarBlock(c *cli.Context) error {
+ if c.Args().Len() < 2 {
+ return fmt.Errorf("usage: car get-block [output file]")
+ }
+
+ bs, err := blockstore.OpenReadOnly(c.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ // string to CID
+ blkCid, err := cid.Parse(c.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ blk, err := bs.Get(c.Context, blkCid)
+ if err != nil {
+ return err
+ }
+
+ outStream := os.Stdout
+ if c.Args().Len() >= 3 {
+ outStream, err = os.Create(c.Args().Get(2))
+ if err != nil {
+ return err
+ }
+ defer outStream.Close()
+ }
+
+ _, err = outStream.Write(blk.RawData())
+ return err
+}
+
+// GetCarDag is a command to get a dag out of a car
+func GetCarDag(c *cli.Context) error {
+ if c.Args().Len() < 2 {
+ return fmt.Errorf("usage: car get-dag [-s selector] [root cid]