From ba283fb8387d0389d7cdcbc757213f04bba2d83b Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 11:35:19 +0000 Subject: [PATCH 01/37] Start on impl of dev groups Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 8 ++++++++ pkg/storage/devicegroup/device_group_test.go | 1 + 2 files changed, 9 insertions(+) create mode 100644 pkg/storage/devicegroup/device_group.go create mode 100644 pkg/storage/devicegroup/device_group_test.go diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go new file mode 100644 index 0000000..da350b4 --- /dev/null +++ b/pkg/storage/devicegroup/device_group.go @@ -0,0 +1,8 @@ +package devicegroup + +type DeviceGroup struct { +} + +func New() *DeviceGroup { + return &DeviceGroup{} +} diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go new file mode 100644 index 0000000..db78cf1 --- /dev/null +++ b/pkg/storage/devicegroup/device_group_test.go @@ -0,0 +1 @@ +package devicegroup From e7eef751b1e9f53e6d34e1f0cecb44fec0cce319 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 12:00:07 +0000 Subject: [PATCH 02/37] Device group creation Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 69 ++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 2 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index da350b4..e8ac57a 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -1,8 +1,73 @@ package devicegroup +import ( + "time" + + "github.com/loopholelabs/logging/types" + "github.com/loopholelabs/silo/pkg/storage" + "github.com/loopholelabs/silo/pkg/storage/config" + "github.com/loopholelabs/silo/pkg/storage/device" + "github.com/loopholelabs/silo/pkg/storage/dirtytracker" + "github.com/loopholelabs/silo/pkg/storage/expose" + "github.com/loopholelabs/silo/pkg/storage/metrics" + "github.com/loopholelabs/silo/pkg/storage/modules" + "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" +) + +const volatilityExpiry = 30 * time.Minute + type DeviceGroup struct { + devices []*DeviceInformation +} + +type DeviceInformation struct { + schema *config.DeviceSchema + prov storage.Provider + exp storage.ExposedStorage + volatility *volatilitymonitor.VolatilityMonitor + dirtyLocal *dirtytracker.Local + dirtyRemote *dirtytracker.Remote } -func New() *DeviceGroup { - return &DeviceGroup{} +func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { + dg := &DeviceGroup{ + devices: make([]*DeviceInformation, 0), + } + + for _, s := range ds { + prov, exp, err := device.NewDeviceWithLoggingMetrics(s, log, met) + if err != nil { + // We should try to close / shutdown any successful devices we created here... + // But it's likely to be critical. + for _, d := range dg.devices { + d.exp.Shutdown() + d.prov.Close() + } + return nil, err + } + + mlocal := modules.NewMetrics(prov) + dirtyLocal, dirtyRemote := dirtytracker.NewDirtyTracker(mlocal, int(s.ByteBlockSize())) + vmonitor := volatilitymonitor.NewVolatilityMonitor(dirtyLocal, int(s.ByteBlockSize()), volatilityExpiry) + vmonitor.AddAll() + exp.SetProvider(vmonitor) + + // Add to metrics if given. 
+ if met != nil { + met.AddMetrics(s.Name, mlocal) + met.AddNBD(s.Name, exp.(*expose.ExposedStorageNBDNL)) + met.AddDirtyTracker(s.Name, dirtyRemote) + met.AddVolatilityMonitor(s.Name, vmonitor) + } + + dg.devices = append(dg.devices, &DeviceInformation{ + schema: s, + prov: prov, + exp: exp, + volatility: vmonitor, + dirtyLocal: dirtyLocal, + dirtyRemote: dirtyRemote, + }) + } + return dg, nil } From a3dd4a2503f477ffd9edef200bacc20ab4a5c473 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 17:28:02 +0000 Subject: [PATCH 03/37] First test for deviceGroup Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 49 +++++++++++++++++-- pkg/storage/devicegroup/device_group_test.go | 37 ++++++++++++++ pkg/storage/devicegroup/testdev_test1 | Bin 0 -> 8388608 bytes 3 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 pkg/storage/devicegroup/testdev_test1 diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index e8ac57a..359f1e0 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -1,6 +1,7 @@ package devicegroup import ( + "errors" "time" "github.com/loopholelabs/logging/types" @@ -11,12 +12,15 @@ import ( "github.com/loopholelabs/silo/pkg/storage/expose" "github.com/loopholelabs/silo/pkg/storage/metrics" "github.com/loopholelabs/silo/pkg/storage/modules" + "github.com/loopholelabs/silo/pkg/storage/protocol" "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" ) const volatilityExpiry = 30 * time.Minute type DeviceGroup struct { + log types.Logger + met metrics.SiloMetrics devices []*DeviceInformation } @@ -27,10 +31,13 @@ type DeviceInformation struct { volatility *volatilitymonitor.VolatilityMonitor dirtyLocal *dirtytracker.Local dirtyRemote *dirtytracker.Remote + to *protocol.ToProtocol } func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { dg := &DeviceGroup{ + log: log, + met: met, devices: make([]*DeviceInformation, 0), } @@ -39,10 +46,7 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( if err != nil { // We should try to close / shutdown any successful devices we created here... // But it's likely to be critical. 
- for _, d := range dg.devices { - d.exp.Shutdown() - d.prov.Close() - } + dg.CloseAll() return nil, err } @@ -71,3 +75,40 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( } return dg, nil } + +func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { + var e error + + for index, d := range dg.devices { + d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index), pro) + d.to.SetCompression(true) + + if dg.met != nil { + dg.met.AddToProtocol(d.schema.Name, d.to) + } + + schema := d.schema.Encode() + err := d.to.SendDevInfo(d.schema.Name, uint32(d.schema.ByteBlockSize()), string(schema)) + if err != nil { + e = errors.Join(e, err) + } + } + return e +} + +func (dg *DeviceGroup) CloseAll() error { + var e error + for _, d := range dg.devices { + err := d.prov.Close() + if err != nil { + e = errors.Join(e, err) + } + if d.exp != nil { + err = d.exp.Shutdown() + if err != nil { + e = errors.Join(e, err) + } + } + } + return e +} diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index db78cf1..4aca6f0 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -1 +1,38 @@ package devicegroup + +import ( + "fmt" + "os/user" + "testing" + + "github.com/loopholelabs/silo/pkg/storage/config" + "github.com/stretchr/testify/assert" +) + +func TestDeviceGroupBasic(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + panic(err) + } + if currentUser.Username != "root" { + fmt.Printf("Cannot run test unless we are root.\n") + return + } + + ds := []*config.DeviceSchema{ + { + Name: "test1", + Size: "8m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test1", + }, + } + + dg, err := New(ds, nil, nil) + assert.NoError(t, err) + + err = dg.CloseAll() + assert.NoError(t, err) +} diff --git a/pkg/storage/devicegroup/testdev_test1 b/pkg/storage/devicegroup/testdev_test1 new file mode 100644 index 0000000000000000000000000000000000000000..ea1a949c28e181bbb22d99ce24f090420c751457 GIT binary patch literal 8388608 zcmeFtfdBvi0Dz$VsTV1P3IhfV7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj 
[... base85-encoded GIT binary patch data for the new 8 MiB test file testdev_test1 omitted ...]
literal 0 HcmV?d00001 From 69f5b0872efbfabaa91c0aa731461d20ee5d7098 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 18:30:03 +0000 Subject: [PATCH 04/37] Added dg.SendDevInfo and test Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group_test.go | 66 +++++++++++++++++++ pkg/storage/devicegroup/testdev_test1 | Bin 8388608 -> 0 bytes 2 files changed, 66
insertions(+) delete mode 100644 pkg/storage/devicegroup/testdev_test1 diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 4aca6f0..c6f366b 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -1,11 +1,14 @@ package devicegroup import ( + "context" "fmt" "os/user" "testing" "github.com/loopholelabs/silo/pkg/storage/config" + "github.com/loopholelabs/silo/pkg/storage/protocol" + "github.com/loopholelabs/silo/pkg/storage/protocol/packets" "github.com/stretchr/testify/assert" ) @@ -28,11 +31,74 @@ func TestDeviceGroupBasic(t *testing.T) { Expose: true, Location: "testdev_test1", }, + { + Name: "test2", + Size: "16m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test2", + }, + } + + dg, err := New(ds, nil, nil) + assert.NoError(t, err) + + err = dg.CloseAll() + assert.NoError(t, err) +} + +func TestDeviceGroupSendDevInfo(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + panic(err) + } + if currentUser.Username != "root" { + fmt.Printf("Cannot run test unless we are root.\n") + return + } + + ds := []*config.DeviceSchema{ + { + Name: "test1", + Size: "8m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test1", + }, + { + Name: "test2", + Size: "16m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test2", + }, } dg, err := New(ds, nil, nil) assert.NoError(t, err) + pro := protocol.NewMockProtocol(context.TODO()) + + err = dg.SendDevInfo(pro) + assert.NoError(t, err) + err = dg.CloseAll() assert.NoError(t, err) + + // Make sure they all got sent correctly... + for index, r := range ds { + _, data, err := pro.WaitForCommand(uint32(index), packets.CommandDevInfo) + assert.NoError(t, err) + + di, err := packets.DecodeDevInfo(data) + assert.NoError(t, err) + + assert.Equal(t, r.Name, di.Name) + assert.Equal(t, uint64(r.ByteSize()), di.Size) + assert.Equal(t, uint32(r.ByteBlockSize()), di.BlockSize) + assert.Equal(t, string(r.Encode()), di.Schema) + } } diff --git a/pkg/storage/devicegroup/testdev_test1 b/pkg/storage/devicegroup/testdev_test1 deleted file mode 100644 index ea1a949c28e181bbb22d99ce24f090420c751457..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8388608 zcmeFtfdBvi0Dz$VsTV1P3IhfV7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM 
From 2964c7eb7df0857fad0e426a13a7baf963c5d3a7 Mon Sep 17 00:00:00 2001
From: Jimmy Moore
Date: Mon, 9 Dec 2024 18:31:29 +0000
Subject: [PATCH 05/37] test cleanup Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index c6f366b..4fe13dd 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -3,6 +3,7 @@ package devicegroup import ( "context" "fmt" + "os" "os/user" "testing" @@ -41,6 +42,11 @@ func TestDeviceGroupBasic(t *testing.T) { }, } + t.Cleanup(func() { + os.Remove("testdev_test1") + os.Remove("testdev_test2") + }) + dg, err := New(ds, nil, nil) assert.NoError(t, err) @@ -77,6 +83,11 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { }, } + t.Cleanup(func() { + os.Remove("testdev_test1") + os.Remove("testdev_test2") + }) + dg, err := New(ds, nil, nil) assert.NoError(t, err) From 6c82e53c8a2ad174655d816d55f7240ee4ecf135 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 19:17:09 +0000 Subject: [PATCH 06/37] Added first try at dg.MigrateAll Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 156 ++++++++++++++++++++++-- 1 file changed, 145 insertions(+), 11 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 359f1e0..27beb4f 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -6,13 +6,16 @@ import ( "github.com/loopholelabs/logging/types" "github.com/loopholelabs/silo/pkg/storage" + "github.com/loopholelabs/silo/pkg/storage/blocks" "github.com/loopholelabs/silo/pkg/storage/config" "github.com/loopholelabs/silo/pkg/storage/device" "github.com/loopholelabs/silo/pkg/storage/dirtytracker" "github.com/loopholelabs/silo/pkg/storage/expose" "github.com/loopholelabs/silo/pkg/storage/metrics" + "github.com/loopholelabs/silo/pkg/storage/migrator" "github.com/loopholelabs/silo/pkg/storage/modules" "github.com/loopholelabs/silo/pkg/storage/protocol" + "github.com/loopholelabs/silo/pkg/storage/protocol/packets" "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" ) @@ -25,13 +28,20 @@ type DeviceGroup struct { } type DeviceInformation struct { - schema *config.DeviceSchema - prov storage.Provider - exp storage.ExposedStorage - volatility *volatilitymonitor.VolatilityMonitor - dirtyLocal *dirtytracker.Local - dirtyRemote *dirtytracker.Remote - to *protocol.ToProtocol + size uint64 + blockSize uint64 + numBlocks int + schema *config.DeviceSchema + prov storage.Provider + storage storage.LockableProvider + exp storage.ExposedStorage + volatility *volatilitymonitor.VolatilityMonitor + dirtyLocal *dirtytracker.Local + dirtyRemote *dirtytracker.Remote + to *protocol.ToProtocol + orderer *blocks.PriorityBlockOrder + migrator *migrator.Migrator + migrationError chan error } func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { @@ -50,10 +60,17 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( return nil, err } - mlocal := modules.NewMetrics(prov) - dirtyLocal, dirtyRemote := dirtytracker.NewDirtyTracker(mlocal, int(s.ByteBlockSize())) - vmonitor := volatilitymonitor.NewVolatilityMonitor(dirtyLocal, int(s.ByteBlockSize()), volatilityExpiry) - vmonitor.AddAll() + blockSize := int(s.ByteBlockSize()) + + local := modules.NewLockable(prov) + mlocal := modules.NewMetrics(local) + dirtyLocal, dirtyRemote := dirtytracker.NewDirtyTracker(mlocal, blockSize) + vmonitor := 
volatilitymonitor.NewVolatilityMonitor(dirtyLocal, blockSize, volatilityExpiry) + + totalBlocks := (int(local.Size()) + int(blockSize) - 1) / int(blockSize) + orderer := blocks.NewPriorityBlockOrder(totalBlocks, vmonitor) + orderer.AddAll() + exp.SetProvider(vmonitor) // Add to metrics if given. @@ -65,12 +82,17 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( } dg.devices = append(dg.devices, &DeviceInformation{ + size: local.Size(), + blockSize: uint64(blockSize), + numBlocks: totalBlocks, schema: s, prov: prov, + storage: local, exp: exp, volatility: vmonitor, dirtyLocal: dirtyLocal, dirtyRemote: dirtyRemote, + orderer: orderer, }) } return dg, nil @@ -83,6 +105,37 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index), pro) d.to.SetCompression(true) + // Setup d.to + // TODO: We *may* want to check errors here, but it will surface elsewhere. + go d.to.HandleNeedAt(func(offset int64, length int32) { + // Prioritize blocks + endOffset := uint64(offset + int64(length)) + if endOffset > d.size { + endOffset = d.size + } + + startBlock := int(offset / int64(d.blockSize)) + endBlock := int((endOffset-1)/d.blockSize) + 1 + for b := startBlock; b < endBlock; b++ { + d.orderer.PrioritiseBlock(b) + } + }) + + // TODO: We *may* want to check errors here, but it will surface elsewhere. + go d.to.HandleDontNeedAt(func(offset int64, length int32) { + // Deprioritize blocks + endOffset := uint64(offset + int64(length)) + if endOffset > d.size { + endOffset = d.size + } + + startBlock := int(offset / int64(d.blockSize)) + endBlock := int((endOffset-1)/d.blockSize) + 1 + for b := startBlock; b < endBlock; b++ { + d.orderer.Remove(b) + } + }) + if dg.met != nil { dg.met.AddToProtocol(d.schema.Name, d.to) } @@ -96,6 +149,87 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { return e } +// This will Migrate all devices to the 'to' setup in SendDevInfo stage. +func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.MigrationProgress)) error { + // TODO: We can divide concurrency amongst devices depending on their size... + concurrency := 100 + + for index, d := range dg.devices { + d.migrationError = make(chan error, 1) // We will just hold onto the first error for now. 
+ + setMigrationError := func(err error) { + if err != nil { + select { + case d.migrationError <- err: + default: + } + } + } + + cfg := migrator.NewConfig() + cfg.Logger = dg.log + cfg.BlockSize = int(d.blockSize) + cfg.Concurrency = map[int]int{ + storage.BlockTypeAny: concurrency, + } + cfg.LockerHandler = func() { + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) + d.storage.Lock() + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) + } + cfg.UnlockerHandler = func() { + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) + d.storage.Unlock() + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) + } + cfg.ErrorHandler = func(b *storage.BlockInfo, err error) { + setMigrationError(err) + } + cfg.ProgressHandler = func(p *migrator.MigrationProgress) { + progressHandler(index, p) + } + mig, err := migrator.NewMigrator(d.dirtyRemote, d.to, d.orderer, cfg) + if err != nil { + return err + } + d.migrator = mig + if dg.met != nil { + dg.met.AddMigrator(d.schema.Name, mig) + } + } + + errs := make(chan error, len(dg.devices)) + + // Now start them all migrating, and collect err + for _, d := range dg.devices { + go func() { + errs <- d.migrator.Migrate(d.numBlocks) + }() + } + + // Check for error from Migrate, and then Wait for completion of all devices... + for _, d := range dg.devices { + migErr := <-errs + if migErr != nil { + return migErr + } + + err := d.migrator.WaitForCompletion() + if err != nil { + return err + } + + // Check for any migration error + select { + case err := <-d.migrationError: + return err + default: + } + } + + return nil +} + func (dg *DeviceGroup) CloseAll() error { var e error for _, d := range dg.devices { From a51a85141126d9f47885edd684d4e5b4fd30b412 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 20:01:32 +0000 Subject: [PATCH 07/37] DeviceGroup migrate and test working Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 73 ++++++----- pkg/storage/devicegroup/device_group_test.go | 127 +++++++++++++++++++ 2 files changed, 168 insertions(+), 32 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 27beb4f..2c6f1a8 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -1,6 +1,7 @@ package devicegroup import ( + "context" "errors" "time" @@ -98,6 +99,10 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( return dg, nil } +func (dg *DeviceGroup) GetProvider(index int) storage.Provider { + return dg.devices[index].storage +} + func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { var e error @@ -105,37 +110,6 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index), pro) d.to.SetCompression(true) - // Setup d.to - // TODO: We *may* want to check errors here, but it will surface elsewhere. - go d.to.HandleNeedAt(func(offset int64, length int32) { - // Prioritize blocks - endOffset := uint64(offset + int64(length)) - if endOffset > d.size { - endOffset = d.size - } - - startBlock := int(offset / int64(d.blockSize)) - endBlock := int((endOffset-1)/d.blockSize) + 1 - for b := startBlock; b < endBlock; b++ { - d.orderer.PrioritiseBlock(b) - } - }) - - // TODO: We *may* want to check errors here, but it will surface elsewhere. 
- go d.to.HandleDontNeedAt(func(offset int64, length int32) { - // Deprioritize blocks - endOffset := uint64(offset + int64(length)) - if endOffset > d.size { - endOffset = d.size - } - - startBlock := int(offset / int64(d.blockSize)) - endBlock := int((endOffset-1)/d.blockSize) + 1 - for b := startBlock; b < endBlock; b++ { - d.orderer.Remove(b) - } - }) - if dg.met != nil { dg.met.AddToProtocol(d.schema.Name, d.to) } @@ -158,7 +132,7 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat d.migrationError = make(chan error, 1) // We will just hold onto the first error for now. setMigrationError := func(err error) { - if err != nil { + if err != nil && err != context.Canceled { select { case d.migrationError <- err: default: @@ -166,6 +140,41 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat } } + // Setup d.to + go func() { + err := d.to.HandleNeedAt(func(offset int64, length int32) { + // Prioritize blocks + endOffset := uint64(offset + int64(length)) + if endOffset > d.size { + endOffset = d.size + } + + startBlock := int(offset / int64(d.blockSize)) + endBlock := int((endOffset-1)/d.blockSize) + 1 + for b := startBlock; b < endBlock; b++ { + d.orderer.PrioritiseBlock(b) + } + }) + setMigrationError(err) + }() + + go func() { + err := d.to.HandleDontNeedAt(func(offset int64, length int32) { + // Deprioritize blocks + endOffset := uint64(offset + int64(length)) + if endOffset > d.size { + endOffset = d.size + } + + startBlock := int(offset / int64(d.blockSize)) + endBlock := int((endOffset-1)/d.blockSize) + 1 + for b := startBlock; b < endBlock; b++ { + d.orderer.Remove(b) + } + }) + setMigrationError(err) + }() + cfg := migrator.NewConfig() cfg.Logger = dg.log cfg.BlockSize = int(d.blockSize) diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 4fe13dd..20dc506 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -2,14 +2,20 @@ package devicegroup import ( "context" + "crypto/rand" "fmt" + "io" "os" "os/user" + "sync" "testing" + "github.com/loopholelabs/silo/pkg/storage" "github.com/loopholelabs/silo/pkg/storage/config" + "github.com/loopholelabs/silo/pkg/storage/migrator" "github.com/loopholelabs/silo/pkg/storage/protocol" "github.com/loopholelabs/silo/pkg/storage/protocol/packets" + "github.com/loopholelabs/silo/pkg/storage/sources" "github.com/stretchr/testify/assert" ) @@ -113,3 +119,124 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { assert.Equal(t, string(r.Encode()), di.Schema) } } + +func TestDeviceGroupMigrate(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + panic(err) + } + if currentUser.Username != "root" { + fmt.Printf("Cannot run test unless we are root.\n") + return + } + + ds := []*config.DeviceSchema{ + { + Name: "test1", + Size: "8m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test1", + }, + { + Name: "test2", + Size: "16m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test2", + }, + } + + t.Cleanup(func() { + os.Remove("testdev_test1") + os.Remove("testdev_test2") + }) + + // Create a simple pipe + r1, w1 := io.Pipe() + r2, w2 := io.Pipe() + + ctx, cancelfn := context.WithCancel(context.TODO()) + + var incomingLock sync.Mutex + incomingProviders := make(map[uint32]storage.Provider) + + initDev := func(ctx context.Context, p protocol.Protocol, dev uint32) { + destStorageFactory := func(di *packets.DevInfo) 
storage.Provider { + store := sources.NewMemoryStorage(int(di.Size)) + incomingLock.Lock() + incomingProviders[dev] = store + incomingLock.Unlock() + return store + } + + from := protocol.NewFromProtocol(ctx, dev, destStorageFactory, p) + go func() { + err := from.HandleReadAt() + assert.ErrorIs(t, err, context.Canceled) + }() + go func() { + err := from.HandleWriteAt() + assert.ErrorIs(t, err, context.Canceled) + }() + go func() { + err := from.HandleDevInfo() + assert.NoError(t, err) + }() + go func() { + err := from.HandleDirtyList(func(_ []uint) { + }) + assert.ErrorIs(t, err, context.Canceled) + }() + } + + prSource := protocol.NewRW(ctx, []io.Reader{r1}, []io.Writer{w2}, nil) + prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, initDev) + + go func() { + _ = prSource.Handle() + }() + go func() { + _ = prDest.Handle() + }() + + dg, err := New(ds, nil, nil) + assert.NoError(t, err) + + // Lets write some data... + for i := range ds { + prov := dg.GetProvider(i) + buff := make([]byte, prov.Size()) + _, err := rand.Read(buff) + assert.NoError(t, err) + _, err = prov.WriteAt(buff, 0) + assert.NoError(t, err) + } + + // Send all the dev info... + err = dg.SendDevInfo(prSource) + assert.NoError(t, err) + + pHandler := func(index int, p *migrator.MigrationProgress) {} + + err = dg.MigrateAll(pHandler) + assert.NoError(t, err) + + // Check the data all got migrated correctly + for i := range ds { + prov := dg.GetProvider(i) + destProvider := incomingProviders[uint32(i)] + assert.NotNil(t, destProvider) + eq, err := storage.Equals(prov, destProvider, 1024*1024) + assert.NoError(t, err) + assert.True(t, eq) + } + + err = dg.CloseAll() + assert.NoError(t, err) + + cancelfn() + +} From 4b3db5c979c14950855ac7b3e7cbe39283983e0f Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 20:04:13 +0000 Subject: [PATCH 08/37] Lint fixes Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 4 ++-- pkg/storage/devicegroup/device_group_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 2c6f1a8..95128f4 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -68,7 +68,7 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( dirtyLocal, dirtyRemote := dirtytracker.NewDirtyTracker(mlocal, blockSize) vmonitor := volatilitymonitor.NewVolatilityMonitor(dirtyLocal, blockSize, volatilityExpiry) - totalBlocks := (int(local.Size()) + int(blockSize) - 1) / int(blockSize) + totalBlocks := (int(local.Size()) + blockSize - 1) / blockSize orderer := blocks.NewPriorityBlockOrder(totalBlocks, vmonitor) orderer.AddAll() @@ -191,7 +191,7 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat d.storage.Unlock() setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) } - cfg.ErrorHandler = func(b *storage.BlockInfo, err error) { + cfg.ErrorHandler = func(_ *storage.BlockInfo, err error) { setMigrationError(err) } cfg.ProgressHandler = func(p *migrator.MigrationProgress) { diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 20dc506..3e47bf6 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -219,7 +219,7 @@ func TestDeviceGroupMigrate(t *testing.T) { err = dg.SendDevInfo(prSource) assert.NoError(t, err) - pHandler := func(index int, 
p *migrator.MigrationProgress) {} + pHandler := func(_ int, _ *migrator.MigrationProgress) {} err = dg.MigrateAll(pHandler) assert.NoError(t, err) From 4404d9820a9ec7bef9af2e8c11b1446568e5a75e Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 22:00:06 +0000 Subject: [PATCH 09/37] Added logging for dg.MigrateAll Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 59 ++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 95128f4..2176079 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -55,6 +55,9 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( for _, s := range ds { prov, exp, err := device.NewDeviceWithLoggingMetrics(s, log, met) if err != nil { + if log != nil { + log.Error().Err(err).Str("schema", string(s.Encode())).Msg("could not create device") + } // We should try to close / shutdown any successful devices we created here... // But it's likely to be critical. dg.CloseAll() @@ -96,6 +99,10 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( orderer: orderer, }) } + + if log != nil { + log.Debug().Int("devices", len(dg.devices)).Msg("created device group") + } return dg, nil } @@ -114,9 +121,12 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { dg.met.AddToProtocol(d.schema.Name, d.to) } - schema := d.schema.Encode() - err := d.to.SendDevInfo(d.schema.Name, uint32(d.schema.ByteBlockSize()), string(schema)) + schema := string(d.schema.Encode()) + err := d.to.SendDevInfo(d.schema.Name, uint32(d.schema.ByteBlockSize()), schema) if err != nil { + if dg.log != nil { + dg.log.Error().Str("schema", schema).Msg("could not send DevInfo") + } e = errors.Join(e, err) } } @@ -125,6 +135,12 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { // This will Migrate all devices to the 'to' setup in SendDevInfo stage. func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.MigrationProgress)) error { + ctime := time.Now() + + if dg.log != nil { + dg.log.Debug().Int("devices", len(dg.devices)).Msg("migrating device group") + } + // TODO: We can divide concurrency amongst devices depending on their size... concurrency := 100 @@ -143,6 +159,14 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat // Setup d.to go func() { err := d.to.HandleNeedAt(func(offset int64, length int32) { + if dg.log != nil { + dg.log.Debug(). + Int64("offset", offset). + Int32("length", length). + Int("dev", index). + Str("name", d.schema.Name). + Msg("NeedAt for device") + } // Prioritize blocks endOffset := uint64(offset + int64(length)) if endOffset > d.size { @@ -160,6 +184,14 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat go func() { err := d.to.HandleDontNeedAt(func(offset int64, length int32) { + if dg.log != nil { + dg.log.Debug(). + Int64("offset", offset). + Int32("length", length). + Int("dev", index). + Str("name", d.schema.Name). 
+ Msg("DontNeedAt for device") + } // Deprioritize blocks endOffset := uint64(offset + int64(length)) if endOffset > d.size { @@ -220,35 +252,58 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat for _, d := range dg.devices { migErr := <-errs if migErr != nil { + if dg.log != nil { + dg.log.Error().Err(migErr).Msg("error migrating device group") + } return migErr } err := d.migrator.WaitForCompletion() if err != nil { + if dg.log != nil { + dg.log.Error().Err(err).Msg("error migrating device group waiting for completion") + } return err } // Check for any migration error select { case err := <-d.migrationError: + if dg.log != nil { + dg.log.Error().Err(err).Msg("error migrating device group from goroutines") + } return err default: } } + if dg.log != nil { + dg.log.Debug().Int64("duration", time.Since(ctime).Milliseconds()).Int("devices", len(dg.devices)).Msg("migration of device group completed") + } + return nil } func (dg *DeviceGroup) CloseAll() error { + if dg.log != nil { + dg.log.Debug().Int("devices", len(dg.devices)).Msg("close device group") + } + var e error for _, d := range dg.devices { err := d.prov.Close() if err != nil { + if dg.log != nil { + dg.log.Error().Err(err).Msg("error closing device group storage provider") + } e = errors.Join(e, err) } if d.exp != nil { err = d.exp.Shutdown() if err != nil { + if dg.log != nil { + dg.log.Error().Err(err).Msg("error closing device group exposed storage") + } e = errors.Join(e, err) } } From 86c3641eec353714324a468263a4fae53b4c2d41 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 9 Dec 2024 22:10:22 +0000 Subject: [PATCH 10/37] Device concurrency is now proportional to size and allocated from a deviceGroup max Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 26 +++++++++++++++++--- pkg/storage/devicegroup/device_group_test.go | 9 +++++-- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 2176079..65332fc 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -134,17 +134,28 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { } // This will Migrate all devices to the 'to' setup in SendDevInfo stage. -func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.MigrationProgress)) error { +func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int, p *migrator.MigrationProgress)) error { ctime := time.Now() if dg.log != nil { dg.log.Debug().Int("devices", len(dg.devices)).Msg("migrating device group") } - // TODO: We can divide concurrency amongst devices depending on their size... - concurrency := 100 + // Add up device sizes, so we can allocate the concurrency proportionally + totalSize := uint64(0) + for _, d := range dg.devices { + totalSize += d.size + } + + // We need at least this much... + if maxConcurrency < len(dg.devices) { + maxConcurrency = len(dg.devices) + } + // We will allocate each device at least ONE... + maxConcurrency -= len(dg.devices) for index, d := range dg.devices { + concurrency := 1 + (uint64(maxConcurrency) * d.size / totalSize) d.migrationError = make(chan error, 1) // We will just hold onto the first error for now. 
setMigrationError := func(err error) { @@ -211,7 +222,7 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat cfg.Logger = dg.log cfg.BlockSize = int(d.blockSize) cfg.Concurrency = map[int]int{ - storage.BlockTypeAny: concurrency, + storage.BlockTypeAny: int(concurrency), } cfg.LockerHandler = func() { setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) @@ -237,6 +248,13 @@ func (dg *DeviceGroup) MigrateAll(progressHandler func(i int, p *migrator.Migrat if dg.met != nil { dg.met.AddMigrator(d.schema.Name, mig) } + if dg.log != nil { + dg.log.Debug(). + Uint64("concurrency", concurrency). + Int("index", index). + Str("name", d.schema.Name). + Msg("Setup migrator") + } } errs := make(chan error, len(dg.devices)) diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 3e47bf6..8291f0b 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -10,6 +10,8 @@ import ( "sync" "testing" + "github.com/loopholelabs/logging" + "github.com/loopholelabs/logging/types" "github.com/loopholelabs/silo/pkg/storage" "github.com/loopholelabs/silo/pkg/storage/config" "github.com/loopholelabs/silo/pkg/storage/migrator" @@ -202,7 +204,10 @@ func TestDeviceGroupMigrate(t *testing.T) { _ = prDest.Handle() }() - dg, err := New(ds, nil, nil) + log := logging.New(logging.Zerolog, "silo", os.Stdout) + log.SetLevel(types.TraceLevel) + + dg, err := New(ds, log, nil) assert.NoError(t, err) // Lets write some data... @@ -221,7 +226,7 @@ func TestDeviceGroupMigrate(t *testing.T) { pHandler := func(_ int, _ *migrator.MigrationProgress) {} - err = dg.MigrateAll(pHandler) + err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) // Check the data all got migrated correctly From cde82d3746fca6c951de254f09db7d8d1765a64b Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 10 Dec 2024 15:27:00 +0000 Subject: [PATCH 11/37] First stab at migrateDirty Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 95 +++++++++++++++++++- pkg/storage/devicegroup/device_group_test.go | 4 +- 2 files changed, 96 insertions(+), 3 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 65332fc..6918491 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -22,6 +22,8 @@ import ( const volatilityExpiry = 30 * time.Minute +var errNotSetup = errors.New("toProtocol not setup") + type DeviceGroup struct { log types.Logger met metrics.SiloMetrics @@ -110,7 +112,7 @@ func (dg *DeviceGroup) GetProvider(index int) storage.Provider { return dg.devices[index].storage } -func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { +func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { var e error for index, d := range dg.devices { @@ -135,6 +137,12 @@ func (dg *DeviceGroup) SendDevInfo(pro protocol.Protocol) error { // This will Migrate all devices to the 'to' setup in SendDevInfo stage. 
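Because progressHandler now receives separate updates per device, a caller that wants one overall figure has to fold the per-device MigrationProgress values together itself. A rough sketch of such a handler follows (it assumes the integer block-count fields used by the Printf calls later in this series, and needs "fmt", "sync" and the migrator package imported; it is not something the patch itself provides).

// aggregateProgress returns a progressHandler for MigrateAll that folds the
// latest per-device MigrationProgress into one overall "ready" percentage.
func aggregateProgress() func(int, *migrator.MigrationProgress) {
	var mu sync.Mutex
	latest := make(map[int]*migrator.MigrationProgress)
	return func(i int, p *migrator.MigrationProgress) {
		mu.Lock()
		defer mu.Unlock()
		latest[i] = p
		ready, total := 0, 0
		for _, lp := range latest {
			ready += lp.ReadyBlocks
			total += lp.TotalBlocks
		}
		if total > 0 {
			fmt.Printf("overall ready %d/%d blocks (%.2f%%)\n",
				ready, total, float64(ready)*100/float64(total))
		}
	}
}

// e.g. err = dg.MigrateAll(100, aggregateProgress())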
func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int, p *migrator.MigrationProgress)) error { + for _, d := range dg.devices { + if d.to == nil { + return errNotSetup + } + } + ctime := time.Now() if dg.log != nil { @@ -302,6 +310,91 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int return nil } +type MigrateDirtyHooks struct { + PostGetDirty func(index int, blocks []uint) + PostMigrateDirty func(index int) bool + Completed func(index int) +} + +func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { + errs := make(chan error, len(dg.devices)) + + for index, d := range dg.devices { + go func() { + for { + blocks := d.migrator.GetLatestDirty() + if dg.log != nil { + dg.log.Debug(). + Int("blocks", len(blocks)). + Int("index", index). + Str("name", d.schema.Name). + Msg("migrating dirty blocks") + } + if hooks != nil && hooks.PostGetDirty != nil { + hooks.PostGetDirty(index, blocks) + } + + if len(blocks) == 0 { + break + } + + err := d.to.DirtyList(int(d.blockSize), blocks) + if err != nil { + errs <- err + return + } + + err = d.migrator.MigrateDirty(blocks) + if err != nil { + errs <- err + return + } + + if hooks != nil && hooks.PostMigrateDirty != nil { + if hooks.PostMigrateDirty(index) { + break // PostMigrateDirty returned true, which means stop doing any dirty loop business. + } + } + } + + err := d.migrator.WaitForCompletion() + if err != nil { + errs <- err + return + } + + err = d.to.SendEvent(&packets.Event{Type: packets.EventCompleted}) + if err != nil { + errs <- err + return + } + + if hooks != nil && hooks.Completed != nil { + hooks.Completed(index) + } + + if dg.log != nil { + dg.log.Debug(). + Int("index", index). + Str("name", d.schema.Name). + Msg("migrating dirty blocks completed") + } + + errs <- nil + }() + } + + // Wait for all dirty migrations to complete + // Check for any error and return it + for err := range errs { + if err != nil { + return err + } + } + + return nil +} + func (dg *DeviceGroup) CloseAll() error { if dg.log != nil { dg.log.Debug().Int("devices", len(dg.devices)).Msg("close device group") diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 8291f0b..420be90 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -101,7 +101,7 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { pro := protocol.NewMockProtocol(context.TODO()) - err = dg.SendDevInfo(pro) + err = dg.StartMigrationTo(pro) assert.NoError(t, err) err = dg.CloseAll() @@ -221,7 +221,7 @@ func TestDeviceGroupMigrate(t *testing.T) { } // Send all the dev info... 
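Putting the pieces of this patch together, the sending side is driven in a fixed order; a rough sketch as of this revision (error handling trimmed, nil hooks are accepted by MigrateDirty):

// Sketch of the intended call order on the sending side.
dg, _ := New(testDeviceSchema, nil, nil)

// StartMigrationTo must run first; MigrateAll returns errNotSetup otherwise.
_ = dg.StartMigrationTo(prSource)

// Move every block, then drain whatever was dirtied while that ran.
_ = dg.MigrateAll(100, func(_ int, _ *migrator.MigrationProgress) {})
_ = dg.MigrateDirty(nil)

_ = dg.CloseAll()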
- err = dg.SendDevInfo(prSource) + err = dg.StartMigrationTo(prSource) assert.NoError(t, err) pHandler := func(_ int, _ *migrator.MigrationProgress) {} From 5151ba584daaeca5c45faa42a505847a2893c8c0 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 10 Dec 2024 16:08:56 +0000 Subject: [PATCH 12/37] Added hooks for dg.MigrateDirty phase Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 28 +++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 6918491..8277233 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -311,17 +311,26 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int } type MigrateDirtyHooks struct { - PostGetDirty func(index int, blocks []uint) - PostMigrateDirty func(index int) bool - Completed func(index int) + PreGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) + PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) + PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool + Completed func(index int, to *protocol.ToProtocol) } +const maxDirtyHistory = 32 + func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { errs := make(chan error, len(dg.devices)) for index, d := range dg.devices { go func() { + dirtyHistory := make([]int, 0) + for { + if hooks != nil && hooks.PreGetDirty != nil { + hooks.PreGetDirty(index, d.to, dirtyHistory) + } + blocks := d.migrator.GetLatestDirty() if dg.log != nil { dg.log.Debug(). @@ -330,8 +339,15 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { Str("name", d.schema.Name). Msg("migrating dirty blocks") } + + dirtyHistory = append(dirtyHistory, len(blocks)) + // Cap it at a certain MAX LENGTH + if len(dirtyHistory) > maxDirtyHistory { + dirtyHistory = dirtyHistory[1:] + } + if hooks != nil && hooks.PostGetDirty != nil { - hooks.PostGetDirty(index, blocks) + hooks.PostGetDirty(index, d.to, dirtyHistory, blocks) } if len(blocks) == 0 { @@ -351,7 +367,7 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { } if hooks != nil && hooks.PostMigrateDirty != nil { - if hooks.PostMigrateDirty(index) { + if hooks.PostMigrateDirty(index, d.to, dirtyHistory) { break // PostMigrateDirty returned true, which means stop doing any dirty loop business. 
} } @@ -370,7 +386,7 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { } if hooks != nil && hooks.Completed != nil { - hooks.Completed(index) + hooks.Completed(index, d.to) } if dg.log != nil { From 2509799105252773244dbdb24708a05b68b6fb0c Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 10 Dec 2024 16:21:25 +0000 Subject: [PATCH 13/37] refactored dg tests common setup Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group_test.go | 148 ++++++------------- 1 file changed, 44 insertions(+), 104 deletions(-) diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 420be90..ecd6e9a 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -21,94 +21,69 @@ import ( "github.com/stretchr/testify/assert" ) -func TestDeviceGroupBasic(t *testing.T) { +var testDeviceSchema = []*config.DeviceSchema{ + { + Name: "test1", + Size: "8m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test1", + }, + { + Name: "test2", + Size: "16m", + System: "file", + BlockSize: "1m", + Expose: true, + Location: "testdev_test2", + }, +} + +func setupDeviceGroup(t *testing.T) *DeviceGroup { currentUser, err := user.Current() if err != nil { panic(err) } if currentUser.Username != "root" { fmt.Printf("Cannot run test unless we are root.\n") - return + return nil } - ds := []*config.DeviceSchema{ - { - Name: "test1", - Size: "8m", - System: "file", - BlockSize: "1m", - Expose: true, - Location: "testdev_test1", - }, - { - Name: "test2", - Size: "16m", - System: "file", - BlockSize: "1m", - Expose: true, - Location: "testdev_test2", - }, - } + dg, err := New(testDeviceSchema, nil, nil) + assert.NoError(t, err) t.Cleanup(func() { + err = dg.CloseAll() + assert.NoError(t, err) + os.Remove("testdev_test1") os.Remove("testdev_test2") }) - dg, err := New(ds, nil, nil) - assert.NoError(t, err) - - err = dg.CloseAll() - assert.NoError(t, err) + return dg } -func TestDeviceGroupSendDevInfo(t *testing.T) { - currentUser, err := user.Current() - if err != nil { - panic(err) - } - if currentUser.Username != "root" { - fmt.Printf("Cannot run test unless we are root.\n") +func TestDeviceGroupBasic(t *testing.T) { + dg := setupDeviceGroup(t) + if dg == nil { return } +} - ds := []*config.DeviceSchema{ - { - Name: "test1", - Size: "8m", - System: "file", - BlockSize: "1m", - Expose: true, - Location: "testdev_test1", - }, - { - Name: "test2", - Size: "16m", - System: "file", - BlockSize: "1m", - Expose: true, - Location: "testdev_test2", - }, +func TestDeviceGroupSendDevInfo(t *testing.T) { + dg := setupDeviceGroup(t) + if dg == nil { + return } - t.Cleanup(func() { - os.Remove("testdev_test1") - os.Remove("testdev_test2") - }) - - dg, err := New(ds, nil, nil) - assert.NoError(t, err) - pro := protocol.NewMockProtocol(context.TODO()) - err = dg.StartMigrationTo(pro) - assert.NoError(t, err) - - err = dg.CloseAll() + err := dg.StartMigrationTo(pro) assert.NoError(t, err) // Make sure they all got sent correctly... 
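The dirtyHistory slice handed to the hooks above is what lets a caller decide when continuing the dirty loop is no longer worthwhile. A rough sketch of such a policy (the thresholds are arbitrary examples, not values from the patch):

// Stop a device's dirty loop once the latest pass found only a handful of
// blocks, or after too many passes; returning true from PostMigrateDirty
// makes MigrateDirty leave the loop and finish up even if blocks remain dirty.
hooks := &MigrateDirtyHooks{
	PostMigrateDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool {
		last := dirtyHistory[len(dirtyHistory)-1]
		return last < 4 || len(dirtyHistory) >= 10
	},
}
_ = dg.MigrateDirty(hooks)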
- for index, r := range ds { + for index, r := range testDeviceSchema { _, data, err := pro.WaitForCommand(uint32(index), packets.CommandDevInfo) assert.NoError(t, err) @@ -123,38 +98,13 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { } func TestDeviceGroupMigrate(t *testing.T) { - currentUser, err := user.Current() - if err != nil { - panic(err) - } - if currentUser.Username != "root" { - fmt.Printf("Cannot run test unless we are root.\n") + dg := setupDeviceGroup(t) + if dg == nil { return } - ds := []*config.DeviceSchema{ - { - Name: "test1", - Size: "8m", - System: "file", - BlockSize: "1m", - Expose: true, - Location: "testdev_test1", - }, - { - Name: "test2", - Size: "16m", - System: "file", - BlockSize: "1m", - Expose: true, - Location: "testdev_test2", - }, - } - - t.Cleanup(func() { - os.Remove("testdev_test1") - os.Remove("testdev_test2") - }) + log := logging.New(logging.Zerolog, "silo", os.Stdout) + log.SetLevel(types.TraceLevel) // Create a simple pipe r1, w1 := io.Pipe() @@ -204,14 +154,8 @@ func TestDeviceGroupMigrate(t *testing.T) { _ = prDest.Handle() }() - log := logging.New(logging.Zerolog, "silo", os.Stdout) - log.SetLevel(types.TraceLevel) - - dg, err := New(ds, log, nil) - assert.NoError(t, err) - // Lets write some data... - for i := range ds { + for i := range testDeviceSchema { prov := dg.GetProvider(i) buff := make([]byte, prov.Size()) _, err := rand.Read(buff) @@ -221,7 +165,7 @@ func TestDeviceGroupMigrate(t *testing.T) { } // Send all the dev info... - err = dg.StartMigrationTo(prSource) + err := dg.StartMigrationTo(prSource) assert.NoError(t, err) pHandler := func(_ int, _ *migrator.MigrationProgress) {} @@ -230,7 +174,7 @@ func TestDeviceGroupMigrate(t *testing.T) { assert.NoError(t, err) // Check the data all got migrated correctly - for i := range ds { + for i := range testDeviceSchema { prov := dg.GetProvider(i) destProvider := incomingProviders[uint32(i)] assert.NotNil(t, destProvider) @@ -239,9 +183,5 @@ func TestDeviceGroupMigrate(t *testing.T) { assert.True(t, eq) } - err = dg.CloseAll() - assert.NoError(t, err) - cancelfn() - } From 47d230aaa233a0a2a148bf2e5e5f7d4c404f46c2 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 11:33:20 +0000 Subject: [PATCH 14/37] Drastic simplification of cmd/serve using devicegroup Signed-off-by: Jimmy Moore --- cmd/serve.go | 447 +++--------------------- pkg/storage/devicegroup/device_group.go | 38 +- 2 files changed, 79 insertions(+), 406 deletions(-) diff --git a/cmd/serve.go b/cmd/serve.go index caf53b9..37afe4e 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -8,32 +8,21 @@ import ( "net/http" "os" "os/signal" - "sync" "syscall" "time" - "github.com/fatih/color" "github.com/loopholelabs/logging" "github.com/loopholelabs/logging/types" - "github.com/loopholelabs/silo/pkg/storage" - "github.com/loopholelabs/silo/pkg/storage/blocks" "github.com/loopholelabs/silo/pkg/storage/config" - "github.com/loopholelabs/silo/pkg/storage/device" - "github.com/loopholelabs/silo/pkg/storage/dirtytracker" - "github.com/loopholelabs/silo/pkg/storage/expose" + "github.com/loopholelabs/silo/pkg/storage/devicegroup" "github.com/loopholelabs/silo/pkg/storage/metrics" siloprom "github.com/loopholelabs/silo/pkg/storage/metrics/prometheus" "github.com/loopholelabs/silo/pkg/storage/migrator" - "github.com/loopholelabs/silo/pkg/storage/modules" "github.com/loopholelabs/silo/pkg/storage/protocol" - "github.com/loopholelabs/silo/pkg/storage/protocol/packets" - "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" 
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/cobra" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" ) var ( @@ -47,42 +36,16 @@ var ( var serveAddr string var serveConf string -var serveProgress bool -var serveContinuous bool -var serveAnyOrder bool -var serveCompress bool var serveMetrics string - -var srcExposed []storage.ExposedStorage -var srcStorage []*storageInfo - -var serveProgressBar *mpb.Progress -var serveBars []*mpb.Bar - var serveDebug bool func init() { rootCmd.AddCommand(cmdServe) cmdServe.Flags().StringVarP(&serveAddr, "addr", "a", ":5170", "Address to serve from") cmdServe.Flags().StringVarP(&serveConf, "conf", "c", "silo.conf", "Configuration file") - cmdServe.Flags().BoolVarP(&serveProgress, "progress", "p", false, "Show progress") - cmdServe.Flags().BoolVarP(&serveContinuous, "continuous", "C", false, "Continuous sync") - cmdServe.Flags().BoolVarP(&serveAnyOrder, "order", "o", false, "Any order (faster)") cmdServe.Flags().BoolVarP(&serveDebug, "debug", "d", false, "Debug logging (trace)") cmdServe.Flags().StringVarP(&serveMetrics, "metrics", "m", "", "Prom metrics address") - cmdServe.Flags().BoolVarP(&serveCompress, "compress", "x", false, "Compress") -} - -type storageInfo struct { - // tracker storage.TrackingStorageProvider - tracker *dirtytracker.Remote - lockable storage.LockableProvider - orderer *blocks.PriorityBlockOrder - numBlocks int - blockSize int - name string - schema string } func runServe(_ *cobra.Command, _ []string) { @@ -119,46 +82,30 @@ func runServe(_ *cobra.Command, _ []string) { go http.ListenAndServe(serveMetrics, nil) } - if serveProgress { - serveProgressBar = mpb.New( - mpb.WithOutput(color.Output), - mpb.WithAutoRefresh(), - ) - serveBars = make([]*mpb.Bar, 0) + fmt.Printf("Starting silo serve %s\n", serveAddr) + + siloConf, err := config.ReadSchema(serveConf) + if err != nil { + panic(err) } - srcExposed = make([]storage.ExposedStorage, 0) - srcStorage = make([]*storageInfo, 0) - fmt.Printf("Starting silo serve %s\n", serveAddr) + dg, err := devicegroup.New(siloConf.Device, log, siloMetrics) + if err != nil { + panic(err) + } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { <-c - shutdownEverything(log) + dg.CloseAll() os.Exit(1) }() - siloConf, err := config.ReadSchema(serveConf) - if err != nil { - panic(err) - } - - for i, s := range siloConf.Device { - fmt.Printf("Setup storage %d [%s] size %s - %d\n", i, s.Name, s.Size, s.ByteSize()) - sinfo, err := setupStorageDevice(s, log, siloMetrics) - if err != nil { - panic(fmt.Sprintf("Could not setup storage. %v", err)) - } - - srcStorage = append(srcStorage, sinfo) - } - // Setup listener here. When client connects, migrate data to it. - l, err := net.Listen("tcp", serveAddr) if err != nil { - shutdownEverything(log) + dg.CloseAll() panic("Listener issue...") } @@ -179,27 +126,51 @@ func runServe(_ *cobra.Command, _ []string) { siloMetrics.AddProtocol("serve", pro) } - // Lets go through each of the things we want to migrate... ctime := time.Now() - var wg sync.WaitGroup + // Migrate everything to the destination... 
+ err = dg.StartMigrationTo(pro) + if err != nil { + dg.CloseAll() + panic(err) + } - for i, s := range srcStorage { - wg.Add(1) - go func(index int, src *storageInfo) { - err := migrateDevice(log, siloMetrics, uint32(index), src.name, pro, src) - if err != nil { - fmt.Printf("There was an issue migrating the storage %d %v\n", index, err) - } - wg.Done() - }(i, s) + err = dg.MigrateAll(1000, func(index int, p *migrator.MigrationProgress) { + fmt.Printf("[%d] Progress Moved: %d/%d %.2f%% Clean: %d/%d %.2f%% InProgress: %d\n", + index, p.MigratedBlocks, p.TotalBlocks, p.MigratedBlocksPerc, + p.ReadyBlocks, p.TotalBlocks, p.ReadyBlocksPerc, + p.ActiveBlocks) + }) + if err != nil { + dg.CloseAll() + panic(err) } - wg.Wait() - if serveProgressBar != nil { - serveProgressBar.Wait() + fmt.Printf("All devices migrated in %dms.\n", time.Since(ctime).Milliseconds()) + + // Now do a dirty block phase... + hooks := &devicegroup.MigrateDirtyHooks{ + PreGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) { + fmt.Printf("# [%d]PreGetDirty %v\n", index, dirtyHistory) + }, + PostGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) { + fmt.Printf("# [%d]PostGetDirty %v\n", index, dirtyHistory) + }, + PostMigrateDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool { + fmt.Printf("# [%d]PostMigrateDirty %v\n", index, dirtyHistory) + return false + }, + Completed: func(index int, to *protocol.ToProtocol) { + fmt.Printf("# [%d]Completed\n", index) + }, + } + err = dg.MigrateDirty(hooks) + if err != nil { + dg.CloseAll() + panic(err) } - fmt.Printf("\n\nMigration completed in %dms\n", time.Since(ctime).Milliseconds()) + + fmt.Printf("All devices migrated(including dirty) in %dms.\n", time.Since(ctime).Milliseconds()) if log != nil { metrics := pro.GetMetrics() @@ -213,319 +184,5 @@ func runServe(_ *cobra.Command, _ []string) { con.Close() } - shutdownEverything(log) -} - -func shutdownEverything(log types.Logger) { - // first unlock everything - fmt.Printf("Unlocking devices...\n") - for _, i := range srcStorage { - i.lockable.Unlock() - i.tracker.Close() - } - - fmt.Printf("Shutting down devices cleanly...\n") - for _, p := range srcExposed { - device := p.Device() - - fmt.Printf("Shutdown nbd device %s\n", device) - _ = p.Shutdown() - - // Show some metrics... - if log != nil { - nbdDevice, ok := p.(*expose.ExposedStorageNBDNL) - if ok { - m := nbdDevice.GetMetrics() - log.Debug(). - Uint64("PacketsIn", m.PacketsIn). - Uint64("PacketsOut", m.PacketsOut). - Uint64("ReadAt", m.ReadAt). - Uint64("ReadAtBytes", m.ReadAtBytes). - Uint64("ReadAtTimeMS", uint64(m.ReadAtTime.Milliseconds())). - Uint64("WriteAt", m.WriteAt). - Uint64("WriteAtBytes", m.WriteAtBytes). - Uint64("WriteAtTimeMS", uint64(m.WriteAtTime.Milliseconds())). - Str("device", p.Device()). 
- Msg("NBD metrics") - } - } - } -} - -func setupStorageDevice(conf *config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*storageInfo, error) { - source, ex, err := device.NewDeviceWithLoggingMetrics(conf, log, met) - if err != nil { - return nil, err - } - if ex != nil { - fmt.Printf("Device %s exposed as %s\n", conf.Name, ex.Device()) - srcExposed = append(srcExposed, ex) - } - - blockSize := 1024 * 128 - - if conf.BlockSize != "" { - blockSize = int(conf.ByteBlockSize()) - } - - numBlocks := (int(conf.ByteSize()) + blockSize - 1) / blockSize - - sourceMetrics := modules.NewMetrics(source) - sourceDirtyLocal, sourceDirtyRemote := dirtytracker.NewDirtyTracker(sourceMetrics, blockSize) - sourceMonitor := volatilitymonitor.NewVolatilityMonitor(sourceDirtyLocal, blockSize, 10*time.Second) - sourceStorage := modules.NewLockable(sourceMonitor) - - if met != nil { - met.AddDirtyTracker(conf.Name, sourceDirtyRemote) - met.AddVolatilityMonitor(conf.Name, sourceMonitor) - met.AddMetrics(conf.Name, sourceMetrics) - } - - if ex != nil { - ex.SetProvider(sourceStorage) - } - - // Start monitoring blocks. - - var primaryOrderer storage.BlockOrder - primaryOrderer = sourceMonitor - - if serveAnyOrder { - primaryOrderer = blocks.NewAnyBlockOrder(numBlocks, nil) - } - orderer := blocks.NewPriorityBlockOrder(numBlocks, primaryOrderer) - orderer.AddAll() - - schema := string(conf.Encode()) - - sinfo := &storageInfo{ - tracker: sourceDirtyRemote, - lockable: sourceStorage, - orderer: orderer, - blockSize: blockSize, - numBlocks: numBlocks, - name: conf.Name, - schema: schema, - } - - return sinfo, nil -} - -// Migrate a device -func migrateDevice(log types.Logger, met metrics.SiloMetrics, devID uint32, name string, - pro protocol.Protocol, - sinfo *storageInfo) error { - size := sinfo.lockable.Size() - dest := protocol.NewToProtocol(size, devID, pro) - - // Maybe compress writes - dest.SetCompression(serveCompress) - - err := dest.SendDevInfo(name, uint32(sinfo.blockSize), sinfo.schema) - if err != nil { - return err - } - - statusString := " " - - statusFn := func(_ decor.Statistics) string { - return statusString - } - - var bar *mpb.Bar - if serveProgress { - bar = serveProgressBar.AddBar(int64(size), - mpb.PrependDecorators( - decor.Name(name, decor.WCSyncSpaceR), - decor.CountersKiloByte("%d/%d", decor.WCSyncWidth), - ), - mpb.AppendDecorators( - decor.EwmaETA(decor.ET_STYLE_GO, 30), - decor.Name(" "), - decor.EwmaSpeed(decor.SizeB1024(0), "% .2f", 60, decor.WCSyncWidth), - decor.OnComplete(decor.Percentage(decor.WC{W: 5}), "done"), - decor.Name(" "), - decor.Any(statusFn, decor.WC{W: 2}), - ), - ) - - serveBars = append(serveBars, bar) - } - - go func() { - _ = dest.HandleNeedAt(func(offset int64, length int32) { - // Prioritize blocks... - end := uint64(offset + int64(length)) - if end > size { - end = size - } - - bStart := int(offset / int64(sinfo.blockSize)) - bEnd := int((end-1)/uint64(sinfo.blockSize)) + 1 - for b := bStart; b < bEnd; b++ { - // Ask the orderer to prioritize these blocks... 
- sinfo.orderer.PrioritiseBlock(b) - } - }) - }() - - go func() { - _ = dest.HandleDontNeedAt(func(offset int64, length int32) { - end := uint64(offset + int64(length)) - if end > size { - end = size - } - - bStart := int(offset / int64(sinfo.blockSize)) - bEnd := int((end-1)/uint64(sinfo.blockSize)) + 1 - for b := bStart; b < bEnd; b++ { - sinfo.orderer.Remove(b) - } - }) - }() - - conf := migrator.NewConfig().WithBlockSize(sinfo.blockSize) - conf.Logger = log - conf.LockerHandler = func() { - _ = dest.SendEvent(&packets.Event{Type: packets.EventPreLock}) - sinfo.lockable.Lock() - _ = dest.SendEvent(&packets.Event{Type: packets.EventPostLock}) - } - conf.UnlockerHandler = func() { - _ = dest.SendEvent(&packets.Event{Type: packets.EventPreUnlock}) - sinfo.lockable.Unlock() - _ = dest.SendEvent(&packets.Event{Type: packets.EventPostUnlock}) - } - conf.Concurrency = map[int]int{ - storage.BlockTypeAny: 1000, - } - conf.ErrorHandler = func(_ *storage.BlockInfo, err error) { - // For now... - panic(err) - } - conf.Integrity = true - - lastValue := uint64(0) - lastTime := time.Now() - - if serveProgress { - - conf.ProgressHandler = func(p *migrator.MigrationProgress) { - v := uint64(p.ReadyBlocks) * uint64(sinfo.blockSize) - if v > size { - v = size - } - bar.SetCurrent(int64(v)) - bar.EwmaIncrInt64(int64(v-lastValue), time.Since(lastTime)) - lastTime = time.Now() - lastValue = v - } - } else { - conf.ProgressHandler = func(p *migrator.MigrationProgress) { - fmt.Printf("[%s] Progress Moved: %d/%d %.2f%% Clean: %d/%d %.2f%% InProgress: %d\n", - name, p.MigratedBlocks, p.TotalBlocks, p.MigratedBlocksPerc, - p.ReadyBlocks, p.TotalBlocks, p.ReadyBlocksPerc, - p.ActiveBlocks) - } - conf.ErrorHandler = func(b *storage.BlockInfo, err error) { - fmt.Printf("[%s] Error for block %d error %v\n", name, b.Block, err) - } - } - - mig, err := migrator.NewMigrator(sinfo.tracker, dest, sinfo.orderer, conf) - - if err != nil { - return err - } - - if met != nil { - met.AddToProtocol(name, dest) - met.AddMigrator(name, mig) - } - - migrateBlocks := sinfo.numBlocks - - // Now do the migration... - err = mig.Migrate(migrateBlocks) - if err != nil { - return err - } - - // Wait for completion. - err = mig.WaitForCompletion() - if err != nil { - return err - } - - hashes := mig.GetHashes() // Get the initial hashes and send them over for verification... - err = dest.SendHashes(hashes) - if err != nil { - return err - } - - // Optional: Enter a loop looking for more dirty blocks to migrate... - for { - blocks := mig.GetLatestDirty() // - if !serveContinuous && blocks == nil { - break - } - - if blocks != nil { - // Optional: Send the list of dirty blocks over... - err := dest.DirtyList(conf.BlockSize, blocks) - if err != nil { - return err - } - - // fmt.Printf("[%s] Migrating dirty blocks %d\n", name, len(blocks)) - err = mig.MigrateDirty(blocks) - if err != nil { - return err - } - } else { - mig.Unlock() - } - time.Sleep(100 * time.Millisecond) - } - - err = mig.WaitForCompletion() - if err != nil { - return err - } - - // fmt.Printf("[%s] Migration completed\n", name) - - err = dest.SendEvent(&packets.Event{Type: packets.EventCompleted}) - if err != nil { - return err - } - /* - // Completed. - if serve_progress { - // bar.SetCurrent(int64(size)) - // bar.EwmaIncrInt64(int64(size-last_value), time.Since(last_time)) - } - */ - - if log != nil { - toMetrics := dest.GetMetrics() - log.Debug(). - Str("name", name). - Uint64("SentEvents", toMetrics.SentEvents). - Uint64("SentHashes", toMetrics.SentHashes). 
- Uint64("SentDevInfo", toMetrics.SentDevInfo). - Uint64("SentRemoveDev", toMetrics.SentRemoveDev). - Uint64("SentDirtyList", toMetrics.SentDirtyList). - Uint64("SentReadAt", toMetrics.SentReadAt). - Uint64("SentWriteAtHash", toMetrics.SentWriteAtHash). - Uint64("SentWriteAtComp", toMetrics.SentWriteAtComp). - Uint64("SentWriteAt", toMetrics.SentWriteAt). - Uint64("SentWriteAtWithMap", toMetrics.SentWriteAtWithMap). - Uint64("SentRemoveFromMap", toMetrics.SentRemoveFromMap). - Uint64("SentNeedAt", toMetrics.RecvNeedAt). - Uint64("SentDontNeedAt", toMetrics.RecvDontNeedAt). - Msg("ToProtocol metrics") - } - - return nil + dg.CloseAll() } diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 8277233..9646d94 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -21,6 +21,8 @@ import ( ) const volatilityExpiry = 30 * time.Minute +const defaultBlockSize = 1024 * 1024 +const maxDirtyHistory = 32 var errNotSetup = errors.New("toProtocol not setup") @@ -67,6 +69,9 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( } blockSize := int(s.ByteBlockSize()) + if blockSize == 0 { + blockSize = defaultBlockSize + } local := modules.NewLockable(prov) mlocal := modules.NewMetrics(local) @@ -77,12 +82,16 @@ func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) ( orderer := blocks.NewPriorityBlockOrder(totalBlocks, vmonitor) orderer.AddAll() - exp.SetProvider(vmonitor) + if exp != nil { + exp.SetProvider(vmonitor) + } // Add to metrics if given. if met != nil { met.AddMetrics(s.Name, mlocal) - met.AddNBD(s.Name, exp.(*expose.ExposedStorageNBDNL)) + if exp != nil { + met.AddNBD(s.Name, exp.(*expose.ExposedStorageNBDNL)) + } met.AddDirtyTracker(s.Name, dirtyRemote) met.AddVolatilityMonitor(s.Name, vmonitor) } @@ -124,7 +133,8 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { } schema := string(d.schema.Encode()) - err := d.to.SendDevInfo(d.schema.Name, uint32(d.schema.ByteBlockSize()), schema) + + err := d.to.SendDevInfo(d.schema.Name, uint32(d.blockSize), schema) if err != nil { if dg.log != nil { dg.log.Error().Str("schema", schema).Msg("could not send DevInfo") @@ -270,24 +280,27 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int // Now start them all migrating, and collect err for _, d := range dg.devices { go func() { - errs <- d.migrator.Migrate(d.numBlocks) + err := d.migrator.Migrate(d.numBlocks) + errs <- err }() } // Check for error from Migrate, and then Wait for completion of all devices... 
- for _, d := range dg.devices { + for index := range dg.devices { migErr := <-errs if migErr != nil { if dg.log != nil { - dg.log.Error().Err(migErr).Msg("error migrating device group") + dg.log.Error().Err(migErr).Int("index", index).Msg("error migrating device group") } return migErr } + } + for index, d := range dg.devices { err := d.migrator.WaitForCompletion() if err != nil { if dg.log != nil { - dg.log.Error().Err(err).Msg("error migrating device group waiting for completion") + dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group waiting for completion") } return err } @@ -296,7 +309,7 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int select { case err := <-d.migrationError: if dg.log != nil { - dg.log.Error().Err(err).Msg("error migrating device group from goroutines") + dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group from goroutines") } return err default: @@ -317,8 +330,6 @@ type MigrateDirtyHooks struct { Completed func(index int, to *protocol.ToProtocol) } -const maxDirtyHistory = 32 - func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { errs := make(chan error, len(dg.devices)) @@ -402,7 +413,8 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { // Wait for all dirty migrations to complete // Check for any error and return it - for err := range errs { + for range dg.devices { + err := <-errs if err != nil { return err } @@ -418,6 +430,10 @@ func (dg *DeviceGroup) CloseAll() error { var e error for _, d := range dg.devices { + // Unlock the storage so nothing blocks here... + // If we don't unlock there may be pending nbd writes that can't be completed. + d.storage.Unlock() + err := d.prov.Close() if err != nil { if dg.log != nil { From cb4f0fed8cb1131194daa7fec57cf66c5530a233 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 12:16:58 +0000 Subject: [PATCH 15/37] Added new packet for deviceGroup Signed-off-by: Jimmy Moore --- .../protocol/packets/device_group_info.go | 53 +++++++++++++++++++ pkg/storage/protocol/packets/packet_test.go | 26 +++++++++ 2 files changed, 79 insertions(+) create mode 100644 pkg/storage/protocol/packets/device_group_info.go diff --git a/pkg/storage/protocol/packets/device_group_info.go b/pkg/storage/protocol/packets/device_group_info.go new file mode 100644 index 0000000..4a7ebd7 --- /dev/null +++ b/pkg/storage/protocol/packets/device_group_info.go @@ -0,0 +1,53 @@ +package packets + +import ( + "bytes" + "encoding/binary" +) + +type DeviceGroupInfo struct { + Devices map[int]*DevInfo +} + +func EncodeDeviceGroupInfo(dgi *DeviceGroupInfo) []byte { + var buffer bytes.Buffer + diHeader := make([]byte, 8) + + for index, di := range dgi.Devices { + diBytes := EncodeDevInfo(di) + binary.LittleEndian.PutUint32(diHeader, uint32(index)) + binary.LittleEndian.PutUint32(diHeader[4:], uint32(len(diBytes))) + buffer.Write(diHeader) + buffer.Write(diBytes) + } + return buffer.Bytes() +} + +func DecodeDeviceGroupInfo(buff []byte) (*DeviceGroupInfo, error) { + dgi := &DeviceGroupInfo{ + Devices: make(map[int]*DevInfo), + } + + ptr := 0 + for { + if ptr == len(buff) { + break + } + if len(buff)-ptr < 8 { + return nil, ErrInvalidPacket + } + index := binary.LittleEndian.Uint32(buff[ptr:]) + length := binary.LittleEndian.Uint32(buff[ptr+4:]) + ptr += 8 + if len(buff)-ptr < int(length) { + return nil, ErrInvalidPacket + } + di, err := DecodeDevInfo(buff[ptr : ptr+int(length)]) + if err != nil { + return nil, err + } + 
dgi.Devices[int(index)] = di + ptr += int(length) + } + return dgi, nil +} diff --git a/pkg/storage/protocol/packets/packet_test.go b/pkg/storage/protocol/packets/packet_test.go index f4332c8..9c420fb 100644 --- a/pkg/storage/protocol/packets/packet_test.go +++ b/pkg/storage/protocol/packets/packet_test.go @@ -371,3 +371,29 @@ func TestAlternateSources(t *testing.T) { assert.Equal(t, sources[0].Location, sources2[0].Location) } + +func TestDeviceGroupInfo(t *testing.T) { + dgi := &DeviceGroupInfo{ + Devices: map[int]*DevInfo{ + 0: {Size: 100, BlockSize: 1, Name: "a-hello", Schema: "a-1234"}, + 1: {Size: 200, BlockSize: 2, Name: "b-hello", Schema: "b-1234"}, + 3: {Size: 300, BlockSize: 3, Name: "c-hello", Schema: "c-1234"}, + }, + } + b := EncodeDeviceGroupInfo(dgi) + + dgi2, err := DecodeDeviceGroupInfo(b) + assert.NoError(t, err) + + // Check that dgi and dgi2 are the same... + assert.Equal(t, len(dgi.Devices), len(dgi2.Devices)) + + for index, di := range dgi.Devices { + di2, ok := dgi2.Devices[index] + assert.True(t, ok) + assert.Equal(t, di.Size, di2.Size) + assert.Equal(t, di.BlockSize, di2.BlockSize) + assert.Equal(t, di.Name, di2.Name) + assert.Equal(t, di.Schema, di2.Schema) + } +} From 33cff26063103709be6744d281e769c3cea584bf Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 13:24:49 +0000 Subject: [PATCH 16/37] devicegroup devInfo change passes test, but /cmd/connect will be broken atm Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 39 ++++++---- pkg/storage/devicegroup/device_group_test.go | 76 +++++++++++-------- pkg/storage/protocol/from_protocol.go | 6 +- .../protocol/packets/device_group_info.go | 7 +- pkg/storage/protocol/packets/packet.go | 1 + pkg/storage/protocol/protocol_rw.go | 10 ++- 6 files changed, 91 insertions(+), 48 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 9646d94..3ef113a 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -27,9 +27,10 @@ const maxDirtyHistory = 32 var errNotSetup = errors.New("toProtocol not setup") type DeviceGroup struct { - log types.Logger - met metrics.SiloMetrics - devices []*DeviceInformation + log types.Logger + met metrics.SiloMetrics + devices []*DeviceInformation + controlProtocol protocol.Protocol } type DeviceInformation struct { @@ -122,27 +123,39 @@ func (dg *DeviceGroup) GetProvider(index int) storage.Provider { } func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { - var e error + // We will use dev 0 to communicate + dg.controlProtocol = pro + // First lets setup the ToProtocol for index, d := range dg.devices { - d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index), pro) + d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index+1), pro) d.to.SetCompression(true) if dg.met != nil { dg.met.AddToProtocol(d.schema.Name, d.to) } + } - schema := string(d.schema.Encode()) + // Now package devices up into a single DeviceGroupInfo + dgi := &packets.DeviceGroupInfo{ + Devices: make(map[int]*packets.DevInfo), + } - err := d.to.SendDevInfo(d.schema.Name, uint32(d.blockSize), schema) - if err != nil { - if dg.log != nil { - dg.log.Error().Str("schema", schema).Msg("could not send DevInfo") - } - e = errors.Join(e, err) + for index, d := range dg.devices { + di := &packets.DevInfo{ + Size: d.prov.Size(), + BlockSize: uint32(d.blockSize), + Name: d.schema.Name, + Schema: string(d.schema.Encode()), } + dgi.Devices[index+1] = di } - return e + + // Send 
the single DeviceGroupInfo packet down our control channel 0 + dgiData := packets.EncodeDeviceGroupInfo(dgi) + _, err := dg.controlProtocol.SendPacket(0, protocol.IDPickAny, dgiData, protocol.UrgencyUrgent) + + return err } // This will Migrate all devices to the 'to' setup in SendDevInfo stage. diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index ecd6e9a..e0eeff9 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -83,12 +83,14 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { assert.NoError(t, err) // Make sure they all got sent correctly... - for index, r := range testDeviceSchema { - _, data, err := pro.WaitForCommand(uint32(index), packets.CommandDevInfo) - assert.NoError(t, err) + _, data, err := pro.WaitForCommand(0, packets.CommandDeviceGroupInfo) + assert.NoError(t, err) - di, err := packets.DecodeDevInfo(data) - assert.NoError(t, err) + dgi, err := packets.DecodeDeviceGroupInfo(data) + assert.NoError(t, err) + + for index, r := range testDeviceSchema { + di := dgi.Devices[index+1] assert.Equal(t, r.Name, di.Name) assert.Equal(t, uint64(r.ByteSize()), di.Size) @@ -116,37 +118,47 @@ func TestDeviceGroupMigrate(t *testing.T) { incomingProviders := make(map[uint32]storage.Provider) initDev := func(ctx context.Context, p protocol.Protocol, dev uint32) { - destStorageFactory := func(di *packets.DevInfo) storage.Provider { - store := sources.NewMemoryStorage(int(di.Size)) - incomingLock.Lock() - incomingProviders[dev] = store - incomingLock.Unlock() - return store - } - - from := protocol.NewFromProtocol(ctx, dev, destStorageFactory, p) - go func() { - err := from.HandleReadAt() - assert.ErrorIs(t, err, context.Canceled) - }() - go func() { - err := from.HandleWriteAt() - assert.ErrorIs(t, err, context.Canceled) - }() - go func() { - err := from.HandleDevInfo() - assert.NoError(t, err) - }() - go func() { - err := from.HandleDirtyList(func(_ []uint) { - }) - assert.ErrorIs(t, err, context.Canceled) - }() } prSource := protocol.NewRW(ctx, []io.Reader{r1}, []io.Writer{w2}, nil) prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, initDev) + go func() { + // This is our control channel, and we're expecting a DeviceGroupInfo + _, dgData, err := prDest.WaitForCommand(0, packets.CommandDeviceGroupInfo) + assert.NoError(t, err) + dgi, err := packets.DecodeDeviceGroupInfo(dgData) + assert.NoError(t, err) + fmt.Printf("Device Group %v\n", dgi) + + for index, di := range dgi.Devices { + destStorageFactory := func(di *packets.DevInfo) storage.Provider { + store := sources.NewMemoryStorage(int(di.Size)) + incomingLock.Lock() + incomingProviders[uint32(index)] = store + incomingLock.Unlock() + return store + } + + from := protocol.NewFromProtocol(ctx, uint32(index), destStorageFactory, prDest) + err = from.SetDevInfo(di) + assert.NoError(t, err) + go func() { + err := from.HandleReadAt() + assert.ErrorIs(t, err, context.Canceled) + }() + go func() { + err := from.HandleWriteAt() + assert.ErrorIs(t, err, context.Canceled) + }() + go func() { + err := from.HandleDirtyList(func(_ []uint) { + }) + assert.ErrorIs(t, err, context.Canceled) + }() + } + }() + go func() { _ = prSource.Handle() }() @@ -176,7 +188,7 @@ func TestDeviceGroupMigrate(t *testing.T) { // Check the data all got migrated correctly for i := range testDeviceSchema { prov := dg.GetProvider(i) - destProvider := incomingProviders[uint32(i)] + destProvider := incomingProviders[uint32(i+1)] assert.NotNil(t, 
destProvider) eq, err := storage.Equals(prov, destProvider, 1024*1024) assert.NoError(t, err) diff --git a/pkg/storage/protocol/from_protocol.go b/pkg/storage/protocol/from_protocol.go index ee9988a..0d1c2c2 100644 --- a/pkg/storage/protocol/from_protocol.go +++ b/pkg/storage/protocol/from_protocol.go @@ -251,9 +251,13 @@ func (fp *FromProtocol) HandleDevInfo() error { if err != nil { return err } - atomic.AddUint64(&fp.metricRecvDevInfo, 1) + return fp.SetDevInfo(di) +} + +// Alternatively, you can call SetDevInfo to setup the DevInfo. +func (fp *FromProtocol) SetDevInfo(di *packets.DevInfo) error { // Create storage, and setup a writeCombinator with two inputs fp.initLock.Lock() fp.prov = fp.providerFactory(di) diff --git a/pkg/storage/protocol/packets/device_group_info.go b/pkg/storage/protocol/packets/device_group_info.go index 4a7ebd7..3d43aa8 100644 --- a/pkg/storage/protocol/packets/device_group_info.go +++ b/pkg/storage/protocol/packets/device_group_info.go @@ -11,6 +11,7 @@ type DeviceGroupInfo struct { func EncodeDeviceGroupInfo(dgi *DeviceGroupInfo) []byte { var buffer bytes.Buffer + buffer.WriteByte(CommandDeviceGroupInfo) diHeader := make([]byte, 8) for index, di := range dgi.Devices { @@ -28,7 +29,11 @@ func DecodeDeviceGroupInfo(buff []byte) (*DeviceGroupInfo, error) { Devices: make(map[int]*DevInfo), } - ptr := 0 + if len(buff) < 1 || buff[0] != CommandDeviceGroupInfo { + return nil, ErrInvalidPacket + } + + ptr := 1 for { if ptr == len(buff) { break diff --git a/pkg/storage/protocol/packets/packet.go b/pkg/storage/protocol/packets/packet.go index 4d40fa1..0b61fa6 100644 --- a/pkg/storage/protocol/packets/packet.go +++ b/pkg/storage/protocol/packets/packet.go @@ -22,6 +22,7 @@ const ( CommandRemoveDev = CommandRequest | byte(10) CommandRemoveFromMap = CommandRequest | byte(11) CommandAlternateSources = CommandRequest | byte(12) + CommandDeviceGroupInfo = CommandRequest | byte(13) ) const ( diff --git a/pkg/storage/protocol/protocol_rw.go b/pkg/storage/protocol/protocol_rw.go index 65ac40e..68f280b 100644 --- a/pkg/storage/protocol/protocol_rw.go +++ b/pkg/storage/protocol/protocol_rw.go @@ -324,7 +324,15 @@ func (p *RW) WaitForPacket(dev uint32, id uint32) ([]byte, error) { func (p *RW) WaitForCommand(dev uint32, cmd byte) (uint32, []byte, error) { p.waitersLock.Lock() - w := p.waiters[dev] + w, ok := p.waiters[dev] + if !ok { + p.activeDevs[dev] = true + w = Waiters{ + byCmd: make(map[byte]chan packetinfo), + byID: make(map[uint32]chan packetinfo), + } + p.waiters[dev] = w + } wq, okk := w.byCmd[cmd] if !okk { wq = make(chan packetinfo, packetBufferSize) From ec9aaf4e897528fc1b3c0309ed6d5fc9e5e6aae9 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 13:40:53 +0000 Subject: [PATCH 17/37] Started on dg.NewFromProtocol (migrate_from) Signed-off-by: Jimmy Moore --- cmd/serve.go | 2 +- pkg/storage/devicegroup/device_group.go | 387 ------------------ pkg/storage/devicegroup/device_group_from.go | 53 +++ pkg/storage/devicegroup/device_group_test.go | 62 ++- pkg/storage/devicegroup/device_group_to.go | 402 +++++++++++++++++++ 5 files changed, 515 insertions(+), 391 deletions(-) create mode 100644 pkg/storage/devicegroup/device_group_from.go create mode 100644 pkg/storage/devicegroup/device_group_to.go diff --git a/cmd/serve.go b/cmd/serve.go index 37afe4e..e498e19 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -89,7 +89,7 @@ func runServe(_ *cobra.Command, _ []string) { panic(err) } - dg, err := devicegroup.New(siloConf.Device, log, siloMetrics) + dg, err := 
devicegroup.NewFromSchema(siloConf.Device, log, siloMetrics) if err != nil { panic(err) } diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 3ef113a..7682858 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -1,7 +1,6 @@ package devicegroup import ( - "context" "errors" "time" @@ -9,14 +8,10 @@ import ( "github.com/loopholelabs/silo/pkg/storage" "github.com/loopholelabs/silo/pkg/storage/blocks" "github.com/loopholelabs/silo/pkg/storage/config" - "github.com/loopholelabs/silo/pkg/storage/device" "github.com/loopholelabs/silo/pkg/storage/dirtytracker" - "github.com/loopholelabs/silo/pkg/storage/expose" "github.com/loopholelabs/silo/pkg/storage/metrics" "github.com/loopholelabs/silo/pkg/storage/migrator" - "github.com/loopholelabs/silo/pkg/storage/modules" "github.com/loopholelabs/silo/pkg/storage/protocol" - "github.com/loopholelabs/silo/pkg/storage/protocol/packets" "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" ) @@ -50,392 +45,10 @@ type DeviceInformation struct { migrationError chan error } -func New(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { - dg := &DeviceGroup{ - log: log, - met: met, - devices: make([]*DeviceInformation, 0), - } - - for _, s := range ds { - prov, exp, err := device.NewDeviceWithLoggingMetrics(s, log, met) - if err != nil { - if log != nil { - log.Error().Err(err).Str("schema", string(s.Encode())).Msg("could not create device") - } - // We should try to close / shutdown any successful devices we created here... - // But it's likely to be critical. - dg.CloseAll() - return nil, err - } - - blockSize := int(s.ByteBlockSize()) - if blockSize == 0 { - blockSize = defaultBlockSize - } - - local := modules.NewLockable(prov) - mlocal := modules.NewMetrics(local) - dirtyLocal, dirtyRemote := dirtytracker.NewDirtyTracker(mlocal, blockSize) - vmonitor := volatilitymonitor.NewVolatilityMonitor(dirtyLocal, blockSize, volatilityExpiry) - - totalBlocks := (int(local.Size()) + blockSize - 1) / blockSize - orderer := blocks.NewPriorityBlockOrder(totalBlocks, vmonitor) - orderer.AddAll() - - if exp != nil { - exp.SetProvider(vmonitor) - } - - // Add to metrics if given. 
- if met != nil { - met.AddMetrics(s.Name, mlocal) - if exp != nil { - met.AddNBD(s.Name, exp.(*expose.ExposedStorageNBDNL)) - } - met.AddDirtyTracker(s.Name, dirtyRemote) - met.AddVolatilityMonitor(s.Name, vmonitor) - } - - dg.devices = append(dg.devices, &DeviceInformation{ - size: local.Size(), - blockSize: uint64(blockSize), - numBlocks: totalBlocks, - schema: s, - prov: prov, - storage: local, - exp: exp, - volatility: vmonitor, - dirtyLocal: dirtyLocal, - dirtyRemote: dirtyRemote, - orderer: orderer, - }) - } - - if log != nil { - log.Debug().Int("devices", len(dg.devices)).Msg("created device group") - } - return dg, nil -} - func (dg *DeviceGroup) GetProvider(index int) storage.Provider { return dg.devices[index].storage } -func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { - // We will use dev 0 to communicate - dg.controlProtocol = pro - - // First lets setup the ToProtocol - for index, d := range dg.devices { - d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index+1), pro) - d.to.SetCompression(true) - - if dg.met != nil { - dg.met.AddToProtocol(d.schema.Name, d.to) - } - } - - // Now package devices up into a single DeviceGroupInfo - dgi := &packets.DeviceGroupInfo{ - Devices: make(map[int]*packets.DevInfo), - } - - for index, d := range dg.devices { - di := &packets.DevInfo{ - Size: d.prov.Size(), - BlockSize: uint32(d.blockSize), - Name: d.schema.Name, - Schema: string(d.schema.Encode()), - } - dgi.Devices[index+1] = di - } - - // Send the single DeviceGroupInfo packet down our control channel 0 - dgiData := packets.EncodeDeviceGroupInfo(dgi) - _, err := dg.controlProtocol.SendPacket(0, protocol.IDPickAny, dgiData, protocol.UrgencyUrgent) - - return err -} - -// This will Migrate all devices to the 'to' setup in SendDevInfo stage. -func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int, p *migrator.MigrationProgress)) error { - for _, d := range dg.devices { - if d.to == nil { - return errNotSetup - } - } - - ctime := time.Now() - - if dg.log != nil { - dg.log.Debug().Int("devices", len(dg.devices)).Msg("migrating device group") - } - - // Add up device sizes, so we can allocate the concurrency proportionally - totalSize := uint64(0) - for _, d := range dg.devices { - totalSize += d.size - } - - // We need at least this much... - if maxConcurrency < len(dg.devices) { - maxConcurrency = len(dg.devices) - } - // We will allocate each device at least ONE... - maxConcurrency -= len(dg.devices) - - for index, d := range dg.devices { - concurrency := 1 + (uint64(maxConcurrency) * d.size / totalSize) - d.migrationError = make(chan error, 1) // We will just hold onto the first error for now. - - setMigrationError := func(err error) { - if err != nil && err != context.Canceled { - select { - case d.migrationError <- err: - default: - } - } - } - - // Setup d.to - go func() { - err := d.to.HandleNeedAt(func(offset int64, length int32) { - if dg.log != nil { - dg.log.Debug(). - Int64("offset", offset). - Int32("length", length). - Int("dev", index). - Str("name", d.schema.Name). 
- Msg("NeedAt for device") - } - // Prioritize blocks - endOffset := uint64(offset + int64(length)) - if endOffset > d.size { - endOffset = d.size - } - - startBlock := int(offset / int64(d.blockSize)) - endBlock := int((endOffset-1)/d.blockSize) + 1 - for b := startBlock; b < endBlock; b++ { - d.orderer.PrioritiseBlock(b) - } - }) - setMigrationError(err) - }() - - go func() { - err := d.to.HandleDontNeedAt(func(offset int64, length int32) { - if dg.log != nil { - dg.log.Debug(). - Int64("offset", offset). - Int32("length", length). - Int("dev", index). - Str("name", d.schema.Name). - Msg("DontNeedAt for device") - } - // Deprioritize blocks - endOffset := uint64(offset + int64(length)) - if endOffset > d.size { - endOffset = d.size - } - - startBlock := int(offset / int64(d.blockSize)) - endBlock := int((endOffset-1)/d.blockSize) + 1 - for b := startBlock; b < endBlock; b++ { - d.orderer.Remove(b) - } - }) - setMigrationError(err) - }() - - cfg := migrator.NewConfig() - cfg.Logger = dg.log - cfg.BlockSize = int(d.blockSize) - cfg.Concurrency = map[int]int{ - storage.BlockTypeAny: int(concurrency), - } - cfg.LockerHandler = func() { - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) - d.storage.Lock() - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) - } - cfg.UnlockerHandler = func() { - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) - d.storage.Unlock() - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) - } - cfg.ErrorHandler = func(_ *storage.BlockInfo, err error) { - setMigrationError(err) - } - cfg.ProgressHandler = func(p *migrator.MigrationProgress) { - progressHandler(index, p) - } - mig, err := migrator.NewMigrator(d.dirtyRemote, d.to, d.orderer, cfg) - if err != nil { - return err - } - d.migrator = mig - if dg.met != nil { - dg.met.AddMigrator(d.schema.Name, mig) - } - if dg.log != nil { - dg.log.Debug(). - Uint64("concurrency", concurrency). - Int("index", index). - Str("name", d.schema.Name). - Msg("Setup migrator") - } - } - - errs := make(chan error, len(dg.devices)) - - // Now start them all migrating, and collect err - for _, d := range dg.devices { - go func() { - err := d.migrator.Migrate(d.numBlocks) - errs <- err - }() - } - - // Check for error from Migrate, and then Wait for completion of all devices... 
- for index := range dg.devices { - migErr := <-errs - if migErr != nil { - if dg.log != nil { - dg.log.Error().Err(migErr).Int("index", index).Msg("error migrating device group") - } - return migErr - } - } - - for index, d := range dg.devices { - err := d.migrator.WaitForCompletion() - if err != nil { - if dg.log != nil { - dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group waiting for completion") - } - return err - } - - // Check for any migration error - select { - case err := <-d.migrationError: - if dg.log != nil { - dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group from goroutines") - } - return err - default: - } - } - - if dg.log != nil { - dg.log.Debug().Int64("duration", time.Since(ctime).Milliseconds()).Int("devices", len(dg.devices)).Msg("migration of device group completed") - } - - return nil -} - -type MigrateDirtyHooks struct { - PreGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) - PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) - PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool - Completed func(index int, to *protocol.ToProtocol) -} - -func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { - errs := make(chan error, len(dg.devices)) - - for index, d := range dg.devices { - go func() { - dirtyHistory := make([]int, 0) - - for { - if hooks != nil && hooks.PreGetDirty != nil { - hooks.PreGetDirty(index, d.to, dirtyHistory) - } - - blocks := d.migrator.GetLatestDirty() - if dg.log != nil { - dg.log.Debug(). - Int("blocks", len(blocks)). - Int("index", index). - Str("name", d.schema.Name). - Msg("migrating dirty blocks") - } - - dirtyHistory = append(dirtyHistory, len(blocks)) - // Cap it at a certain MAX LENGTH - if len(dirtyHistory) > maxDirtyHistory { - dirtyHistory = dirtyHistory[1:] - } - - if hooks != nil && hooks.PostGetDirty != nil { - hooks.PostGetDirty(index, d.to, dirtyHistory, blocks) - } - - if len(blocks) == 0 { - break - } - - err := d.to.DirtyList(int(d.blockSize), blocks) - if err != nil { - errs <- err - return - } - - err = d.migrator.MigrateDirty(blocks) - if err != nil { - errs <- err - return - } - - if hooks != nil && hooks.PostMigrateDirty != nil { - if hooks.PostMigrateDirty(index, d.to, dirtyHistory) { - break // PostMigrateDirty returned true, which means stop doing any dirty loop business. - } - } - } - - err := d.migrator.WaitForCompletion() - if err != nil { - errs <- err - return - } - - err = d.to.SendEvent(&packets.Event{Type: packets.EventCompleted}) - if err != nil { - errs <- err - return - } - - if hooks != nil && hooks.Completed != nil { - hooks.Completed(index, d.to) - } - - if dg.log != nil { - dg.log.Debug(). - Int("index", index). - Str("name", d.schema.Name). 
- Msg("migrating dirty blocks completed") - } - - errs <- nil - }() - } - - // Wait for all dirty migrations to complete - // Check for any error and return it - for range dg.devices { - err := <-errs - if err != nil { - return err - } - } - - return nil -} - func (dg *DeviceGroup) CloseAll() error { if dg.log != nil { dg.log.Debug().Int("devices", len(dg.devices)).Msg("close device group") diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go new file mode 100644 index 0000000..1d6b1ec --- /dev/null +++ b/pkg/storage/devicegroup/device_group_from.go @@ -0,0 +1,53 @@ +package devicegroup + +import ( + "fmt" + + "github.com/loopholelabs/logging/types" + "github.com/loopholelabs/silo/pkg/storage/metrics" + "github.com/loopholelabs/silo/pkg/storage/protocol" + "github.com/loopholelabs/silo/pkg/storage/protocol/packets" +) + +func NewFromProtocol(pro protocol.Protocol, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { + // This is our control channel, and we're expecting a DeviceGroupInfo + _, dgData, err := pro.WaitForCommand(0, packets.CommandDeviceGroupInfo) + if err != nil { + return nil, err + } + dgi, err := packets.DecodeDeviceGroupInfo(dgData) + if err != nil { + return nil, err + } + + fmt.Printf("DeviceGroupInfo %v\n", dgi) + /* + for index, di := range dgi.Devices { + destStorageFactory := func(di *packets.DevInfo) storage.Provider { + store := sources.NewMemoryStorage(int(di.Size)) + incomingLock.Lock() + incomingProviders[uint32(index)] = store + incomingLock.Unlock() + return store + } + + from := protocol.NewFromProtocol(ctx, uint32(index), destStorageFactory, prDest) + err = from.SetDevInfo(di) + assert.NoError(t, err) + go func() { + err := from.HandleReadAt() + assert.ErrorIs(t, err, context.Canceled) + }() + go func() { + err := from.HandleWriteAt() + assert.ErrorIs(t, err, context.Canceled) + }() + go func() { + err := from.HandleDirtyList(func(_ []uint) { + }) + assert.ErrorIs(t, err, context.Canceled) + }() + } + */ + return nil, nil +} diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index e0eeff9..abdbddf 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -50,7 +50,7 @@ func setupDeviceGroup(t *testing.T) *DeviceGroup { return nil } - dg, err := New(testDeviceSchema, nil, nil) + dg, err := NewFromSchema(testDeviceSchema, nil, nil) assert.NoError(t, err) t.Cleanup(func() { @@ -99,7 +99,7 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { } } -func TestDeviceGroupMigrate(t *testing.T) { +func TestDeviceGroupMigrateTo(t *testing.T) { dg := setupDeviceGroup(t) if dg == nil { return @@ -129,7 +129,6 @@ func TestDeviceGroupMigrate(t *testing.T) { assert.NoError(t, err) dgi, err := packets.DecodeDeviceGroupInfo(dgData) assert.NoError(t, err) - fmt.Printf("Device Group %v\n", dgi) for index, di := range dgi.Devices { destStorageFactory := func(di *packets.DevInfo) storage.Provider { @@ -197,3 +196,60 @@ func TestDeviceGroupMigrate(t *testing.T) { cancelfn() } + +func TestDeviceGroupMigrate(t *testing.T) { + dg := setupDeviceGroup(t) + if dg == nil { + return + } + + log := logging.New(logging.Zerolog, "silo", os.Stdout) + log.SetLevel(types.TraceLevel) + + // Create a simple pipe + r1, w1 := io.Pipe() + r2, w2 := io.Pipe() + + ctx, cancelfn := context.WithCancel(context.TODO()) + + initDev := func(ctx context.Context, p protocol.Protocol, dev uint32) { + } + + prSource := protocol.NewRW(ctx, 
[]io.Reader{r1}, []io.Writer{w2}, nil) + prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, initDev) + + go func() { + _ = prSource.Handle() + }() + go func() { + _ = prDest.Handle() + }() + + // Lets write some data... + for i := range testDeviceSchema { + prov := dg.GetProvider(i) + buff := make([]byte, prov.Size()) + _, err := rand.Read(buff) + assert.NoError(t, err) + _, err = prov.WriteAt(buff, 0) + assert.NoError(t, err) + } + + go func() { + dg2, err := NewFromProtocol(prDest, log, nil) + fmt.Printf("NewFromProtocol finished %v %v\n", dg2, err) + }() + + // Send all the dev info... + err := dg.StartMigrationTo(prSource) + assert.NoError(t, err) + + pHandler := func(_ int, _ *migrator.MigrationProgress) {} + + err = dg.MigrateAll(100, pHandler) + assert.NoError(t, err) + + // TODO: Check the data all got migrated correctly + + cancelfn() +} diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go new file mode 100644 index 0000000..ec49cf7 --- /dev/null +++ b/pkg/storage/devicegroup/device_group_to.go @@ -0,0 +1,402 @@ +package devicegroup + +import ( + "context" + "time" + + "github.com/loopholelabs/logging/types" + "github.com/loopholelabs/silo/pkg/storage" + "github.com/loopholelabs/silo/pkg/storage/blocks" + "github.com/loopholelabs/silo/pkg/storage/config" + "github.com/loopholelabs/silo/pkg/storage/device" + "github.com/loopholelabs/silo/pkg/storage/dirtytracker" + "github.com/loopholelabs/silo/pkg/storage/expose" + "github.com/loopholelabs/silo/pkg/storage/metrics" + "github.com/loopholelabs/silo/pkg/storage/migrator" + "github.com/loopholelabs/silo/pkg/storage/modules" + "github.com/loopholelabs/silo/pkg/storage/protocol" + "github.com/loopholelabs/silo/pkg/storage/protocol/packets" + "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" +) + +func NewFromSchema(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { + dg := &DeviceGroup{ + log: log, + met: met, + devices: make([]*DeviceInformation, 0), + } + + for _, s := range ds { + prov, exp, err := device.NewDeviceWithLoggingMetrics(s, log, met) + if err != nil { + if log != nil { + log.Error().Err(err).Str("schema", string(s.Encode())).Msg("could not create device") + } + // We should try to close / shutdown any successful devices we created here... + // But it's likely to be critical. + dg.CloseAll() + return nil, err + } + + blockSize := int(s.ByteBlockSize()) + if blockSize == 0 { + blockSize = defaultBlockSize + } + + local := modules.NewLockable(prov) + mlocal := modules.NewMetrics(local) + dirtyLocal, dirtyRemote := dirtytracker.NewDirtyTracker(mlocal, blockSize) + vmonitor := volatilitymonitor.NewVolatilityMonitor(dirtyLocal, blockSize, volatilityExpiry) + + totalBlocks := (int(local.Size()) + blockSize - 1) / blockSize + orderer := blocks.NewPriorityBlockOrder(totalBlocks, vmonitor) + orderer.AddAll() + + if exp != nil { + exp.SetProvider(vmonitor) + } + + // Add to metrics if given. 
+ if met != nil { + met.AddMetrics(s.Name, mlocal) + if exp != nil { + met.AddNBD(s.Name, exp.(*expose.ExposedStorageNBDNL)) + } + met.AddDirtyTracker(s.Name, dirtyRemote) + met.AddVolatilityMonitor(s.Name, vmonitor) + } + + dg.devices = append(dg.devices, &DeviceInformation{ + size: local.Size(), + blockSize: uint64(blockSize), + numBlocks: totalBlocks, + schema: s, + prov: prov, + storage: local, + exp: exp, + volatility: vmonitor, + dirtyLocal: dirtyLocal, + dirtyRemote: dirtyRemote, + orderer: orderer, + }) + } + + if log != nil { + log.Debug().Int("devices", len(dg.devices)).Msg("created device group") + } + return dg, nil +} + +func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { + // We will use dev 0 to communicate + dg.controlProtocol = pro + + // First lets setup the ToProtocol + for index, d := range dg.devices { + d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index+1), pro) + d.to.SetCompression(true) + + if dg.met != nil { + dg.met.AddToProtocol(d.schema.Name, d.to) + } + } + + // Now package devices up into a single DeviceGroupInfo + dgi := &packets.DeviceGroupInfo{ + Devices: make(map[int]*packets.DevInfo), + } + + for index, d := range dg.devices { + di := &packets.DevInfo{ + Size: d.prov.Size(), + BlockSize: uint32(d.blockSize), + Name: d.schema.Name, + Schema: string(d.schema.Encode()), + } + dgi.Devices[index+1] = di + } + + // Send the single DeviceGroupInfo packet down our control channel 0 + dgiData := packets.EncodeDeviceGroupInfo(dgi) + _, err := dg.controlProtocol.SendPacket(0, protocol.IDPickAny, dgiData, protocol.UrgencyUrgent) + + return err +} + +// This will Migrate all devices to the 'to' setup in SendDevInfo stage. +func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int, p *migrator.MigrationProgress)) error { + for _, d := range dg.devices { + if d.to == nil { + return errNotSetup + } + } + + ctime := time.Now() + + if dg.log != nil { + dg.log.Debug().Int("devices", len(dg.devices)).Msg("migrating device group") + } + + // Add up device sizes, so we can allocate the concurrency proportionally + totalSize := uint64(0) + for _, d := range dg.devices { + totalSize += d.size + } + + // We need at least this much... + if maxConcurrency < len(dg.devices) { + maxConcurrency = len(dg.devices) + } + // We will allocate each device at least ONE... + maxConcurrency -= len(dg.devices) + + for index, d := range dg.devices { + concurrency := 1 + (uint64(maxConcurrency) * d.size / totalSize) + d.migrationError = make(chan error, 1) // We will just hold onto the first error for now. + + setMigrationError := func(err error) { + if err != nil && err != context.Canceled { + select { + case d.migrationError <- err: + default: + } + } + } + + // Setup d.to + go func() { + err := d.to.HandleNeedAt(func(offset int64, length int32) { + if dg.log != nil { + dg.log.Debug(). + Int64("offset", offset). + Int32("length", length). + Int("dev", index). + Str("name", d.schema.Name). + Msg("NeedAt for device") + } + // Prioritize blocks + endOffset := uint64(offset + int64(length)) + if endOffset > d.size { + endOffset = d.size + } + + startBlock := int(offset / int64(d.blockSize)) + endBlock := int((endOffset-1)/d.blockSize) + 1 + for b := startBlock; b < endBlock; b++ { + d.orderer.PrioritiseBlock(b) + } + }) + setMigrationError(err) + }() + + go func() { + err := d.to.HandleDontNeedAt(func(offset int64, length int32) { + if dg.log != nil { + dg.log.Debug(). + Int64("offset", offset). + Int32("length", length). + Int("dev", index). 
+ Str("name", d.schema.Name). + Msg("DontNeedAt for device") + } + // Deprioritize blocks + endOffset := uint64(offset + int64(length)) + if endOffset > d.size { + endOffset = d.size + } + + startBlock := int(offset / int64(d.blockSize)) + endBlock := int((endOffset-1)/d.blockSize) + 1 + for b := startBlock; b < endBlock; b++ { + d.orderer.Remove(b) + } + }) + setMigrationError(err) + }() + + cfg := migrator.NewConfig() + cfg.Logger = dg.log + cfg.BlockSize = int(d.blockSize) + cfg.Concurrency = map[int]int{ + storage.BlockTypeAny: int(concurrency), + } + cfg.LockerHandler = func() { + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) + d.storage.Lock() + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) + } + cfg.UnlockerHandler = func() { + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) + d.storage.Unlock() + setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) + } + cfg.ErrorHandler = func(_ *storage.BlockInfo, err error) { + setMigrationError(err) + } + cfg.ProgressHandler = func(p *migrator.MigrationProgress) { + progressHandler(index, p) + } + mig, err := migrator.NewMigrator(d.dirtyRemote, d.to, d.orderer, cfg) + if err != nil { + return err + } + d.migrator = mig + if dg.met != nil { + dg.met.AddMigrator(d.schema.Name, mig) + } + if dg.log != nil { + dg.log.Debug(). + Uint64("concurrency", concurrency). + Int("index", index). + Str("name", d.schema.Name). + Msg("Setup migrator") + } + } + + errs := make(chan error, len(dg.devices)) + + // Now start them all migrating, and collect err + for _, d := range dg.devices { + go func() { + err := d.migrator.Migrate(d.numBlocks) + errs <- err + }() + } + + // Check for error from Migrate, and then Wait for completion of all devices... + for index := range dg.devices { + migErr := <-errs + if migErr != nil { + if dg.log != nil { + dg.log.Error().Err(migErr).Int("index", index).Msg("error migrating device group") + } + return migErr + } + } + + for index, d := range dg.devices { + err := d.migrator.WaitForCompletion() + if err != nil { + if dg.log != nil { + dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group waiting for completion") + } + return err + } + + // Check for any migration error + select { + case err := <-d.migrationError: + if dg.log != nil { + dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group from goroutines") + } + return err + default: + } + } + + if dg.log != nil { + dg.log.Debug().Int64("duration", time.Since(ctime).Milliseconds()).Int("devices", len(dg.devices)).Msg("migration of device group completed") + } + + return nil +} + +type MigrateDirtyHooks struct { + PreGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) + PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) + PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool + Completed func(index int, to *protocol.ToProtocol) +} + +func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { + errs := make(chan error, len(dg.devices)) + + for index, d := range dg.devices { + go func() { + dirtyHistory := make([]int, 0) + + for { + if hooks != nil && hooks.PreGetDirty != nil { + hooks.PreGetDirty(index, d.to, dirtyHistory) + } + + blocks := d.migrator.GetLatestDirty() + if dg.log != nil { + dg.log.Debug(). + Int("blocks", len(blocks)). + Int("index", index). + Str("name", d.schema.Name). 
+ Msg("migrating dirty blocks") + } + + dirtyHistory = append(dirtyHistory, len(blocks)) + // Cap it at a certain MAX LENGTH + if len(dirtyHistory) > maxDirtyHistory { + dirtyHistory = dirtyHistory[1:] + } + + if hooks != nil && hooks.PostGetDirty != nil { + hooks.PostGetDirty(index, d.to, dirtyHistory, blocks) + } + + if len(blocks) == 0 { + break + } + + err := d.to.DirtyList(int(d.blockSize), blocks) + if err != nil { + errs <- err + return + } + + err = d.migrator.MigrateDirty(blocks) + if err != nil { + errs <- err + return + } + + if hooks != nil && hooks.PostMigrateDirty != nil { + if hooks.PostMigrateDirty(index, d.to, dirtyHistory) { + break // PostMigrateDirty returned true, which means stop doing any dirty loop business. + } + } + } + + err := d.migrator.WaitForCompletion() + if err != nil { + errs <- err + return + } + + err = d.to.SendEvent(&packets.Event{Type: packets.EventCompleted}) + if err != nil { + errs <- err + return + } + + if hooks != nil && hooks.Completed != nil { + hooks.Completed(index, d.to) + } + + if dg.log != nil { + dg.log.Debug(). + Int("index", index). + Str("name", d.schema.Name). + Msg("migrating dirty blocks completed") + } + + errs <- nil + }() + } + + // Wait for all dirty migrations to complete + // Check for any error and return it + for range dg.devices { + err := <-errs + if err != nil { + return err + } + } + + return nil +} From f4e79ae8599f22aff9a785bb21cd1e497c566867 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 15:19:58 +0000 Subject: [PATCH 18/37] First deviceGroup migrate test working Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group_from.go | 107 +++++++++++++------ pkg/storage/devicegroup/device_group_test.go | 41 ++++++- pkg/storage/devicegroup/device_group_to.go | 30 +++--- 3 files changed, 131 insertions(+), 47 deletions(-) diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index 1d6b1ec..2432df6 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -1,15 +1,23 @@ package devicegroup import ( - "fmt" + "context" + "sync" "github.com/loopholelabs/logging/types" + "github.com/loopholelabs/silo/pkg/storage" + "github.com/loopholelabs/silo/pkg/storage/config" "github.com/loopholelabs/silo/pkg/storage/metrics" "github.com/loopholelabs/silo/pkg/storage/protocol" "github.com/loopholelabs/silo/pkg/storage/protocol/packets" ) -func NewFromProtocol(pro protocol.Protocol, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { +func NewFromProtocol(ctx context.Context, + pro protocol.Protocol, + tweakDeviceSchema func(index int, name string, schema string) string, + log types.Logger, + met metrics.SiloMetrics) (*DeviceGroup, error) { + // This is our control channel, and we're expecting a DeviceGroupInfo _, dgData, err := pro.WaitForCommand(0, packets.CommandDeviceGroupInfo) if err != nil { @@ -20,34 +28,71 @@ func NewFromProtocol(pro protocol.Protocol, log types.Logger, met metrics.SiloMe return nil, err } - fmt.Printf("DeviceGroupInfo %v\n", dgi) - /* - for index, di := range dgi.Devices { - destStorageFactory := func(di *packets.DevInfo) storage.Provider { - store := sources.NewMemoryStorage(int(di.Size)) - incomingLock.Lock() - incomingProviders[uint32(index)] = store - incomingLock.Unlock() - return store - } - - from := protocol.NewFromProtocol(ctx, uint32(index), destStorageFactory, prDest) - err = from.SetDevInfo(di) - assert.NoError(t, err) - go func() { - err := 
from.HandleReadAt() - assert.ErrorIs(t, err, context.Canceled) - }() - go func() { - err := from.HandleWriteAt() - assert.ErrorIs(t, err, context.Canceled) - }() - go func() { - err := from.HandleDirtyList(func(_ []uint) { - }) - assert.ErrorIs(t, err, context.Canceled) - }() + devices := make([]*config.DeviceSchema, 0) + + // First create the devices we need using the schemas sent... + for index, di := range dgi.Devices { + ds := &config.DeviceSchema{} + // We may want to tweak schemas here eg autoStart = false on sync. Or modify pathnames. + schema := di.Schema + if tweakDeviceSchema != nil { + schema = tweakDeviceSchema(index-1, di.Name, schema) + } + err := ds.Decode(schema) + if err != nil { + return nil, err } - */ - return nil, nil + devices = append(devices, ds) + } + + dg, err := NewFromSchema(devices, log, met) + if err != nil { + return nil, err + } + + var wg sync.WaitGroup + wg.Add(len(dg.devices)) + + // We need to create the FromProtocol for each device, and associated goroutines here. + for index, di := range dgi.Devices { + destStorageFactory := func(di *packets.DevInfo) storage.Provider { + // TODO: WaitingCache should go in here... + return dg.GetProvider(index - 1) + } + + from := protocol.NewFromProtocol(ctx, uint32(index), destStorageFactory, pro) + err = from.SetDevInfo(di) + if err != nil { + return nil, err + } + go func() { + _ = from.HandleReadAt() + }() + go func() { + _ = from.HandleWriteAt() + }() + go func() { + _ = from.HandleDirtyList(func(_ []uint) { + // TODO: Tell the waitingCache about it + }) + }() + go func() { + from.HandleEvent(func(p *packets.Event) { + if p.Type == packets.EventCompleted { + wg.Done() + } + // TODO: Pass events on + }) + }() + } + + // Wait for completion events from all devices here. + // TODO: Split this up into a separate call... + wg.Wait() + + return dg, nil +} + +func (dg *DeviceGroup) Wait() { + } diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index abdbddf..6d9dfc2 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -7,6 +7,7 @@ import ( "io" "os" "os/user" + "strings" "sync" "testing" @@ -203,6 +204,12 @@ func TestDeviceGroupMigrate(t *testing.T) { return } + // Remove the receiving files + t.Cleanup(func() { + os.Remove("testrecv_test1") + os.Remove("testrecv_test2") + }) + log := logging.New(logging.Zerolog, "silo", os.Stdout) log.SetLevel(types.TraceLevel) @@ -235,9 +242,24 @@ func TestDeviceGroupMigrate(t *testing.T) { assert.NoError(t, err) } + var dg2 *DeviceGroup + var wg sync.WaitGroup + + // We will tweak schema in recv here so we have separate paths. + tweak := func(index int, name string, schema string) string { + s := strings.ReplaceAll(schema, "testdev_test1", "testrecv_test1") + s = strings.ReplaceAll(s, "testdev_test2", "testrecv_test2") + return s + } + + wg.Add(1) go func() { - dg2, err := NewFromProtocol(prDest, log, nil) - fmt.Printf("NewFromProtocol finished %v %v\n", dg2, err) + var err error + dg2, err = NewFromProtocol(ctx, prDest, tweak, nil, nil) + + fmt.Printf("DG2 setup as %v\n", dg2) + assert.NoError(t, err) + wg.Done() }() // Send all the dev info... @@ -249,7 +271,20 @@ func TestDeviceGroupMigrate(t *testing.T) { err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) - // TODO: Check the data all got migrated correctly + err = dg.Completed() + assert.NoError(t, err) + + wg.Wait() + + // Check the data all got migrated correctly from dg to dg2. 
+ for i := range testDeviceSchema { + prov := dg.GetProvider(i) + destProvider := dg2.GetProvider(i) + assert.NotNil(t, destProvider) + eq, err := storage.Equals(prov, destProvider, 1024*1024) + assert.NoError(t, err) + assert.True(t, eq) + } cancelfn() } diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index ec49cf7..df704c5 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -368,23 +368,10 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { return } - err = d.to.SendEvent(&packets.Event{Type: packets.EventCompleted}) - if err != nil { - errs <- err - return - } - if hooks != nil && hooks.Completed != nil { hooks.Completed(index, d.to) } - if dg.log != nil { - dg.log.Debug(). - Int("index", index). - Str("name", d.schema.Name). - Msg("migrating dirty blocks completed") - } - errs <- nil }() } @@ -400,3 +387,20 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { return nil } + +func (dg *DeviceGroup) Completed() error { + for index, d := range dg.devices { + err := d.to.SendEvent(&packets.Event{Type: packets.EventCompleted}) + if err != nil { + return err + } + + if dg.log != nil { + dg.log.Debug(). + Int("index", index). + Str("name", d.schema.Name). + Msg("migration completed") + } + } + return nil +} From 4d6eb178df0f8f63115d8ed8f4703ac83dca2ad8 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 16:05:01 +0000 Subject: [PATCH 19/37] Split dg.FromProtocol in two, so we can start using devices before migration completed. Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 10 ++++++---- pkg/storage/devicegroup/device_group_from.go | 17 ++++++----------- pkg/storage/devicegroup/device_group_test.go | 6 ++++-- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 7682858..2fe1caa 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -2,6 +2,7 @@ package devicegroup import ( "errors" + "sync" "time" "github.com/loopholelabs/logging/types" @@ -22,10 +23,11 @@ const maxDirtyHistory = 32 var errNotSetup = errors.New("toProtocol not setup") type DeviceGroup struct { - log types.Logger - met metrics.SiloMetrics - devices []*DeviceInformation - controlProtocol protocol.Protocol + log types.Logger + met metrics.SiloMetrics + devices []*DeviceInformation + controlProtocol protocol.Protocol + incomingDevicesWg sync.WaitGroup } type DeviceInformation struct { diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index 2432df6..4155887 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -2,7 +2,6 @@ package devicegroup import ( "context" - "sync" "github.com/loopholelabs/logging/types" "github.com/loopholelabs/silo/pkg/storage" @@ -50,8 +49,7 @@ func NewFromProtocol(ctx context.Context, return nil, err } - var wg sync.WaitGroup - wg.Add(len(dg.devices)) + dg.incomingDevicesWg.Add(len(dg.devices)) // We need to create the FromProtocol for each device, and associated goroutines here. 
for index, di := range dgi.Devices { @@ -79,20 +77,17 @@ func NewFromProtocol(ctx context.Context, go func() { from.HandleEvent(func(p *packets.Event) { if p.Type == packets.EventCompleted { - wg.Done() + dg.incomingDevicesWg.Done() } - // TODO: Pass events on + // TODO: Pass events on to caller so they can be handled upstream }) }() } - // Wait for completion events from all devices here. - // TODO: Split this up into a separate call... - wg.Wait() - return dg, nil } -func (dg *DeviceGroup) Wait() { - +// Wait for completion events from all devices here. +func (dg *DeviceGroup) WaitForCompletion() { + dg.incomingDevicesWg.Wait() } diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 6d9dfc2..7ef3819 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -256,8 +256,6 @@ func TestDeviceGroupMigrate(t *testing.T) { go func() { var err error dg2, err = NewFromProtocol(ctx, prDest, tweak, nil, nil) - - fmt.Printf("DG2 setup as %v\n", dg2) assert.NoError(t, err) wg.Done() }() @@ -274,8 +272,12 @@ func TestDeviceGroupMigrate(t *testing.T) { err = dg.Completed() assert.NoError(t, err) + // Make sure the incoming devices were setup completely wg.Wait() + // Make sure all incoming devices are complete + dg2.WaitForCompletion() + // Check the data all got migrated correctly from dg to dg2. for i := range testDeviceSchema { prov := dg.GetProvider(i) From 2d508f75d9110a8f1503a26c4769ad67e8f09635 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 18:50:22 +0000 Subject: [PATCH 20/37] Added waitingCache bits into dg from Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group.go | 31 +++++++++++--------- pkg/storage/devicegroup/device_group_from.go | 17 ++++++++--- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 2fe1caa..6028fc2 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -14,6 +14,7 @@ import ( "github.com/loopholelabs/silo/pkg/storage/migrator" "github.com/loopholelabs/silo/pkg/storage/protocol" "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" + "github.com/loopholelabs/silo/pkg/storage/waitingcache" ) const volatilityExpiry = 30 * time.Minute @@ -31,20 +32,22 @@ type DeviceGroup struct { } type DeviceInformation struct { - size uint64 - blockSize uint64 - numBlocks int - schema *config.DeviceSchema - prov storage.Provider - storage storage.LockableProvider - exp storage.ExposedStorage - volatility *volatilitymonitor.VolatilityMonitor - dirtyLocal *dirtytracker.Local - dirtyRemote *dirtytracker.Remote - to *protocol.ToProtocol - orderer *blocks.PriorityBlockOrder - migrator *migrator.Migrator - migrationError chan error + size uint64 + blockSize uint64 + numBlocks int + schema *config.DeviceSchema + prov storage.Provider + storage storage.LockableProvider + exp storage.ExposedStorage + volatility *volatilitymonitor.VolatilityMonitor + dirtyLocal *dirtytracker.Local + dirtyRemote *dirtytracker.Remote + to *protocol.ToProtocol + orderer *blocks.PriorityBlockOrder + migrator *migrator.Migrator + migrationError chan error + waitingCacheLocal *waitingcache.Local + waitingCacheRemote *waitingcache.Remote } func (dg *DeviceGroup) GetProvider(index int) storage.Provider { diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index 4155887..2474177 
100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -9,6 +9,7 @@ import ( "github.com/loopholelabs/silo/pkg/storage/metrics" "github.com/loopholelabs/silo/pkg/storage/protocol" "github.com/loopholelabs/silo/pkg/storage/protocol/packets" + "github.com/loopholelabs/silo/pkg/storage/waitingcache" ) func NewFromProtocol(ctx context.Context, @@ -53,9 +54,16 @@ func NewFromProtocol(ctx context.Context, // We need to create the FromProtocol for each device, and associated goroutines here. for index, di := range dgi.Devices { + d := dg.devices[index-1] + destStorageFactory := func(di *packets.DevInfo) storage.Provider { - // TODO: WaitingCache should go in here... - return dg.GetProvider(index - 1) + d.waitingCacheLocal, d.waitingCacheRemote = waitingcache.NewWaitingCacheWithLogger(d.prov, int(di.BlockSize), dg.log) + + if dg.devices[index-1].exp != nil { + dg.devices[index-1].exp.SetProvider(d.waitingCacheLocal) + } + + return d.waitingCacheRemote } from := protocol.NewFromProtocol(ctx, uint32(index), destStorageFactory, pro) @@ -70,8 +78,9 @@ func NewFromProtocol(ctx context.Context, _ = from.HandleWriteAt() }() go func() { - _ = from.HandleDirtyList(func(_ []uint) { - // TODO: Tell the waitingCache about it + _ = from.HandleDirtyList(func(dirtyBlocks []uint) { + // Tell the waitingCache about it + d.waitingCacheLocal.DirtyBlocks(dirtyBlocks) }) }() go func() { From 36294e15231d8cfe8c0e194a8c748b765003805e Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 19:45:18 +0000 Subject: [PATCH 21/37] Added simple authority transfer to dg Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group_from.go | 30 ++++++++++++++++++++ pkg/storage/devicegroup/device_group_test.go | 27 ++++++++++++++++-- pkg/storage/devicegroup/device_group_to.go | 22 ++++++++++++++ 3 files changed, 76 insertions(+), 3 deletions(-) diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index 2474177..b27d8fc 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -2,6 +2,7 @@ package devicegroup import ( "context" + "errors" "github.com/loopholelabs/logging/types" "github.com/loopholelabs/silo/pkg/storage" @@ -50,6 +51,8 @@ func NewFromProtocol(ctx context.Context, return nil, err } + dg.controlProtocol = pro + dg.incomingDevicesWg.Add(len(dg.devices)) // We need to create the FromProtocol for each device, and associated goroutines here. 
@@ -100,3 +103,30 @@ func NewFromProtocol(ctx context.Context,
 func (dg *DeviceGroup) WaitForCompletion() {
 	dg.incomingDevicesWg.Wait()
 }
+
+func (dg *DeviceGroup) HandleCustomData(cb func(customData []byte)) error {
+	for {
+		// This is our control channel, and we're expecting a custom Event packet
+		id, evData, err := dg.controlProtocol.WaitForCommand(0, packets.CommandEvent)
+		if err != nil {
+			return err
+		}
+		ev, err := packets.DecodeEvent(evData)
+		if err != nil {
+			return err
+		}
+
+		if ev.Type != packets.EventCustom || ev.CustomType != 0 {
+			return errors.New("unexpected event")
+		}
+
+		cb(ev.CustomPayload)
+
+		// Reply with ack
+		eack := packets.EncodeEventResponse()
+		_, err = dg.controlProtocol.SendPacket(0, id, eack, protocol.UrgencyUrgent)
+		if err != nil {
+			return err
+		}
+	}
+}
diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go
index 7ef3819..1814864 100644
--- a/pkg/storage/devicegroup/device_group_test.go
+++ b/pkg/storage/devicegroup/device_group_test.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 	"sync"
 	"testing"
+	"time"
 
 	"github.com/loopholelabs/logging"
 	"github.com/loopholelabs/logging/types"
@@ -264,17 +265,37 @@ func TestDeviceGroupMigrate(t *testing.T) {
 	err := dg.StartMigrationTo(prSource)
 	assert.NoError(t, err)
 
+	// Make sure the incoming devices were setup completely
+	wg.Wait()
+
+	// TransferAuthority
+	var tawg sync.WaitGroup
+	tawg.Add(1)
+	go func() {
+		err := dg2.HandleCustomData(func(data []byte) {
+			assert.Equal(t, []byte("Hello"), data)
+			tawg.Done()
+		})
+		assert.ErrorIs(t, err, context.Canceled)
+	}()
+
+	tawg.Add(1)
+	time.AfterFunc(100*time.Millisecond, func() {
+		dg.SendCustomData([]byte("Hello"))
+		tawg.Done()
+	})
+
 	pHandler := func(_ int, _ *migrator.MigrationProgress) {}
 
 	err = dg.MigrateAll(100, pHandler)
 	assert.NoError(t, err)
 
+	// Make sure authority has been transferred as expected.
+ tawg.Wait() + err = dg.Completed() assert.NoError(t, err) - // Make sure the incoming devices were setup completely - wg.Wait() - // Make sure all incoming devices are complete dg2.WaitForCompletion() diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index df704c5..aaa01cb 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -404,3 +404,25 @@ func (dg *DeviceGroup) Completed() error { } return nil } + +func (dg *DeviceGroup) SendCustomData(customData []byte) error { + + // Send the single TransferAuthority packet down our control channel 0 + taData := packets.EncodeEvent(&packets.Event{ + Type: packets.EventCustom, + CustomType: 0, + CustomPayload: customData, + }) + id, err := dg.controlProtocol.SendPacket(0, protocol.IDPickAny, taData, protocol.UrgencyUrgent) + if err != nil { + return err + } + + // Wait for ack + ackData, err := dg.controlProtocol.WaitForPacket(0, id) + if err != nil { + return err + } + + return packets.DecodeEventResponse(ackData) +} From 9ab57ce784c413b58762773893e5c24e00ba771f Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 11 Dec 2024 20:50:26 +0000 Subject: [PATCH 22/37] Improved progressHandler for dg Signed-off-by: Jimmy Moore --- cmd/serve.go | 12 +++++++----- pkg/storage/devicegroup/device_group.go | 2 ++ pkg/storage/devicegroup/device_group_test.go | 10 ++++++++-- pkg/storage/devicegroup/device_group_to.go | 20 +++++++++++++++----- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/cmd/serve.go b/cmd/serve.go index e498e19..c499a86 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -135,11 +135,13 @@ func runServe(_ *cobra.Command, _ []string) { panic(err) } - err = dg.MigrateAll(1000, func(index int, p *migrator.MigrationProgress) { - fmt.Printf("[%d] Progress Moved: %d/%d %.2f%% Clean: %d/%d %.2f%% InProgress: %d\n", - index, p.MigratedBlocks, p.TotalBlocks, p.MigratedBlocksPerc, - p.ReadyBlocks, p.TotalBlocks, p.ReadyBlocksPerc, - p.ActiveBlocks) + err = dg.MigrateAll(1000, func(ps []*migrator.MigrationProgress) { + for index, p := range ps { + fmt.Printf("[%d] Progress Moved: %d/%d %.2f%% Clean: %d/%d %.2f%% InProgress: %d\n", + index, p.MigratedBlocks, p.TotalBlocks, p.MigratedBlocksPerc, + p.ReadyBlocks, p.TotalBlocks, p.ReadyBlocksPerc, + p.ActiveBlocks) + } }) if err != nil { dg.CloseAll() diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 6028fc2..82dc37b 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -29,6 +29,8 @@ type DeviceGroup struct { devices []*DeviceInformation controlProtocol protocol.Protocol incomingDevicesWg sync.WaitGroup + progressLock sync.Mutex + progress []*migrator.MigrationProgress } type DeviceInformation struct { diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 1814864..4cf9d0c 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -181,7 +181,13 @@ func TestDeviceGroupMigrateTo(t *testing.T) { err := dg.StartMigrationTo(prSource) assert.NoError(t, err) - pHandler := func(_ int, _ *migrator.MigrationProgress) {} + pHandler := func(prog []*migrator.MigrationProgress) { + for index, p := range prog { + if p != nil { + fmt.Printf("[%d] %d / %d\n", index, p.ReadyBlocks, p.TotalBlocks) + } + } + } err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) @@ -285,7 +291,7 @@ func 
TestDeviceGroupMigrate(t *testing.T) {
 		tawg.Done()
 	})
 
-	pHandler := func(_ int, _ *migrator.MigrationProgress) {}
+	pHandler := func(_ []*migrator.MigrationProgress) {}
 
 	err = dg.MigrateAll(100, pHandler)
 	assert.NoError(t, err)
diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go
index aaa01cb..ab0549a 100644
--- a/pkg/storage/devicegroup/device_group_to.go
+++ b/pkg/storage/devicegroup/device_group_to.go
@@ -21,9 +21,10 @@ import (
 
 func NewFromSchema(ds []*config.DeviceSchema, log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) {
 	dg := &DeviceGroup{
-		log:     log,
-		met:     met,
-		devices: make([]*DeviceInformation, 0),
+		log:      log,
+		met:      met,
+		devices:  make([]*DeviceInformation, 0),
+		progress: make([]*migrator.MigrationProgress, 0),
 	}
 
 	for _, s := range ds {
@@ -79,6 +80,12 @@ func NewFromSchema(ds []*config.DeviceSchema, log types.Logger, met metrics.Silo
 			dirtyRemote: dirtyRemote,
 			orderer:     orderer,
 		})
+
+		// Set these two at least, so we know *something* about every device in progress handler.
+		dg.progress = append(dg.progress, &migrator.MigrationProgress{
+			BlockSize:   blockSize,
+			TotalBlocks: totalBlocks,
+		})
 	}
 
 	if log != nil {
@@ -124,7 +131,7 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error {
 }
 
 // This will Migrate all devices to the 'to' setup in SendDevInfo stage.
-func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int, p *migrator.MigrationProgress)) error {
+func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []*migrator.MigrationProgress)) error {
 	for _, d := range dg.devices {
 		if d.to == nil {
 			return errNotSetup
@@ -234,7 +241,10 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(i int
 		setMigrationError(err)
 	}
 	cfg.ProgressHandler = func(p *migrator.MigrationProgress) {
-		progressHandler(index, p)
+		dg.progressLock.Lock()
+		dg.progress[index] = p
+		progressHandler(dg.progress)
+		dg.progressLock.Unlock()
 	}
 	mig, err := migrator.NewMigrator(d.dirtyRemote, d.to, d.orderer, cfg)
 	if err != nil {

From b35952a67ede6c8945ad22404902ef710f9cb82a Mon Sep 17 00:00:00 2001
From: Jimmy Moore
Date: Thu, 12 Dec 2024 10:35:48 +0000
Subject: [PATCH 23/37] Added README for dg usage

Signed-off-by: Jimmy Moore

---
 pkg/storage/devicegroup/README.md | 59 +++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 pkg/storage/devicegroup/README.md

diff --git a/pkg/storage/devicegroup/README.md b/pkg/storage/devicegroup/README.md
new file mode 100644
index 0000000..3e7a467
--- /dev/null
+++ b/pkg/storage/devicegroup/README.md
@@ -0,0 +1,59 @@
+# Device Group
+
+The `DeviceGroup` combines some number of Silo devices into a single unit, which can then be migrated to another Silo instance.
+All internal concerns, such as the volatilityMonitor, waitingCache, and the new S3 assist, are now hidden from the consumer.
+
+## Creation
+
+There are two ways to create a `DeviceGroup`:
+
+### NewFromSchema
+
+This takes in an array of Silo device configs and creates the devices. If `expose==true`, a corresponding NBD device will be created and attached.
+
+### NewFromProtocol
+
+This takes in a `protocol` and creates the devices as they are received from a sender.
+
+## Usage (Sending devices)
+
+Devices in a `DeviceGroup` are sent together, which allows Silo to optimize all aspects of the transfer.
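+
+As a rough sketch, the `devices` slice passed to `NewFromSchema` below is just a list of device schemas. The field names and values shown here are illustrative assumptions only; see `config.DeviceSchema` for the authoritative definition.
+
+    // Illustrative only - consult config.DeviceSchema for the exact fields and value formats.
+    devices := []*config.DeviceSchema{
+        {
+            Name:      "test1",
+            Size:      "8m",
+            System:    "file",
+            BlockSize: "1m",
+            Location:  "testdev_test1",
+        },
+    }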
+ + // Create a device group from schema + dg, err := devicegroup.NewFromSchema(devices, log, siloMetrics) + + // Start a migration + err := dg.StartMigrationTo(protocol) + + // Migrate the data with max total concurrency 100 + err = dg.MigrateAll(100, pHandler) + + // Migrate any dirty blocks + // hooks gives some control over the dirty loop + err = dg.MigrateDirty(hooks) + + // Close everything + dg.CloseAll() + +There is also support for sending custom data. This would typically be done either in `pHandler` (The progress handler), or in one of the `MigrateDirty` hooks. + + pHandler := func(ps []*migrator.MigrationProgress) { + // Do some test here to see if enough data migrated + + // If so, send a custom Authority Transfer event. + dg.SendCustomData(authorityTransferPacket) + } + +## Usage (Receiving devices) + + // Create a DeviceGroup from protocol + // tweak allows us to modify the schema + dg, err = NewFromProtocol(ctx, protocol, tweak, nil, nil) + + // Handle any custom data + go dg.HandleCustomData(func(data []byte) { + // We got sent some custom data! + }) + + // Wait for migration completion + dg.WaitForCompletion() From 1eb538c122ab94ff720311a98eaeb8dd0d597cfd Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Thu, 12 Dec 2024 21:18:14 +0000 Subject: [PATCH 24/37] Simplified cmd/connect to use new dg api Signed-off-by: Jimmy Moore --- cmd/connect.go | 407 +-------------------- cmd/serve.go | 51 ++- pkg/storage/devicegroup/README.md | 4 +- pkg/storage/devicegroup/device_group_to.go | 18 +- 4 files changed, 70 insertions(+), 410 deletions(-) diff --git a/cmd/connect.go b/cmd/connect.go index 4d58ff6..72d2002 100644 --- a/cmd/connect.go +++ b/cmd/connect.go @@ -2,39 +2,25 @@ package main import ( "context" - "crypto/sha256" "fmt" "io" "net" "net/http" "os" - "os/exec" "os/signal" - "sync" "syscall" "time" "github.com/loopholelabs/logging" "github.com/loopholelabs/logging/types" - "github.com/loopholelabs/silo/pkg/storage" - "github.com/loopholelabs/silo/pkg/storage/config" - "github.com/loopholelabs/silo/pkg/storage/expose" - "github.com/loopholelabs/silo/pkg/storage/integrity" + "github.com/loopholelabs/silo/pkg/storage/devicegroup" "github.com/loopholelabs/silo/pkg/storage/metrics" siloprom "github.com/loopholelabs/silo/pkg/storage/metrics/prometheus" - "github.com/loopholelabs/silo/pkg/storage/modules" "github.com/loopholelabs/silo/pkg/storage/protocol" - "github.com/loopholelabs/silo/pkg/storage/protocol/packets" - "github.com/loopholelabs/silo/pkg/storage/sources" - "github.com/loopholelabs/silo/pkg/storage/waitingcache" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/cobra" - - "github.com/fatih/color" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" ) var ( @@ -49,32 +35,12 @@ var ( // Address to connect to var connectAddr string -// Should we expose each device as an nbd device? -var connectExposeDev bool - -// Should we also mount the devices -var connectMountDev bool - -var connectProgress bool - var connectDebug bool - var connectMetrics string -// List of ExposedStorage so they can be cleaned up on exit. 
-var dstExposed []storage.ExposedStorage - -var dstProgress *mpb.Progress -var dstBars []*mpb.Bar -var dstWG sync.WaitGroup -var dstWGFirst bool - func init() { rootCmd.AddCommand(cmdConnect) cmdConnect.Flags().StringVarP(&connectAddr, "addr", "a", "localhost:5170", "Address to serve from") - cmdConnect.Flags().BoolVarP(&connectExposeDev, "expose", "e", false, "Expose as an nbd devices") - cmdConnect.Flags().BoolVarP(&connectMountDev, "mount", "m", false, "Mount the nbd devices") - cmdConnect.Flags().BoolVarP(&connectProgress, "progress", "p", false, "Show progress") cmdConnect.Flags().BoolVarP(&connectDebug, "debug", "d", false, "Debug logging (trace)") cmdConnect.Flags().StringVarP(&connectMetrics, "metrics", "M", "", "Prom metrics address") } @@ -116,26 +82,17 @@ func runConnect(_ *cobra.Command, _ []string) { go http.ListenAndServe(connectMetrics, nil) } - if connectProgress { - dstProgress = mpb.New( - mpb.WithOutput(color.Output), - mpb.WithAutoRefresh(), - ) - - dstBars = make([]*mpb.Bar, 0) - } - fmt.Printf("Starting silo connect from source %s\n", connectAddr) - dstExposed = make([]storage.ExposedStorage, 0) + var dg *devicegroup.DeviceGroup // Handle shutdown gracefully to disconnect any exposed devices correctly. c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { <-c - for _, e := range dstExposed { - _ = dstDeviceShutdown(e) + if dg != nil { + dg.CloseAll() } os.Exit(1) }() @@ -148,14 +105,10 @@ func runConnect(_ *cobra.Command, _ []string) { } // Wrap the connection in a protocol, and handle incoming devices - dstWGFirst = true - dstWG.Add(1) // We need to at least wait for one to complete. protoCtx, protoCancelfn := context.WithCancel(context.TODO()) - handleIncomingDevice := func(ctx context.Context, pro protocol.Protocol, dev uint32) { - handleIncomingDeviceWithLogging(ctx, pro, dev, log, siloMetrics) - } + handleIncomingDevice := func(ctx context.Context, pro protocol.Protocol, dev uint32) {} pro := protocol.NewRW(protoCtx, []io.Reader{con}, []io.Writer{con}, handleIncomingDevice) @@ -175,347 +128,27 @@ func runConnect(_ *cobra.Command, _ []string) { siloMetrics.AddProtocol("protocol", pro) } - dstWG.Wait() // Wait until the migrations have completed... - - if connectProgress { - dstProgress.Wait() - } - - if log != nil { - metrics := pro.GetMetrics() - log.Debug(). - Uint64("PacketsSent", metrics.PacketsSent). - Uint64("DataSent", metrics.DataSent). - Uint64("PacketsRecv", metrics.PacketsRecv). - Uint64("DataRecv", metrics.DataRecv). - Msg("protocol metrics") - } - - fmt.Printf("\nMigrations completed. Please ctrl-c if you want to shut down, or wait an hour :)\n") - - // We should pause here, to allow the user to do things with the devices - time.Sleep(10 * time.Hour) - - // Shutdown any storage exposed as devices - for _, e := range dstExposed { - _ = dstDeviceShutdown(e) - } -} - -// Handle a new incoming device. This is called when a packet is received for a device we haven't heard about before. 
-func handleIncomingDeviceWithLogging(ctx context.Context, pro protocol.Protocol, dev uint32, log types.RootLogger, met metrics.SiloMetrics) { - var destStorage storage.Provider - var destWaitingLocal *waitingcache.Local - var destWaitingRemote *waitingcache.Remote - var destMonitorStorage *modules.Hooks - var dest *protocol.FromProtocol - - var devSchema *config.DeviceSchema - - var bar *mpb.Bar - - var blockSize uint - var deviceName string - - var statusString = " " - var statusVerify = " " - var statusExposed = " " - - if !dstWGFirst { - // We have a new migration to deal with - dstWG.Add(1) + // TODO: Modify schemas a bit here... + tweak := func(index int, name string, schema string) string { + return schema } - dstWGFirst = false - - // This is a storage factory which will be called when we recive DevInfo. - storageFactory := func(di *packets.DevInfo) storage.Provider { - // fmt.Printf("= %d = Received DevInfo name=%s size=%d blocksize=%d schema=%s\n", dev, di.Name, di.Size, di.Block_size, di.Schema) - // Decode the schema - devSchema = &config.DeviceSchema{} - err := devSchema.Decode(di.Schema) - if err != nil { - panic(err) - } - - blockSize = uint(di.BlockSize) - deviceName = di.Name - - statusFn := func(_ decor.Statistics) string { - return statusString + statusVerify - } - - if connectProgress { - bar = dstProgress.AddBar(int64(di.Size), - mpb.PrependDecorators( - decor.Name(di.Name, decor.WCSyncSpaceR), - decor.Name(" "), - decor.Any(func(_ decor.Statistics) string { return statusExposed }, decor.WC{W: 4}), - decor.Name(" "), - decor.CountersKiloByte("%d/%d", decor.WCSyncWidth), - ), - mpb.AppendDecorators( - decor.EwmaETA(decor.ET_STYLE_GO, 30), - decor.Name(" "), - decor.EwmaSpeed(decor.SizeB1024(0), "% .2f", 60, decor.WCSyncWidth), - decor.OnComplete(decor.Percentage(decor.WC{W: 5}), "done"), - decor.Name(" "), - decor.Any(statusFn, decor.WC{W: 2}), - ), - ) - - dstBars = append(dstBars, bar) - } + dg, err = devicegroup.NewFromProtocol(protoCtx, pro, tweak, log, siloMetrics) - // You can change this to use sources.NewFileStorage etc etc - cr := func(_ int, s int) (storage.Provider, error) { - return sources.NewMemoryStorage(s), nil - } - // Setup some sharded memory storage (for concurrent write speed) - shardSize := di.Size - if di.Size > 64*1024 { - shardSize = di.Size / 1024 - } - - destStorage, err = modules.NewShardedStorage(int(di.Size), int(shardSize), cr) - if err != nil { - panic(err) // FIXME - } - - destMonitorStorage = modules.NewHooks(destStorage) - - if connectProgress { - lastValue := uint64(0) - lastTime := time.Now() - - destMonitorStorage.PostWrite = func(_ []byte, _ int64, n int, err error) (int, error) { - // Update the progress bar - available, total := destWaitingLocal.Availability() - v := uint64(available) * di.Size / uint64(total) - bar.SetCurrent(int64(v)) - bar.EwmaIncrInt64(int64(v-lastValue), time.Since(lastTime)) - lastTime = time.Now() - lastValue = v - - return n, err - } - } - - // Use a WaitingCache which will wait for migration blocks, send priorities etc - // A WaitingCache has two ends - local and remote. - destWaitingLocal, destWaitingRemote = waitingcache.NewWaitingCache(destMonitorStorage, int(di.BlockSize)) - - // Connect the waitingCache to the FromProtocol. - // Note that since these are hints, errors don't matter too much. - destWaitingLocal.NeedAt = func(offset int64, length int32) { - _ = dest.NeedAt(offset, length) - } + // Wait for completion events. 
+ dg.WaitForCompletion() - destWaitingLocal.DontNeedAt = func(offset int64, length int32) { - _ = dest.DontNeedAt(offset, length) - } - - conf := &config.DeviceSchema{} - _ = conf.Decode(di.Schema) - - // Expose this storage as a device if requested - if connectExposeDev { - p, err := dstDeviceSetup(destWaitingLocal) - if err != nil { - fmt.Printf("= %d = Error during setup (expose nbd) %v\n", dev, err) - } else { - statusExposed = p.Device() - dstExposed = append(dstExposed, p) - } - } - return destWaitingRemote - } - - dest = protocol.NewFromProtocol(ctx, dev, storageFactory, pro) - - if met != nil { - met.AddFromProtocol(deviceName, dest) - } - - var handlerWG sync.WaitGroup - - handlerWG.Add(1) - go func() { - _ = dest.HandleReadAt() - handlerWG.Done() - }() - handlerWG.Add(1) - go func() { - _ = dest.HandleWriteAt() - handlerWG.Done() - }() - handlerWG.Add(1) - go func() { - _ = dest.HandleDevInfo() - handlerWG.Done() - }() - - handlerWG.Add(1) - // Handle events from the source - go func() { - _ = dest.HandleEvent(func(e *packets.Event) { - switch e.Type { - - case packets.EventPostLock: - statusString = "L" // red.Sprintf("L") - case packets.EventPreLock: - statusString = "l" // red.Sprintf("l") - case packets.EventPostUnlock: - statusString = "U" // green.Sprintf("U") - case packets.EventPreUnlock: - statusString = "u" // green.Sprintf("u") - - // fmt.Printf("= %d = Event %s\n", dev, protocol.EventsByType[e.Type]) - // Check we have all data... - case packets.EventCompleted: - - if log != nil { - m := destWaitingLocal.GetMetrics() - log.Debug(). - Uint64("WaitForBlock", m.WaitForBlock). - Uint64("WaitForBlockHadRemote", m.WaitForBlockHadRemote). - Uint64("WaitForBlockHadLocal", m.WaitForBlockHadLocal). - Uint64("WaitForBlockTimeMS", uint64(m.WaitForBlockTime.Milliseconds())). - Uint64("WaitForBlockLock", m.WaitForBlockLock). - Uint64("WaitForBlockLockDone", m.WaitForBlockLockDone). - Uint64("MarkAvailableLocalBlock", m.MarkAvailableLocalBlock). - Uint64("MarkAvailableRemoteBlock", m.MarkAvailableRemoteBlock). - Uint64("AvailableLocal", m.AvailableLocal). - Uint64("AvailableRemote", m.AvailableRemote). - Str("name", deviceName). - Msg("waitingCacheMetrics") - - fromMetrics := dest.GetMetrics() - log.Debug(). - Uint64("RecvEvents", fromMetrics.RecvEvents). - Uint64("RecvHashes", fromMetrics.RecvHashes). - Uint64("RecvDevInfo", fromMetrics.RecvDevInfo). - Uint64("RecvAltSources", fromMetrics.RecvAltSources). - Uint64("RecvReadAt", fromMetrics.RecvReadAt). - Uint64("RecvWriteAtHash", fromMetrics.RecvWriteAtHash). - Uint64("RecvWriteAtComp", fromMetrics.RecvWriteAtComp). - Uint64("RecvWriteAt", fromMetrics.RecvWriteAt). - Uint64("RecvWriteAtWithMap", fromMetrics.RecvWriteAtWithMap). - Uint64("RecvRemoveFromMap", fromMetrics.RecvRemoveFromMap). - Uint64("RecvRemoveDev", fromMetrics.RecvRemoveDev). - Uint64("RecvDirtyList", fromMetrics.RecvDirtyList). - Uint64("SentNeedAt", fromMetrics.SentNeedAt). - Uint64("SentDontNeedAt", fromMetrics.SentDontNeedAt). - Str("name", deviceName). - Msg("fromProtocolMetrics") - } - - // We completed the migration, but we should wait for handlers to finish before we ok things... 
- // fmt.Printf("Completed, now wait for handlers...\n") - go func() { - handlerWG.Wait() - dstWG.Done() - }() - // available, total := destWaitingLocal.Availability() - // fmt.Printf("= %d = Availability (%d/%d)\n", dev, available, total) - // Set bar to completed - if connectProgress { - bar.SetCurrent(int64(destWaitingLocal.Size())) - } - } - }) - handlerWG.Done() - }() - - handlerWG.Add(1) - go func() { - _ = dest.HandleHashes(func(hashes map[uint][sha256.Size]byte) { - // fmt.Printf("[%d] Got %d hashes...\n", dev, len(hashes)) - if len(hashes) > 0 { - in := integrity.NewChecker(int64(destStorage.Size()), int(blockSize)) - in.SetHashes(hashes) - correct, err := in.Check(destStorage) - if err != nil { - panic(err) - } - // fmt.Printf("[%d] Verification result %t %v\n", dev, correct, err) - if correct { - statusVerify = "\u2611" - } else { - statusVerify = "\u2612" - } - } - }) - handlerWG.Done() - }() - - // Handle dirty list by invalidating local waiting cache - handlerWG.Add(1) - go func() { - _ = dest.HandleDirtyList(func(dirty []uint) { - // fmt.Printf("= %d = LIST OF DIRTY BLOCKS %v\n", dev, dirty) - destWaitingLocal.DirtyBlocks(dirty) - }) - handlerWG.Done() - }() -} - -// Called to setup an exposed storage device -func dstDeviceSetup(prov storage.Provider) (storage.ExposedStorage, error) { - p := expose.NewExposedStorageNBDNL(prov, expose.DefaultConfig) - var err error - - err = p.Init() - if err != nil { - // fmt.Printf("\n\n\np.Init returned %v\n\n\n", err) - return nil, err - } - - device := p.Device() - // fmt.Printf("* Device ready on /dev/%s\n", device) - - // We could also mount the device, but we should do so inside a goroutine, so that it doesn't block things... - if connectMountDev { - err = os.Mkdir(fmt.Sprintf("/mnt/mount%s", device), 0600) - if err != nil { - return nil, fmt.Errorf("error mkdir %v", err) - } - - go func() { - // fmt.Printf("Mounting device...") - cmd := exec.Command("mount", "-r", fmt.Sprintf("/dev/%s", device), fmt.Sprintf("/mnt/mount%s", device)) - err = cmd.Run() - if err != nil { - fmt.Printf("Could not mount device %v\n", err) - return - } - // fmt.Printf("* Device is mounted at /mnt/mount%s\n", device) - }() - } + fmt.Printf("\nMigrations completed. 
Please ctrl-c if you want to shut down, or wait an hour :)\n") - return p, nil -} + prov := dg.GetProvider(0) -// Called to shutdown an exposed storage device -func dstDeviceShutdown(p storage.ExposedStorage) error { - device := p.Device() + buffer := make([]byte, prov.Size()) + prov.ReadAt(buffer, 0) + fmt.Printf("DATA is [%s]\n", string(buffer)) - fmt.Printf("Shutdown %s\n", device) - if connectMountDev { - cmd := exec.Command("umount", fmt.Sprintf("/dev/%s", device)) - err := cmd.Run() - if err != nil { - return err - } - err = os.Remove(fmt.Sprintf("/mnt/mount%s", device)) - if err != nil { - return err - } - } + // We should pause here, to allow the user to do things with the devices + time.Sleep(1 * time.Hour) - err := p.Shutdown() - if err != nil { - return err - } - return nil + // Shutdown any storage exposed as devices + dg.CloseAll() } diff --git a/cmd/serve.go b/cmd/serve.go index c499a86..fbcfbac 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -36,6 +36,7 @@ var ( var serveAddr string var serveConf string +var serveContinuous bool var serveMetrics string var serveDebug bool @@ -46,6 +47,7 @@ func init() { cmdServe.Flags().StringVarP(&serveConf, "conf", "c", "silo.conf", "Configuration file") cmdServe.Flags().BoolVarP(&serveDebug, "debug", "d", false, "Debug logging (trace)") cmdServe.Flags().StringVarP(&serveMetrics, "metrics", "m", "", "Prom metrics address") + cmdServe.Flags().BoolVarP(&serveContinuous, "continuous", "C", false, "Continuous sync") } func runServe(_ *cobra.Command, _ []string) { @@ -150,30 +152,43 @@ func runServe(_ *cobra.Command, _ []string) { fmt.Printf("All devices migrated in %dms.\n", time.Since(ctime).Milliseconds()) - // Now do a dirty block phase... - hooks := &devicegroup.MigrateDirtyHooks{ - PreGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) { - fmt.Printf("# [%d]PreGetDirty %v\n", index, dirtyHistory) - }, - PostGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) { - fmt.Printf("# [%d]PostGetDirty %v\n", index, dirtyHistory) - }, - PostMigrateDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool { - fmt.Printf("# [%d]PostMigrateDirty %v\n", index, dirtyHistory) - return false - }, - Completed: func(index int, to *protocol.ToProtocol) { - fmt.Printf("# [%d]Completed\n", index) - }, + for { + + // Now do a dirty block phase... + hooks := &devicegroup.MigrateDirtyHooks{ + PreGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) { + fmt.Printf("# [%d]PreGetDirty %v\n", index, dirtyHistory) + }, + PostGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) { + fmt.Printf("# [%d]PostGetDirty %v\n", index, dirtyHistory) + }, + PostMigrateDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool { + fmt.Printf("# [%d]PostMigrateDirty %v\n", index, dirtyHistory) + time.Sleep(1 * time.Second) // Wait a bit for next dirty loop + return false + }, + Completed: func(index int, to *protocol.ToProtocol) { + fmt.Printf("# [%d]Completed\n", index) + }, + } + err = dg.MigrateDirty(hooks) + if err != nil { + dg.CloseAll() + panic(err) + } + + if !serveContinuous { + break + } } - err = dg.MigrateDirty(hooks) + fmt.Printf("All devices migrated(including dirty) in %dms.\n", time.Since(ctime).Milliseconds()) + + err = dg.Completed() // Send completion events for the devices. 
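Putting the serve-side changes together, the migration driver now follows one linear sequence. A condensed sketch, assuming `dg` was built with `NewFromSchema`, `conn` is an established connection to the receiver, and the silo `protocol` and `migrator` packages are imported; the slice form of the progress handler shown here is changed to a map keyed by device name in a later patch:

    pro := protocol.NewRW(ctx, []io.Reader{conn}, []io.Writer{conn}, nil)
    go func() { _ = pro.Handle() }()

    // 1. Send the DeviceGroupInfo (names, sizes, block sizes, schemas) to the receiver.
    if err := dg.StartMigrationTo(pro); err != nil {
        panic(err)
    }

    // 2. Bulk phase: migrate every block of every device, sharing the concurrency budget.
    err := dg.MigrateAll(1000, func(ps []*migrator.MigrationProgress) {
        // report progress here
    })
    if err != nil {
        panic(err)
    }

    // 3. Dirty phase(s): hooks is the *MigrateDirtyHooks value built above.
    if err := dg.MigrateDirty(hooks); err != nil {
        panic(err)
    }

    // 4. Send the completion events, then tear everything down.
    if err := dg.Completed(); err != nil {
        panic(err)
    }
    _ = dg.CloseAll()
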
if err != nil { dg.CloseAll() panic(err) } - fmt.Printf("All devices migrated(including dirty) in %dms.\n", time.Since(ctime).Milliseconds()) - if log != nil { metrics := pro.GetMetrics() log.Debug(). diff --git a/pkg/storage/devicegroup/README.md b/pkg/storage/devicegroup/README.md index 3e7a467..c064f87 100644 --- a/pkg/storage/devicegroup/README.md +++ b/pkg/storage/devicegroup/README.md @@ -5,7 +5,7 @@ All internat concerns such as volatilityMonitor, waitingCache, as well as the ne ## Creation -There are two methods to create a `DeviceGroup` +There are two methods to create a `DeviceGroup`. ### NewFromSchema @@ -57,3 +57,5 @@ There is also support for sending custom data. This would typically be done eith // Wait for migration completion dg.WaitForCompletion() + +Once a `DeviceGroup` is has been created and migration is completed, you can then send the devices somewhere else with `StartMigration(protocol)`. \ No newline at end of file diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index ab0549a..63aed60 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -228,14 +228,14 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* storage.BlockTypeAny: int(concurrency), } cfg.LockerHandler = func() { - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) + // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) d.storage.Lock() - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) + // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) } cfg.UnlockerHandler = func() { - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) + // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) d.storage.Unlock() - setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) + // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) } cfg.ErrorHandler = func(_ *storage.BlockInfo, err error) { setMigrationError(err) @@ -319,9 +319,19 @@ type MigrateDirtyHooks struct { } func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { + // If StartMigrationTo or MigrateAll have not been called, return error. 
+ for _, d := range dg.devices { + if d.to == nil || d.migrator == nil { + return errNotSetup + } + } + errs := make(chan error, len(dg.devices)) for index, d := range dg.devices { + // First unlock the storage if it is locked due to a previous MigrateDirty call + d.storage.Unlock() + go func() { dirtyHistory := make([]int, 0) From dcfbbd0c62e6cc5f6caa3960f6c04f483f48005a Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Fri, 13 Dec 2024 13:04:00 +0000 Subject: [PATCH 25/37] Updates to schema encode/decode Signed-off-by: Jimmy Moore --- pkg/storage/config/silo.go | 20 +++++ pkg/storage/config/silo_test.go | 28 +++++++ pkg/storage/devicegroup/device_group.go | 18 ++++- pkg/storage/devicegroup/device_group_from.go | 3 +- pkg/storage/devicegroup/device_group_test.go | 83 +++++++++++--------- pkg/storage/devicegroup/device_group_to.go | 2 +- pkg/storage/protocol/protocol_rw.go | 2 + pkg/storage/storage.go | 4 + 8 files changed, 119 insertions(+), 41 deletions(-) diff --git a/pkg/storage/config/silo.go b/pkg/storage/config/silo.go index 1b837fe..8b2e968 100644 --- a/pkg/storage/config/silo.go +++ b/pkg/storage/config/silo.go @@ -1,6 +1,7 @@ package config import ( + "errors" "fmt" "os" "strconv" @@ -123,6 +124,25 @@ func (ds *DeviceSchema) Encode() []byte { return f.Bytes() } +func (ds *DeviceSchema) EncodeAsBlock() []byte { + f := hclwrite.NewEmptyFile() + block := gohcl.EncodeAsBlock(ds, "device") + f.Body().AppendBlock(block) + return f.Bytes() +} + +func DecodeDeviceFromBlock(schema string) (*DeviceSchema, error) { + sf := &SiloSchema{} + err := sf.Decode([]byte(schema)) + if err != nil { + return nil, err + } + if len(sf.Device) != 1 { + return nil, errors.New("more than one device in schema") + } + return sf.Device[0], nil +} + func (ds *DeviceSchema) Decode(schema string) error { file, diag := hclsyntax.ParseConfig([]byte(schema), "", hcl.Pos{Line: 1, Column: 1}) if diag.HasErrors() { diff --git a/pkg/storage/config/silo_test.go b/pkg/storage/config/silo_test.go index dc87483..2f0d597 100644 --- a/pkg/storage/config/silo_test.go +++ b/pkg/storage/config/silo_test.go @@ -66,3 +66,31 @@ func TestSiloConfig(t *testing.T) { assert.NoError(t, err) // TODO: Check data is as expected } + +func TestSiloConfigBlock(t *testing.T) { + + schema := `device Disk0 { + size = "1G" + expose = true + system = "memory" + } + + device Disk1 { + size = "2M" + system = "memory" + } + ` + + s := new(SiloSchema) + err := s.Decode([]byte(schema)) + assert.NoError(t, err) + + block0 := s.Device[0].EncodeAsBlock() + + ds := &SiloSchema{} + err = ds.Decode(block0) + assert.NoError(t, err) + + // Make sure it used the label + assert.Equal(t, ds.Device[0].Name, s.Device[0].Name) +} diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 82dc37b..9d73601 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -52,8 +52,22 @@ type DeviceInformation struct { waitingCacheRemote *waitingcache.Remote } -func (dg *DeviceGroup) GetProvider(index int) storage.Provider { - return dg.devices[index].storage +func (dg *DeviceGroup) GetExposedDeviceByName(name string) string { + for _, di := range dg.devices { + if di.schema.Name == name && di.exp != nil { + return di.exp.Device() + } + } + return "" +} + +func (dg *DeviceGroup) GetProviderByName(name string) storage.Provider { + for _, di := range dg.devices { + if di.schema.Name == name { + return di.prov + } + } + return nil } func (dg *DeviceGroup) CloseAll() error { diff --git 
a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index b27d8fc..6ff0bc8 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -33,13 +33,12 @@ func NewFromProtocol(ctx context.Context, // First create the devices we need using the schemas sent... for index, di := range dgi.Devices { - ds := &config.DeviceSchema{} // We may want to tweak schemas here eg autoStart = false on sync. Or modify pathnames. schema := di.Schema if tweakDeviceSchema != nil { schema = tweakDeviceSchema(index-1, di.Name, schema) } - err := ds.Decode(schema) + ds, err := config.DecodeDeviceFromBlock(schema) if err != nil { return nil, err } diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 4cf9d0c..0a11618 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -3,10 +3,8 @@ package devicegroup import ( "context" "crypto/rand" - "fmt" "io" "os" - "os/user" "strings" "sync" "testing" @@ -21,6 +19,7 @@ import ( "github.com/loopholelabs/silo/pkg/storage/protocol/packets" "github.com/loopholelabs/silo/pkg/storage/sources" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var testDeviceSchema = []*config.DeviceSchema{ @@ -29,29 +28,31 @@ var testDeviceSchema = []*config.DeviceSchema{ Size: "8m", System: "file", BlockSize: "1m", - Expose: true, - Location: "testdev_test1", + // Expose: true, + Location: "testdev_test1", }, + { Name: "test2", Size: "16m", System: "file", BlockSize: "1m", - Expose: true, - Location: "testdev_test2", + // Expose: true, + Location: "testdev_test2", }, } func setupDeviceGroup(t *testing.T) *DeviceGroup { - currentUser, err := user.Current() - if err != nil { - panic(err) - } - if currentUser.Username != "root" { - fmt.Printf("Cannot run test unless we are root.\n") - return nil - } - + /* + currentUser, err := user.Current() + if err != nil { + panic(err) + } + if currentUser.Username != "root" { + fmt.Printf("Cannot run test unless we are root.\n") + return nil + } + */ dg, err := NewFromSchema(testDeviceSchema, nil, nil) assert.NoError(t, err) @@ -97,7 +98,7 @@ func TestDeviceGroupSendDevInfo(t *testing.T) { assert.Equal(t, r.Name, di.Name) assert.Equal(t, uint64(r.ByteSize()), di.Size) assert.Equal(t, uint32(r.ByteBlockSize()), di.BlockSize) - assert.Equal(t, string(r.Encode()), di.Schema) + assert.Equal(t, string(r.EncodeAsBlock()), di.Schema) } } @@ -117,7 +118,7 @@ func TestDeviceGroupMigrateTo(t *testing.T) { ctx, cancelfn := context.WithCancel(context.TODO()) var incomingLock sync.Mutex - incomingProviders := make(map[uint32]storage.Provider) + incomingProviders := make(map[string]storage.Provider) initDev := func(ctx context.Context, p protocol.Protocol, dev uint32) { } @@ -136,7 +137,7 @@ func TestDeviceGroupMigrateTo(t *testing.T) { destStorageFactory := func(di *packets.DevInfo) storage.Provider { store := sources.NewMemoryStorage(int(di.Size)) incomingLock.Lock() - incomingProviders[uint32(index)] = store + incomingProviders[di.Name] = store incomingLock.Unlock() return store } @@ -168,8 +169,9 @@ func TestDeviceGroupMigrateTo(t *testing.T) { }() // Lets write some data... 
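The `EncodeAsBlock`/`DecodeDeviceFromBlock` pair added in this patch is what lets a single device schema travel inside `DevInfo.Schema` and come back with its block label (the device name) preserved, which is what the new config test checks. A small standalone sketch of the round trip; the schema values are arbitrary:

    package main

    import (
        "fmt"

        "github.com/loopholelabs/silo/pkg/storage/config"
    )

    func main() {
        ds := &config.DeviceSchema{
            Name:      "test1",
            Size:      "8m",
            System:    "file",
            BlockSize: "1m",
            Location:  "testdev_test1",
        }

        // Encode as a single device block, the form now used for DevInfo.Schema.
        block := ds.EncodeAsBlock()

        // Decode it again; the block label is used as the Name.
        ds2, err := config.DecodeDeviceFromBlock(string(block))
        if err != nil {
            panic(err)
        }

        fmt.Println(ds2.Name == ds.Name) // expected: true
    }
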
- for i := range testDeviceSchema { - prov := dg.GetProvider(i) + for _, s := range testDeviceSchema { + prov := dg.GetProviderByName(s.Name) + assert.NotNil(t, prov) buff := make([]byte, prov.Size()) _, err := rand.Read(buff) assert.NoError(t, err) @@ -181,21 +183,16 @@ func TestDeviceGroupMigrateTo(t *testing.T) { err := dg.StartMigrationTo(prSource) assert.NoError(t, err) - pHandler := func(prog []*migrator.MigrationProgress) { - for index, p := range prog { - if p != nil { - fmt.Printf("[%d] %d / %d\n", index, p.ReadyBlocks, p.TotalBlocks) - } - } - } + pHandler := func(prog []*migrator.MigrationProgress) {} err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) // Check the data all got migrated correctly - for i := range testDeviceSchema { - prov := dg.GetProvider(i) - destProvider := incomingProviders[uint32(i+1)] + for _, s := range testDeviceSchema { + prov := dg.GetProviderByName(s.Name) + // Find the correct destProvider... + destProvider := incomingProviders[s.Name] assert.NotNil(t, destProvider) eq, err := storage.Equals(prov, destProvider, 1024*1024) assert.NoError(t, err) @@ -232,16 +229,21 @@ func TestDeviceGroupMigrate(t *testing.T) { prSource := protocol.NewRW(ctx, []io.Reader{r1}, []io.Writer{w2}, nil) prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, initDev) + var prDone sync.WaitGroup + + prDone.Add(2) go func() { _ = prSource.Handle() + prDone.Done() }() go func() { _ = prDest.Handle() + prDone.Done() }() // Lets write some data... - for i := range testDeviceSchema { - prov := dg.GetProvider(i) + for _, s := range testDeviceSchema { + prov := dg.GetProviderByName(s.Name) buff := make([]byte, prov.Size()) _, err := rand.Read(buff) assert.NoError(t, err) @@ -306,14 +308,23 @@ func TestDeviceGroupMigrate(t *testing.T) { dg2.WaitForCompletion() // Check the data all got migrated correctly from dg to dg2. 
- for i := range testDeviceSchema { - prov := dg.GetProvider(i) - destProvider := dg2.GetProvider(i) - assert.NotNil(t, destProvider) + for _, s := range testDeviceSchema { + prov := dg.GetProviderByName(s.Name) + require.NotNil(t, prov) + destProvider := dg2.GetProviderByName(s.Name) + require.NotNil(t, destProvider) eq, err := storage.Equals(prov, destProvider, 1024*1024) assert.NoError(t, err) assert.True(t, eq) } + // Cancel context cancelfn() + + // Close protocol bits + prDone.Wait() + r1.Close() + w1.Close() + r2.Close() + w2.Close() } diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index 63aed60..a62da50 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -118,7 +118,7 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { Size: d.prov.Size(), BlockSize: uint32(d.blockSize), Name: d.schema.Name, - Schema: string(d.schema.Encode()), + Schema: string(d.schema.EncodeAsBlock()), } dgi.Devices[index+1] = di } diff --git a/pkg/storage/protocol/protocol_rw.go b/pkg/storage/protocol/protocol_rw.go index 68f280b..d6db877 100644 --- a/pkg/storage/protocol/protocol_rw.go +++ b/pkg/storage/protocol/protocol_rw.go @@ -323,6 +323,7 @@ func (p *RW) WaitForPacket(dev uint32, id uint32) ([]byte, error) { } func (p *RW) WaitForCommand(dev uint32, cmd byte) (uint32, []byte, error) { + p.activeDevsLock.Lock() p.waitersLock.Lock() w, ok := p.waiters[dev] if !ok { @@ -339,6 +340,7 @@ func (p *RW) WaitForCommand(dev uint32, cmd byte) (uint32, []byte, error) { w.byCmd[cmd] = wq } p.waitersLock.Unlock() + p.activeDevsLock.Unlock() select { case p := <-wq: diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 3a3673d..5237c23 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -65,6 +65,7 @@ type SyncStartConfig struct { */ func Equals(sp1 Provider, sp2 Provider, blockSize int) (bool, error) { if sp1.Size() != sp2.Size() { + fmt.Printf("Equals: Size differs (%d %d)\n", sp1.Size(), sp2.Size()) return false, nil } @@ -78,15 +79,18 @@ func Equals(sp1 Provider, sp2 Provider, blockSize int) (bool, error) { n, err := sp1.ReadAt(sourceBuff, int64(i)) if err != nil { + fmt.Printf("Equals: sp1.ReadAt %v\n", err) return false, err } sourceBuff = sourceBuff[:n] n, err = sp2.ReadAt(destBuff, int64(i)) if err != nil { + fmt.Printf("Equals: sp2.ReadAt %v\n", err) return false, err } destBuff = destBuff[:n] if len(sourceBuff) != len(destBuff) { + fmt.Printf("Equals: data len sp1 sp2 %d %d\n", len(sourceBuff), len(destBuff)) return false, nil } for j := 0; j < n; j++ { From 031cf74d8d69636392dc933a7b59704ec62d7df5 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Fri, 13 Dec 2024 13:27:28 +0000 Subject: [PATCH 26/37] dg tests pass. Order issue fixed Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/device_group_from.go | 11 ++++++----- pkg/storage/storage.go | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index 6ff0bc8..a9e2fc1 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -29,7 +29,7 @@ func NewFromProtocol(ctx context.Context, return nil, err } - devices := make([]*config.DeviceSchema, 0) + devices := make([]*config.DeviceSchema, len(dgi.Devices)) // First create the devices we need using the schemas sent... 
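The ordering fix here is worth spelling out: the incoming `DeviceGroupInfo.Devices` is keyed by protocol device ID, and the sender numbers the per-device channels from 1 (`index+1`), so the receiver now pre-sizes its schema slice and writes each decoded schema to `index-1` instead of appending in iteration order, which could previously scramble the devices. A sketch of just that mapping, using the names from the diff, as it sits inside `NewFromProtocol`:

    // Place each decoded schema at its stable slot rather than appending.
    devices := make([]*config.DeviceSchema, len(dgi.Devices))
    for index, di := range dgi.Devices {
        ds, err := config.DecodeDeviceFromBlock(di.Schema)
        if err != nil {
            return nil, err
        }
        devices[index-1] = ds // device ID 1 -> slot 0, ID 2 -> slot 1, ...
    }
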
for index, di := range dgi.Devices { @@ -42,7 +42,7 @@ func NewFromProtocol(ctx context.Context, if err != nil { return nil, err } - devices = append(devices, ds) + devices[index-1] = ds } dg, err := NewFromSchema(devices, log, met) @@ -56,13 +56,14 @@ func NewFromProtocol(ctx context.Context, // We need to create the FromProtocol for each device, and associated goroutines here. for index, di := range dgi.Devices { - d := dg.devices[index-1] + dev := index - 1 + d := dg.devices[dev] destStorageFactory := func(di *packets.DevInfo) storage.Provider { d.waitingCacheLocal, d.waitingCacheRemote = waitingcache.NewWaitingCacheWithLogger(d.prov, int(di.BlockSize), dg.log) - if dg.devices[index-1].exp != nil { - dg.devices[index-1].exp.SetProvider(d.waitingCacheLocal) + if d.exp != nil { + d.exp.SetProvider(d.waitingCacheLocal) } return d.waitingCacheRemote diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 5237c23..942d88f 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -95,7 +95,7 @@ func Equals(sp1 Provider, sp2 Provider, blockSize int) (bool, error) { } for j := 0; j < n; j++ { if sourceBuff[j] != destBuff[j] { - fmt.Printf("Equals: Block %d differs\n", i/blockSize) + fmt.Printf("Equals: Block %d differs [sp1 %d, sp2 %d]\n", i/blockSize, sourceBuff[j], destBuff[j]) return false, nil } } From 4179e401ce1a5423ca9f63305aabb4c265cd527d Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Fri, 13 Dec 2024 13:33:43 +0000 Subject: [PATCH 27/37] cmd serve/connect working Signed-off-by: Jimmy Moore --- cmd/connect.go | 13 +++++++------ cmd/serve.go | 7 +++++++ pkg/storage/devicegroup/device_group.go | 8 ++++++++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/cmd/connect.go b/cmd/connect.go index 72d2002..b3c34da 100644 --- a/cmd/connect.go +++ b/cmd/connect.go @@ -135,17 +135,18 @@ func runConnect(_ *cobra.Command, _ []string) { dg, err = devicegroup.NewFromProtocol(protoCtx, pro, tweak, log, siloMetrics) + for _, d := range dg.GetDeviceSchema() { + expName := dg.GetExposedDeviceByName(d.Name) + if expName != "" { + fmt.Printf("Device %s exposed at %s\n", d.Name, expName) + } + } + // Wait for completion events. dg.WaitForCompletion() fmt.Printf("\nMigrations completed. 
Please ctrl-c if you want to shut down, or wait an hour :)\n") - prov := dg.GetProvider(0) - - buffer := make([]byte, prov.Size()) - prov.ReadAt(buffer, 0) - fmt.Printf("DATA is [%s]\n", string(buffer)) - // We should pause here, to allow the user to do things with the devices time.Sleep(1 * time.Hour) diff --git a/cmd/serve.go b/cmd/serve.go index fbcfbac..c5f086c 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -96,6 +96,13 @@ func runServe(_ *cobra.Command, _ []string) { panic(err) } + for _, d := range siloConf.Device { + expName := dg.GetExposedDeviceByName(d.Name) + if expName != "" { + fmt.Printf("Device %s exposed at %s\n", d.Name, expName) + } + } + c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 9d73601..c6e10b6 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -52,6 +52,14 @@ type DeviceInformation struct { waitingCacheRemote *waitingcache.Remote } +func (dg *DeviceGroup) GetDeviceSchema() []*config.DeviceSchema { + s := make([]*config.DeviceSchema, 0) + for _, di := range dg.devices { + s = append(s, di.schema) + } + return s +} + func (dg *DeviceGroup) GetExposedDeviceByName(name string) string { for _, di := range dg.devices { if di.schema.Name == name && di.exp != nil { From 7d4d6df5134ef5b3603525d4bb3f35d1648114d6 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Fri, 13 Dec 2024 14:36:28 +0000 Subject: [PATCH 28/37] Some minor tweaks to readme Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/README.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/pkg/storage/devicegroup/README.md b/pkg/storage/devicegroup/README.md index c064f87..b730193 100644 --- a/pkg/storage/devicegroup/README.md +++ b/pkg/storage/devicegroup/README.md @@ -32,10 +32,23 @@ Devices in a `DeviceGroup` are sent together, which allows Silo to optimize all // hooks gives some control over the dirty loop err = dg.MigrateDirty(hooks) + // Send completion events for all devices + err = dg.Completed() + // Close everything dg.CloseAll() -There is also support for sending custom data. This would typically be done either in `pHandler` (The progress handler), or in one of the `MigrateDirty` hooks. +Within the `MigrateDirty` there are a number of hooks we can use to control things. MigrateDirty will return once all devices have no more dirty data. You can of course then call MigrateDirty again eg for continuous sync. + + type MigrateDirtyHooks struct { + PreGetDirty func(index int, to *protocol, ToProtocol, dirtyHistory []int) + PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) + PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool + Completed func(index int, to *protocol.ToProtocol) + } + + +There is also support for sending global custom data. This would typically be done either in `pHandler` (The progress handler), or in one of the `MigrateDirty` hooks. pHandler := func(ps []*migrator.MigrationProgress) { // Do some test here to see if enough data migrated @@ -47,10 +60,11 @@ There is also support for sending custom data. 
This would typically be done eith ## Usage (Receiving devices) // Create a DeviceGroup from protocol - // tweak allows us to modify the schema + // tweak func allows us to modify the schema, eg pathnames dg, err = NewFromProtocol(ctx, protocol, tweak, nil, nil) - // Handle any custom data + // Handle any custom data events + // For example resume the VM here. go dg.HandleCustomData(func(data []byte) { // We got sent some custom data! }) From 9d4c436bb6f4134a0e6f8e0cb254a9c2159124f1 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Fri, 13 Dec 2024 14:40:50 +0000 Subject: [PATCH 29/37] Silly typos Signed-off-by: Jimmy Moore --- pkg/storage/devicegroup/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storage/devicegroup/README.md b/pkg/storage/devicegroup/README.md index b730193..ae7367e 100644 --- a/pkg/storage/devicegroup/README.md +++ b/pkg/storage/devicegroup/README.md @@ -1,7 +1,7 @@ # Device Group The `DeviceGroup` combines some number of Silo devices into a single unit, which can then be migrated to another Silo instance. -All internat concerns such as volatilityMonitor, waitingCache, as well as the new S3 assist, are now hidden from the consumer. +All internal concerns such as volatilityMonitor, waitingCache, as well as the new S3 assist, are now hidden from the consumer. ## Creation @@ -41,7 +41,7 @@ Devices in a `DeviceGroup` are sent together, which allows Silo to optimize all Within the `MigrateDirty` there are a number of hooks we can use to control things. MigrateDirty will return once all devices have no more dirty data. You can of course then call MigrateDirty again eg for continuous sync. type MigrateDirtyHooks struct { - PreGetDirty func(index int, to *protocol, ToProtocol, dirtyHistory []int) + PreGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool Completed func(index int, to *protocol.ToProtocol) From 02d94bf94b2d40e6f7e228165338fe89f44cbfb5 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Sat, 14 Dec 2024 22:43:05 +0000 Subject: [PATCH 30/37] Added event compat and exposed things from dg Signed-off-by: Jimmy Moore --- cmd/connect.go | 12 +- cmd/serve.go | 12 +- pkg/storage/devicegroup/device_group.go | 72 +++++++----- pkg/storage/devicegroup/device_group_from.go | 16 ++- pkg/storage/devicegroup/device_group_test.go | 16 +-- pkg/storage/devicegroup/device_group_to.go | 114 +++++++++---------- 6 files changed, 129 insertions(+), 113 deletions(-) diff --git a/cmd/connect.go b/cmd/connect.go index b3c34da..1b73bd3 100644 --- a/cmd/connect.go +++ b/cmd/connect.go @@ -108,9 +108,7 @@ func runConnect(_ *cobra.Command, _ []string) { protoCtx, protoCancelfn := context.WithCancel(context.TODO()) - handleIncomingDevice := func(ctx context.Context, pro protocol.Protocol, dev uint32) {} - - pro := protocol.NewRW(protoCtx, []io.Reader{con}, []io.Writer{con}, handleIncomingDevice) + pro := protocol.NewRW(protoCtx, []io.Reader{con}, []io.Writer{con}, nil) // Let the protocol do its thing. go func() { @@ -129,16 +127,16 @@ func runConnect(_ *cobra.Command, _ []string) { } // TODO: Modify schemas a bit here... 
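The `tweak` callback is where a consumer adjusts an incoming schema before its device is created; the device-group tests use it to point the receiver at different file locations so the two sides do not share files. A sketch in that spirit (the textual replacement and path names are illustrative; a fuller version could decode the block, change `Location`, and re-encode it):

    tweak := func(_ int, name string, schema string) string {
        // eg "testdev_test1" on the sender becomes "testrecv_test1" on the receiver
        return strings.ReplaceAll(schema, "testdev_"+name, "testrecv_"+name)
    }

    // The fourth argument is the optional event handler added in this patch (nil here).
    dg, err := devicegroup.NewFromProtocol(ctx, pro, tweak, nil, log, met)
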
- tweak := func(index int, name string, schema string) string { + tweak := func(_ int, _ string, schema string) string { return schema } - dg, err = devicegroup.NewFromProtocol(protoCtx, pro, tweak, log, siloMetrics) + dg, err = devicegroup.NewFromProtocol(protoCtx, pro, tweak, nil, log, siloMetrics) for _, d := range dg.GetDeviceSchema() { expName := dg.GetExposedDeviceByName(d.Name) - if expName != "" { - fmt.Printf("Device %s exposed at %s\n", d.Name, expName) + if expName != nil { + fmt.Printf("Device %s exposed at %s\n", d.Name, expName.Device()) } } diff --git a/cmd/serve.go b/cmd/serve.go index c5f086c..e74d0f1 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -98,8 +98,8 @@ func runServe(_ *cobra.Command, _ []string) { for _, d := range siloConf.Device { expName := dg.GetExposedDeviceByName(d.Name) - if expName != "" { - fmt.Printf("Device %s exposed at %s\n", d.Name, expName) + if expName != nil { + fmt.Printf("Device %s exposed at %s\n", d.Name, expName.Device()) } } @@ -163,18 +163,18 @@ func runServe(_ *cobra.Command, _ []string) { // Now do a dirty block phase... hooks := &devicegroup.MigrateDirtyHooks{ - PreGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) { + PreGetDirty: func(index int, _ *protocol.ToProtocol, dirtyHistory []int) { fmt.Printf("# [%d]PreGetDirty %v\n", index, dirtyHistory) }, - PostGetDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) { + PostGetDirty: func(index int, _ *protocol.ToProtocol, dirtyHistory []int, _ []uint) { fmt.Printf("# [%d]PostGetDirty %v\n", index, dirtyHistory) }, - PostMigrateDirty: func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool { + PostMigrateDirty: func(index int, _ *protocol.ToProtocol, dirtyHistory []int) bool { fmt.Printf("# [%d]PostMigrateDirty %v\n", index, dirtyHistory) time.Sleep(1 * time.Second) // Wait a bit for next dirty loop return false }, - Completed: func(index int, to *protocol.ToProtocol) { + Completed: func(index int, _ *protocol.ToProtocol) { fmt.Printf("# [%d]Completed\n", index) }, } diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index c6e10b6..dd19939 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -13,6 +13,7 @@ import ( "github.com/loopholelabs/silo/pkg/storage/metrics" "github.com/loopholelabs/silo/pkg/storage/migrator" "github.com/loopholelabs/silo/pkg/storage/protocol" + "github.com/loopholelabs/silo/pkg/storage/protocol/packets" "github.com/loopholelabs/silo/pkg/storage/volatilitymonitor" "github.com/loopholelabs/silo/pkg/storage/waitingcache" ) @@ -34,50 +35,69 @@ type DeviceGroup struct { } type DeviceInformation struct { - size uint64 - blockSize uint64 - numBlocks int - schema *config.DeviceSchema - prov storage.Provider - storage storage.LockableProvider - exp storage.ExposedStorage - volatility *volatilitymonitor.VolatilityMonitor - dirtyLocal *dirtytracker.Local - dirtyRemote *dirtytracker.Remote - to *protocol.ToProtocol - orderer *blocks.PriorityBlockOrder - migrator *migrator.Migrator + Size uint64 + BlockSize uint64 + NumBlocks int + Schema *config.DeviceSchema + Prov storage.Provider + Storage storage.LockableProvider + Exp storage.ExposedStorage + Volatility *volatilitymonitor.VolatilityMonitor + DirtyLocal *dirtytracker.Local + DirtyRemote *dirtytracker.Remote + To *protocol.ToProtocol + Orderer *blocks.PriorityBlockOrder + Migrator *migrator.Migrator migrationError chan error - waitingCacheLocal *waitingcache.Local - 
waitingCacheRemote *waitingcache.Remote + WaitingCacheLocal *waitingcache.Local + WaitingCacheRemote *waitingcache.Remote + EventHandler func(e *packets.Event) } func (dg *DeviceGroup) GetDeviceSchema() []*config.DeviceSchema { s := make([]*config.DeviceSchema, 0) for _, di := range dg.devices { - s = append(s, di.schema) + s = append(s, di.Schema) } return s } -func (dg *DeviceGroup) GetExposedDeviceByName(name string) string { +func (dg *DeviceGroup) GetDeviceInformationByName(name string) *DeviceInformation { for _, di := range dg.devices { - if di.schema.Name == name && di.exp != nil { - return di.exp.Device() + if di.Schema.Name == name { + return di } } - return "" + return nil +} + +func (dg *DeviceGroup) GetExposedDeviceByName(name string) storage.ExposedStorage { + for _, di := range dg.devices { + if di.Schema.Name == name && di.Exp != nil { + return di.Exp + } + } + return nil } func (dg *DeviceGroup) GetProviderByName(name string) storage.Provider { for _, di := range dg.devices { - if di.schema.Name == name { - return di.prov + if di.Schema.Name == name { + return di.Prov } } return nil } +func (dg *DeviceGroup) GetBlockSizeByName(name string) int { + for _, di := range dg.devices { + if di.Schema.Name == name { + return int(di.BlockSize) + } + } + return -1 +} + func (dg *DeviceGroup) CloseAll() error { if dg.log != nil { dg.log.Debug().Int("devices", len(dg.devices)).Msg("close device group") @@ -87,17 +107,17 @@ func (dg *DeviceGroup) CloseAll() error { for _, d := range dg.devices { // Unlock the storage so nothing blocks here... // If we don't unlock there may be pending nbd writes that can't be completed. - d.storage.Unlock() + d.Storage.Unlock() - err := d.prov.Close() + err := d.Prov.Close() if err != nil { if dg.log != nil { dg.log.Error().Err(err).Msg("error closing device group storage provider") } e = errors.Join(e, err) } - if d.exp != nil { - err = d.exp.Shutdown() + if d.Exp != nil { + err = d.Exp.Shutdown() if err != nil { if dg.log != nil { dg.log.Error().Err(err).Msg("error closing device group exposed storage") diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index a9e2fc1..53ad055 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -16,6 +16,7 @@ import ( func NewFromProtocol(ctx context.Context, pro protocol.Protocol, tweakDeviceSchema func(index int, name string, schema string) string, + eventHandler func(e *packets.Event), log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { @@ -58,15 +59,16 @@ func NewFromProtocol(ctx context.Context, for index, di := range dgi.Devices { dev := index - 1 d := dg.devices[dev] + d.EventHandler = eventHandler destStorageFactory := func(di *packets.DevInfo) storage.Provider { - d.waitingCacheLocal, d.waitingCacheRemote = waitingcache.NewWaitingCacheWithLogger(d.prov, int(di.BlockSize), dg.log) + d.WaitingCacheLocal, d.WaitingCacheRemote = waitingcache.NewWaitingCacheWithLogger(d.Prov, int(di.BlockSize), dg.log) - if d.exp != nil { - d.exp.SetProvider(d.waitingCacheLocal) + if d.Exp != nil { + d.Exp.SetProvider(d.WaitingCacheLocal) } - return d.waitingCacheRemote + return d.WaitingCacheRemote } from := protocol.NewFromProtocol(ctx, uint32(index), destStorageFactory, pro) @@ -83,7 +85,7 @@ func NewFromProtocol(ctx context.Context, go func() { _ = from.HandleDirtyList(func(dirtyBlocks []uint) { // Tell the waitingCache about it - d.waitingCacheLocal.DirtyBlocks(dirtyBlocks) + 
d.WaitingCacheLocal.DirtyBlocks(dirtyBlocks) }) }() go func() { @@ -91,7 +93,9 @@ func NewFromProtocol(ctx context.Context, if p.Type == packets.EventCompleted { dg.incomingDevicesWg.Done() } - // TODO: Pass events on to caller so they can be handled upstream + if d.EventHandler != nil { + d.EventHandler(p) + } }) }() } diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 0a11618..8fcb461 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -120,11 +120,8 @@ func TestDeviceGroupMigrateTo(t *testing.T) { var incomingLock sync.Mutex incomingProviders := make(map[string]storage.Provider) - initDev := func(ctx context.Context, p protocol.Protocol, dev uint32) { - } - prSource := protocol.NewRW(ctx, []io.Reader{r1}, []io.Writer{w2}, nil) - prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, initDev) + prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, nil) go func() { // This is our control channel, and we're expecting a DeviceGroupInfo @@ -183,7 +180,7 @@ func TestDeviceGroupMigrateTo(t *testing.T) { err := dg.StartMigrationTo(prSource) assert.NoError(t, err) - pHandler := func(prog []*migrator.MigrationProgress) {} + pHandler := func(_ []*migrator.MigrationProgress) {} err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) @@ -223,11 +220,8 @@ func TestDeviceGroupMigrate(t *testing.T) { ctx, cancelfn := context.WithCancel(context.TODO()) - initDev := func(ctx context.Context, p protocol.Protocol, dev uint32) { - } - prSource := protocol.NewRW(ctx, []io.Reader{r1}, []io.Writer{w2}, nil) - prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, initDev) + prDest := protocol.NewRW(ctx, []io.Reader{r2}, []io.Writer{w1}, nil) var prDone sync.WaitGroup @@ -255,7 +249,7 @@ func TestDeviceGroupMigrate(t *testing.T) { var wg sync.WaitGroup // We will tweak schema in recv here so we have separate paths. - tweak := func(index int, name string, schema string) string { + tweak := func(_ int, _ string, schema string) string { s := strings.ReplaceAll(schema, "testdev_test1", "testrecv_test1") s = strings.ReplaceAll(s, "testdev_test2", "testrecv_test2") return s @@ -264,7 +258,7 @@ func TestDeviceGroupMigrate(t *testing.T) { wg.Add(1) go func() { var err error - dg2, err = NewFromProtocol(ctx, prDest, tweak, nil, nil) + dg2, err = NewFromProtocol(ctx, prDest, tweak, nil, nil, nil) assert.NoError(t, err) wg.Done() }() diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index a62da50..07c5ab3 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -68,17 +68,17 @@ func NewFromSchema(ds []*config.DeviceSchema, log types.Logger, met metrics.Silo } dg.devices = append(dg.devices, &DeviceInformation{ - size: local.Size(), - blockSize: uint64(blockSize), - numBlocks: totalBlocks, - schema: s, - prov: prov, - storage: local, - exp: exp, - volatility: vmonitor, - dirtyLocal: dirtyLocal, - dirtyRemote: dirtyRemote, - orderer: orderer, + Size: local.Size(), + BlockSize: uint64(blockSize), + NumBlocks: totalBlocks, + Schema: s, + Prov: prov, + Storage: local, + Exp: exp, + Volatility: vmonitor, + DirtyLocal: dirtyLocal, + DirtyRemote: dirtyRemote, + Orderer: orderer, }) // Set these two at least, so we know *something* about every device in progress handler. 
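With the `DeviceInformation` fields exported and lookups now done by name, a consumer no longer needs to track device indices itself. A hedged sketch of typical consumer code against these accessors (the printed format is illustrative):

    for _, s := range dg.GetDeviceSchema() {
        name := s.Name
        bs := dg.GetBlockSizeByName(name) // -1 if the name is unknown

        if exp := dg.GetExposedDeviceByName(name); exp != nil {
            fmt.Printf("device %s (block size %d) exposed at %s\n", name, bs, exp.Device())
        }

        if di := dg.GetDeviceInformationByName(name); di != nil {
            _ = di.Prov // direct access to the provider, dirty trackers, waiting cache, etc.
        }
    }
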
@@ -100,11 +100,11 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { // First lets setup the ToProtocol for index, d := range dg.devices { - d.to = protocol.NewToProtocol(d.prov.Size(), uint32(index+1), pro) - d.to.SetCompression(true) + d.To = protocol.NewToProtocol(d.Prov.Size(), uint32(index+1), pro) + d.To.SetCompression(true) if dg.met != nil { - dg.met.AddToProtocol(d.schema.Name, d.to) + dg.met.AddToProtocol(d.Schema.Name, d.To) } } @@ -115,10 +115,10 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { for index, d := range dg.devices { di := &packets.DevInfo{ - Size: d.prov.Size(), - BlockSize: uint32(d.blockSize), - Name: d.schema.Name, - Schema: string(d.schema.EncodeAsBlock()), + Size: d.Prov.Size(), + BlockSize: uint32(d.BlockSize), + Name: d.Schema.Name, + Schema: string(d.Schema.EncodeAsBlock()), } dgi.Devices[index+1] = di } @@ -133,7 +133,7 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { // This will Migrate all devices to the 'to' setup in SendDevInfo stage. func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []*migrator.MigrationProgress)) error { for _, d := range dg.devices { - if d.to == nil { + if d.To == nil { return errNotSetup } } @@ -147,7 +147,7 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* // Add up device sizes, so we can allocate the concurrency proportionally totalSize := uint64(0) for _, d := range dg.devices { - totalSize += d.size + totalSize += d.Size } // We need at least this much... @@ -158,7 +158,7 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* maxConcurrency -= len(dg.devices) for index, d := range dg.devices { - concurrency := 1 + (uint64(maxConcurrency) * d.size / totalSize) + concurrency := 1 + (uint64(maxConcurrency) * d.Size / totalSize) d.migrationError = make(chan error, 1) // We will just hold onto the first error for now. setMigrationError := func(err error) { @@ -172,50 +172,50 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* // Setup d.to go func() { - err := d.to.HandleNeedAt(func(offset int64, length int32) { + err := d.To.HandleNeedAt(func(offset int64, length int32) { if dg.log != nil { dg.log.Debug(). Int64("offset", offset). Int32("length", length). Int("dev", index). - Str("name", d.schema.Name). + Str("name", d.Schema.Name). Msg("NeedAt for device") } // Prioritize blocks endOffset := uint64(offset + int64(length)) - if endOffset > d.size { - endOffset = d.size + if endOffset > d.Size { + endOffset = d.Size } - startBlock := int(offset / int64(d.blockSize)) - endBlock := int((endOffset-1)/d.blockSize) + 1 + startBlock := int(offset / int64(d.BlockSize)) + endBlock := int((endOffset-1)/d.BlockSize) + 1 for b := startBlock; b < endBlock; b++ { - d.orderer.PrioritiseBlock(b) + d.Orderer.PrioritiseBlock(b) } }) setMigrationError(err) }() go func() { - err := d.to.HandleDontNeedAt(func(offset int64, length int32) { + err := d.To.HandleDontNeedAt(func(offset int64, length int32) { if dg.log != nil { dg.log.Debug(). Int64("offset", offset). Int32("length", length). Int("dev", index). - Str("name", d.schema.Name). + Str("name", d.Schema.Name). 
Msg("DontNeedAt for device") } // Deprioritize blocks endOffset := uint64(offset + int64(length)) - if endOffset > d.size { - endOffset = d.size + if endOffset > d.Size { + endOffset = d.Size } - startBlock := int(offset / int64(d.blockSize)) - endBlock := int((endOffset-1)/d.blockSize) + 1 + startBlock := int(offset / int64(d.BlockSize)) + endBlock := int((endOffset-1)/d.BlockSize) + 1 for b := startBlock; b < endBlock; b++ { - d.orderer.Remove(b) + d.Orderer.Remove(b) } }) setMigrationError(err) @@ -223,18 +223,18 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* cfg := migrator.NewConfig() cfg.Logger = dg.log - cfg.BlockSize = int(d.blockSize) + cfg.BlockSize = int(d.BlockSize) cfg.Concurrency = map[int]int{ storage.BlockTypeAny: int(concurrency), } cfg.LockerHandler = func() { // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) - d.storage.Lock() + d.Storage.Lock() // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) } cfg.UnlockerHandler = func() { // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) - d.storage.Unlock() + d.Storage.Unlock() // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) } cfg.ErrorHandler = func(_ *storage.BlockInfo, err error) { @@ -246,19 +246,19 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* progressHandler(dg.progress) dg.progressLock.Unlock() } - mig, err := migrator.NewMigrator(d.dirtyRemote, d.to, d.orderer, cfg) + mig, err := migrator.NewMigrator(d.DirtyRemote, d.To, d.Orderer, cfg) if err != nil { return err } - d.migrator = mig + d.Migrator = mig if dg.met != nil { - dg.met.AddMigrator(d.schema.Name, mig) + dg.met.AddMigrator(d.Schema.Name, mig) } if dg.log != nil { dg.log.Debug(). Uint64("concurrency", concurrency). Int("index", index). - Str("name", d.schema.Name). + Str("name", d.Schema.Name). Msg("Setup migrator") } } @@ -268,7 +268,7 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* // Now start them all migrating, and collect err for _, d := range dg.devices { go func() { - err := d.migrator.Migrate(d.numBlocks) + err := d.Migrator.Migrate(d.NumBlocks) errs <- err }() } @@ -285,7 +285,7 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* } for index, d := range dg.devices { - err := d.migrator.WaitForCompletion() + err := d.Migrator.WaitForCompletion() if err != nil { if dg.log != nil { dg.log.Error().Err(err).Int("index", index).Msg("error migrating device group waiting for completion") @@ -321,7 +321,7 @@ type MigrateDirtyHooks struct { func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { // If StartMigrationTo or MigrateAll have not been called, return error. for _, d := range dg.devices { - if d.to == nil || d.migrator == nil { + if d.To == nil || d.Migrator == nil { return errNotSetup } } @@ -330,22 +330,22 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { for index, d := range dg.devices { // First unlock the storage if it is locked due to a previous MigrateDirty call - d.storage.Unlock() + d.Storage.Unlock() go func() { dirtyHistory := make([]int, 0) for { if hooks != nil && hooks.PreGetDirty != nil { - hooks.PreGetDirty(index, d.to, dirtyHistory) + hooks.PreGetDirty(index, d.To, dirtyHistory) } - blocks := d.migrator.GetLatestDirty() + blocks := d.Migrator.GetLatestDirty() if dg.log != nil { dg.log.Debug(). Int("blocks", len(blocks)). 
Int("index", index). - Str("name", d.schema.Name). + Str("name", d.Schema.Name). Msg("migrating dirty blocks") } @@ -356,40 +356,40 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { } if hooks != nil && hooks.PostGetDirty != nil { - hooks.PostGetDirty(index, d.to, dirtyHistory, blocks) + hooks.PostGetDirty(index, d.To, dirtyHistory, blocks) } if len(blocks) == 0 { break } - err := d.to.DirtyList(int(d.blockSize), blocks) + err := d.To.DirtyList(int(d.BlockSize), blocks) if err != nil { errs <- err return } - err = d.migrator.MigrateDirty(blocks) + err = d.Migrator.MigrateDirty(blocks) if err != nil { errs <- err return } if hooks != nil && hooks.PostMigrateDirty != nil { - if hooks.PostMigrateDirty(index, d.to, dirtyHistory) { + if hooks.PostMigrateDirty(index, d.To, dirtyHistory) { break // PostMigrateDirty returned true, which means stop doing any dirty loop business. } } } - err := d.migrator.WaitForCompletion() + err := d.Migrator.WaitForCompletion() if err != nil { errs <- err return } if hooks != nil && hooks.Completed != nil { - hooks.Completed(index, d.to) + hooks.Completed(index, d.To) } errs <- nil @@ -410,7 +410,7 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { func (dg *DeviceGroup) Completed() error { for index, d := range dg.devices { - err := d.to.SendEvent(&packets.Event{Type: packets.EventCompleted}) + err := d.To.SendEvent(&packets.Event{Type: packets.EventCompleted}) if err != nil { return err } @@ -418,7 +418,7 @@ func (dg *DeviceGroup) Completed() error { if dg.log != nil { dg.log.Debug(). Int("index", index). - Str("name", d.schema.Name). + Str("name", d.Schema.Name). Msg("migration completed") } } From 44a40f9e862879199f0f1290d7eb102cf8e86999 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 16 Dec 2024 11:45:31 +0000 Subject: [PATCH 31/37] Refined MigrateDirty hooks for drafter integrate Signed-off-by: Jimmy Moore --- cmd/serve.go | 55 +++++++++++----------- pkg/storage/devicegroup/device_group.go | 9 +++- pkg/storage/devicegroup/device_group_to.go | 44 +++++++++-------- 3 files changed, 56 insertions(+), 52 deletions(-) diff --git a/cmd/serve.go b/cmd/serve.go index e74d0f1..9ebfde9 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -159,35 +159,34 @@ func runServe(_ *cobra.Command, _ []string) { fmt.Printf("All devices migrated in %dms.\n", time.Since(ctime).Milliseconds()) - for { - - // Now do a dirty block phase... - hooks := &devicegroup.MigrateDirtyHooks{ - PreGetDirty: func(index int, _ *protocol.ToProtocol, dirtyHistory []int) { - fmt.Printf("# [%d]PreGetDirty %v\n", index, dirtyHistory) - }, - PostGetDirty: func(index int, _ *protocol.ToProtocol, dirtyHistory []int, _ []uint) { - fmt.Printf("# [%d]PostGetDirty %v\n", index, dirtyHistory) - }, - PostMigrateDirty: func(index int, _ *protocol.ToProtocol, dirtyHistory []int) bool { - fmt.Printf("# [%d]PostMigrateDirty %v\n", index, dirtyHistory) - time.Sleep(1 * time.Second) // Wait a bit for next dirty loop - return false - }, - Completed: func(index int, _ *protocol.ToProtocol) { - fmt.Printf("# [%d]Completed\n", index) - }, - } - err = dg.MigrateDirty(hooks) - if err != nil { - dg.CloseAll() - panic(err) - } - - if !serveContinuous { - break - } + // Now do a dirty block phase... 
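The reworked hooks are keyed by device name and return `(bool, error)`, where the bool decides whether that device's dirty loop keeps going. Besides the command's hooks shown next, a sketch of a hook set that keeps looping until the dirty set is small and then lets `MigrateDirty` return; any remaining dirty blocks can be picked up by a later `MigrateDirty` call. The threshold and sleep values are illustrative:

    hooks := &devicegroup.MigrateDirtyHooks{
        PostGetDirty: func(name string, blocks []uint) (bool, error) {
            fmt.Printf("[%s] %d dirty blocks\n", name, len(blocks))
            // Returning false stops this device's dirty loop; stop once the dirty
            // set is small enough (a threshold of 10 blocks is illustrative).
            return len(blocks) > 10, nil
        },
        PostMigrateDirty: func(name string, _ []uint) (bool, error) {
            time.Sleep(100 * time.Millisecond) // let further writes accumulate before the next pass
            return true, nil
        },
        Completed: func(name string) {
            fmt.Printf("[%s] dirty phase done\n", name)
        },
    }

    if err := dg.MigrateDirty(hooks); err != nil {
        // handle the error (the command above calls dg.CloseAll() and panics)
    }
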
+ hooks := &devicegroup.MigrateDirtyHooks{ + PreGetDirty: func(name string) error { + fmt.Printf("# [%s]PreGetDirty\n", name) + return nil + }, + PostGetDirty: func(name string, blocks []uint) (bool, error) { + fmt.Printf("# [%s]PostGetDirty %d\n", name, len(blocks)) + if serveContinuous { + return true, nil + } + return len(blocks) > 0, nil + }, + PostMigrateDirty: func(name string, blocks []uint) (bool, error) { + fmt.Printf("# [%s]PostMigrateDirty %d\n", name, len(blocks)) + time.Sleep(1 * time.Second) // Wait a bit for next dirty loop + return true, nil + }, + Completed: func(name string) { + fmt.Printf("# [%s]Completed\n", name) + }, + } + err = dg.MigrateDirty(hooks) + if err != nil { + dg.CloseAll() + panic(err) } + fmt.Printf("All devices migrated(including dirty) in %dms.\n", time.Since(ctime).Milliseconds()) err = dg.Completed() // Send completion events for the devices. diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index dd19939..a5bd846 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -20,7 +20,6 @@ import ( const volatilityExpiry = 30 * time.Minute const defaultBlockSize = 1024 * 1024 -const maxDirtyHistory = 32 var errNotSetup = errors.New("toProtocol not setup") @@ -62,6 +61,14 @@ func (dg *DeviceGroup) GetDeviceSchema() []*config.DeviceSchema { return s } +func (dg *DeviceGroup) GetAllNames() []string { + names := make([]string, 0) + for _, di := range dg.devices { + names = append(names, di.Schema.Name) + } + return names +} + func (dg *DeviceGroup) GetDeviceInformationByName(name string) *DeviceInformation { for _, di := range dg.devices { if di.Schema.Name == name { diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index 07c5ab3..f215b5b 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -229,12 +229,12 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* } cfg.LockerHandler = func() { // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreLock})) - d.Storage.Lock() + // d.Storage.Lock() // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostLock})) } cfg.UnlockerHandler = func() { // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPreUnlock})) - d.Storage.Unlock() + // d.Storage.Unlock() // setMigrationError(d.to.SendEvent(&packets.Event{Type: packets.EventPostUnlock})) } cfg.ErrorHandler = func(_ *storage.BlockInfo, err error) { @@ -312,10 +312,10 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* } type MigrateDirtyHooks struct { - PreGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) - PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) - PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool - Completed func(index int, to *protocol.ToProtocol) + PreGetDirty func(name string) error + PostGetDirty func(name string, blocks []uint) (bool, error) + PostMigrateDirty func(name string, blocks []uint) (bool, error) + Completed func(name string) } func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { @@ -333,11 +333,9 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { d.Storage.Unlock() go func() { - dirtyHistory := make([]int, 0) - for { if hooks != nil && hooks.PreGetDirty != nil { - hooks.PreGetDirty(index, d.To, dirtyHistory) + 
hooks.PreGetDirty(d.Schema.Name) } blocks := d.Migrator.GetLatestDirty() @@ -349,18 +347,14 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { Msg("migrating dirty blocks") } - dirtyHistory = append(dirtyHistory, len(blocks)) - // Cap it at a certain MAX LENGTH - if len(dirtyHistory) > maxDirtyHistory { - dirtyHistory = dirtyHistory[1:] - } - if hooks != nil && hooks.PostGetDirty != nil { - hooks.PostGetDirty(index, d.To, dirtyHistory, blocks) - } - - if len(blocks) == 0 { - break + cont, err := hooks.PostGetDirty(d.Schema.Name, blocks) + if err != nil { + errs <- err + } + if !cont { + break + } } err := d.To.DirtyList(int(d.BlockSize), blocks) @@ -376,8 +370,12 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { } if hooks != nil && hooks.PostMigrateDirty != nil { - if hooks.PostMigrateDirty(index, d.To, dirtyHistory) { - break // PostMigrateDirty returned true, which means stop doing any dirty loop business. + cont, err := hooks.PostMigrateDirty(d.Schema.Name, blocks) + if err != nil { + errs <- err + } + if !cont { + break } } } @@ -389,7 +387,7 @@ func (dg *DeviceGroup) MigrateDirty(hooks *MigrateDirtyHooks) error { } if hooks != nil && hooks.Completed != nil { - hooks.Completed(index, d.To) + hooks.Completed(d.Schema.Name) } errs <- nil From 70c4e9f3c3bf5949832d0ad80d436e9baa021a7e Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Mon, 16 Dec 2024 16:28:23 +0000 Subject: [PATCH 32/37] Updated README, and progress now map by name Signed-off-by: Jimmy Moore --- cmd/serve.go | 8 ++++---- pkg/storage/devicegroup/README.md | 8 ++++---- pkg/storage/devicegroup/device_group.go | 2 +- pkg/storage/devicegroup/device_group_test.go | 4 ++-- pkg/storage/devicegroup/device_group_to.go | 10 +++++----- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cmd/serve.go b/cmd/serve.go index 9ebfde9..c114cf2 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -144,10 +144,10 @@ func runServe(_ *cobra.Command, _ []string) { panic(err) } - err = dg.MigrateAll(1000, func(ps []*migrator.MigrationProgress) { - for index, p := range ps { - fmt.Printf("[%d] Progress Moved: %d/%d %.2f%% Clean: %d/%d %.2f%% InProgress: %d\n", - index, p.MigratedBlocks, p.TotalBlocks, p.MigratedBlocksPerc, + err = dg.MigrateAll(1000, func(ps map[string]*migrator.MigrationProgress) { + for name, p := range ps { + fmt.Printf("[%s] Progress Moved: %d/%d %.2f%% Clean: %d/%d %.2f%% InProgress: %d\n", + name, p.MigratedBlocks, p.TotalBlocks, p.MigratedBlocksPerc, p.ReadyBlocks, p.TotalBlocks, p.ReadyBlocksPerc, p.ActiveBlocks) } diff --git a/pkg/storage/devicegroup/README.md b/pkg/storage/devicegroup/README.md index ae7367e..8227fd5 100644 --- a/pkg/storage/devicegroup/README.md +++ b/pkg/storage/devicegroup/README.md @@ -41,10 +41,10 @@ Devices in a `DeviceGroup` are sent together, which allows Silo to optimize all Within the `MigrateDirty` there are a number of hooks we can use to control things. MigrateDirty will return once all devices have no more dirty data. You can of course then call MigrateDirty again eg for continuous sync. 
type MigrateDirtyHooks struct { - PreGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) - PostGetDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int, blocks []uint) - PostMigrateDirty func(index int, to *protocol.ToProtocol, dirtyHistory []int) bool - Completed func(index int, to *protocol.ToProtocol) + PreGetDirty func(name string) error + PostGetDirty func(name string, blocks []uint) (bool, error) + PostMigrateDirty func(name string, blocks []uint) (bool, error) + Completed func(name string) } diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index a5bd846..9880c45 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -30,7 +30,7 @@ type DeviceGroup struct { controlProtocol protocol.Protocol incomingDevicesWg sync.WaitGroup progressLock sync.Mutex - progress []*migrator.MigrationProgress + progress map[string]*migrator.MigrationProgress } type DeviceInformation struct { diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 8fcb461..42c9b49 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -180,7 +180,7 @@ func TestDeviceGroupMigrateTo(t *testing.T) { err := dg.StartMigrationTo(prSource) assert.NoError(t, err) - pHandler := func(_ []*migrator.MigrationProgress) {} + pHandler := func(_ map[string]*migrator.MigrationProgress) {} err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) @@ -287,7 +287,7 @@ func TestDeviceGroupMigrate(t *testing.T) { tawg.Done() }) - pHandler := func(_ []*migrator.MigrationProgress) {} + pHandler := func(_ map[string]*migrator.MigrationProgress) {} err = dg.MigrateAll(100, pHandler) assert.NoError(t, err) diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index f215b5b..47175ac 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -24,7 +24,7 @@ func NewFromSchema(ds []*config.DeviceSchema, log types.Logger, met metrics.Silo log: log, met: met, devices: make([]*DeviceInformation, 0), - progress: make([]*migrator.MigrationProgress, 0), + progress: make(map[string]*migrator.MigrationProgress), } for _, s := range ds { @@ -82,10 +82,10 @@ func NewFromSchema(ds []*config.DeviceSchema, log types.Logger, met metrics.Silo }) // Set these two at least, so we know *something* about every device in progress handler. - dg.progress = append(dg.progress, &migrator.MigrationProgress{ + dg.progress[s.Name] = &migrator.MigrationProgress{ BlockSize: blockSize, TotalBlocks: totalBlocks, - }) + } } if log != nil { @@ -131,7 +131,7 @@ func (dg *DeviceGroup) StartMigrationTo(pro protocol.Protocol) error { } // This will Migrate all devices to the 'to' setup in SendDevInfo stage. 
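Reporting progress as a map keyed by device name means a handler can reason about the whole group without caring about ordering. A sketch of a handler that watches for every device becoming fully ready; as the README notes, this is also a natural place to send custom data:

    pHandler := func(ps map[string]*migrator.MigrationProgress) {
        allReady := true
        for name, p := range ps {
            fmt.Printf("[%s] ready %.2f%% migrated %.2f%% in-progress %d\n",
                name, p.ReadyBlocksPerc, p.MigratedBlocksPerc, p.ActiveBlocks)
            if p.ReadyBlocks != p.TotalBlocks {
                allReady = false
            }
        }
        if allReady {
            // all devices report every block ready; eg trigger the next step of the
            // migration, or send custom data as described in the README
        }
    }

    err := dg.MigrateAll(1000, pHandler)
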
-func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []*migrator.MigrationProgress)) error { +func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p map[string]*migrator.MigrationProgress)) error { for _, d := range dg.devices { if d.To == nil { return errNotSetup @@ -242,7 +242,7 @@ func (dg *DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p []* } cfg.ProgressHandler = func(p *migrator.MigrationProgress) { dg.progressLock.Lock() - dg.progress[index] = p + dg.progress[d.Schema.Name] = p progressHandler(dg.progress) dg.progressLock.Unlock() } From 65f19cdd0799fbb9ed527fd9bbd14f8a8b90e7a5 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 17 Dec 2024 10:05:10 +0000 Subject: [PATCH 33/37] Small possible race in nbd_dispatch Signed-off-by: Jimmy Moore --- pkg/storage/expose/nbd.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/storage/expose/nbd.go b/pkg/storage/expose/nbd.go index a95f627..600f3c7 100644 --- a/pkg/storage/expose/nbd.go +++ b/pkg/storage/expose/nbd.go @@ -293,11 +293,6 @@ func (n *ExposedStorageNBDNL) Shutdown() error { // First cancel the context, which will stop waiting on pending readAt/writeAt... n.cancelfn() - // Now wait for any pending responses to be sent - for _, d := range n.dispatchers { - d.Wait() - } - // Now ask to disconnect err := nbdnl.Disconnect(uint32(n.deviceIndex)) if err != nil { @@ -321,6 +316,12 @@ func (n *ExposedStorageNBDNL) Shutdown() error { time.Sleep(100 * time.Nanosecond) } + // Now wait for any pending responses to be sent. There should not be any new + // Requests received since we have Disconnected. + for _, d := range n.dispatchers { + d.Wait() + } + if n.config.Logger != nil { n.config.Logger.Trace(). Str("uuid", n.uuid.String()). From d06050a278ed04635a8a26a1ec8742b67b889d74 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 17 Dec 2024 10:14:45 +0000 Subject: [PATCH 34/37] Better shutdown Signed-off-by: Jimmy Moore --- pkg/storage/expose/nbd.go | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/pkg/storage/expose/nbd.go b/pkg/storage/expose/nbd.go index 600f3c7..5ecfabd 100644 --- a/pkg/storage/expose/nbd.go +++ b/pkg/storage/expose/nbd.go @@ -108,6 +108,7 @@ type ExposedStorageNBDNL struct { provLock sync.RWMutex deviceIndex int dispatchers []*Dispatch + handlersWg sync.WaitGroup } func NewExposedStorageNBDNL(prov storage.Provider, conf *Config) *ExposedStorageNBDNL { @@ -210,6 +211,7 @@ func (n *ExposedStorageNBDNL) Init() error { d.asyncReads = n.config.AsyncReads d.asyncWrites = n.config.AsyncWrites // Start reading commands on the socket and dispatching them to our provider + n.handlersWg.Add(1) go func() { err := d.Handle() if n.config.Logger != nil { @@ -218,6 +220,7 @@ func (n *ExposedStorageNBDNL) Init() error { Err(err). Msg("nbd dispatch completed") } + n.handlersWg.Done() }() n.socks = append(n.socks, serverc) socks = append(socks, client) @@ -293,20 +296,29 @@ func (n *ExposedStorageNBDNL) Shutdown() error { // First cancel the context, which will stop waiting on pending readAt/writeAt... n.cancelfn() - // Now ask to disconnect - err := nbdnl.Disconnect(uint32(n.deviceIndex)) - if err != nil { - return err - } - // Close all the socket pairs... for _, v := range n.socks { - err = v.Close() + err := v.Close() if err != nil { return err } } + // Now wait until the handlers return + n.handlersWg.Wait() + + // Now wait for any pending responses to be sent. 
There should not be any new + // Requests received since we have Disconnected. + for _, d := range n.dispatchers { + d.Wait() + } + + // Now ask to disconnect + err := nbdnl.Disconnect(uint32(n.deviceIndex)) + if err != nil { + return err + } + // Wait until it's completely disconnected... for { s, err := nbdnl.Status(uint32(n.deviceIndex)) @@ -316,12 +328,6 @@ func (n *ExposedStorageNBDNL) Shutdown() error { time.Sleep(100 * time.Nanosecond) } - // Now wait for any pending responses to be sent. There should not be any new - // Requests received since we have Disconnected. - for _, d := range n.dispatchers { - d.Wait() - } - if n.config.Logger != nil { n.config.Logger.Trace(). Str("uuid", n.uuid.String()). From 7b4ae69cf5217aaa94f8cc30c70cd1bc86cddaea Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 17 Dec 2024 11:53:45 +0000 Subject: [PATCH 35/37] Even better nbd dispatch shutdown Signed-off-by: Jimmy Moore --- pkg/storage/expose/nbd_dispatch.go | 38 +++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/pkg/storage/expose/nbd_dispatch.go b/pkg/storage/expose/nbd_dispatch.go index dc5b59c..6e9d429 100644 --- a/pkg/storage/expose/nbd_dispatch.go +++ b/pkg/storage/expose/nbd_dispatch.go @@ -3,6 +3,7 @@ package expose import ( "context" "encoding/binary" + "errors" "fmt" "io" "sync" @@ -13,6 +14,8 @@ import ( "github.com/loopholelabs/silo/pkg/storage" ) +var ErrShuttingDown = errors.New("shutting down. Cannot serve any new requests.") + const dispatchBufferSize = 4 * 1024 * 1024 /** @@ -76,6 +79,8 @@ type Dispatch struct { prov storage.Provider fatal chan error pendingResponses sync.WaitGroup + shuttingDown bool + shuttingDownLock sync.Mutex metricPacketsIn uint64 metricPacketsOut uint64 metricReadAt uint64 @@ -140,6 +145,11 @@ func (d *Dispatch) GetMetrics() *DispatchMetrics { } func (d *Dispatch) Wait() { + d.shuttingDownLock.Lock() + d.shuttingDown = true + defer d.shuttingDownLock.Unlock() + // Stop accepting any new requests... 
+ if d.logger != nil { d.logger.Trace().Str("device", d.dev).Msg("nbd waiting for pending responses") } @@ -342,16 +352,22 @@ func (d *Dispatch) cmdRead(cmdHandle uint64, cmdFrom uint64, cmdLength uint32) e case e = <-errchan: } - errorValue := uint32(0) if e != nil { - errorValue = 1 - data = make([]byte, 0) // If there was an error, don't send data + return d.writeResponse(1, handle, []byte{}) } - return d.writeResponse(errorValue, handle, data) + return d.writeResponse(0, handle, data) } - if d.asyncReads { + d.shuttingDownLock.Lock() + if !d.shuttingDown { d.pendingResponses.Add(1) + } else { + d.shuttingDownLock.Unlock() + return ErrShuttingDown + } + d.shuttingDownLock.Unlock() + + if d.asyncReads { go func() { ctime := time.Now() err := performRead(cmdHandle, cmdFrom, cmdLength) @@ -368,7 +384,6 @@ func (d *Dispatch) cmdRead(cmdHandle uint64, cmdFrom uint64, cmdLength uint32) e d.pendingResponses.Done() }() } else { - d.pendingResponses.Add(1) ctime := time.Now() err := performRead(cmdHandle, cmdFrom, cmdLength) if err == nil { @@ -418,8 +433,16 @@ func (d *Dispatch) cmdWrite(cmdHandle uint64, cmdFrom uint64, cmdLength uint32, return d.writeResponse(errorValue, handle, []byte{}) } - if d.asyncWrites { + d.shuttingDownLock.Lock() + if !d.shuttingDown { d.pendingResponses.Add(1) + } else { + d.shuttingDownLock.Unlock() + return ErrShuttingDown + } + d.shuttingDownLock.Unlock() + + if d.asyncWrites { go func() { ctime := time.Now() err := performWrite(cmdHandle, cmdFrom, cmdLength, cmdData) @@ -436,7 +459,6 @@ func (d *Dispatch) cmdWrite(cmdHandle uint64, cmdFrom uint64, cmdLength uint32, d.pendingResponses.Done() }() } else { - d.pendingResponses.Add(1) ctime := time.Now() err := performWrite(cmdHandle, cmdFrom, cmdLength, cmdData) if err == nil { From c62e0bc3b60e16cdaa3c51b3e7c98189abfa8351 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Tue, 17 Dec 2024 13:57:02 +0000 Subject: [PATCH 36/37] lint Signed-off-by: Jimmy Moore --- pkg/storage/expose/nbd_dispatch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storage/expose/nbd_dispatch.go b/pkg/storage/expose/nbd_dispatch.go index 6e9d429..8bb50c4 100644 --- a/pkg/storage/expose/nbd_dispatch.go +++ b/pkg/storage/expose/nbd_dispatch.go @@ -14,7 +14,7 @@ import ( "github.com/loopholelabs/silo/pkg/storage" ) -var ErrShuttingDown = errors.New("shutting down. Cannot serve any new requests.") +var ErrShuttingDown = errors.New("shutting down. Cannot serve any new requests") const dispatchBufferSize = 4 * 1024 * 1024 From e13b79a1c76a16d0a820f2e3f17e0dfd31375270 Mon Sep 17 00:00:00 2001 From: Jimmy Moore Date: Wed, 18 Dec 2024 13:28:24 +0000 Subject: [PATCH 37/37] Moved customDataHandler. 
Updated WaitForCompletion to honour context Signed-off-by: Jimmy Moore --- cmd/connect.go | 2 +- pkg/storage/devicegroup/device_group.go | 4 +- pkg/storage/devicegroup/device_group_from.go | 99 +++++++++++++------- pkg/storage/devicegroup/device_group_test.go | 20 ++-- pkg/storage/devicegroup/device_group_to.go | 4 +- 5 files changed, 82 insertions(+), 47 deletions(-) diff --git a/cmd/connect.go b/cmd/connect.go index 1b73bd3..1d9a49c 100644 --- a/cmd/connect.go +++ b/cmd/connect.go @@ -131,7 +131,7 @@ func runConnect(_ *cobra.Command, _ []string) { return schema } - dg, err = devicegroup.NewFromProtocol(protoCtx, pro, tweak, nil, log, siloMetrics) + dg, err = devicegroup.NewFromProtocol(protoCtx, pro, tweak, nil, nil, log, siloMetrics) for _, d := range dg.GetDeviceSchema() { expName := dg.GetExposedDeviceByName(d.Name) diff --git a/pkg/storage/devicegroup/device_group.go b/pkg/storage/devicegroup/device_group.go index 9880c45..918e75a 100644 --- a/pkg/storage/devicegroup/device_group.go +++ b/pkg/storage/devicegroup/device_group.go @@ -1,6 +1,7 @@ package devicegroup import ( + "context" "errors" "sync" "time" @@ -26,9 +27,10 @@ var errNotSetup = errors.New("toProtocol not setup") type DeviceGroup struct { log types.Logger met metrics.SiloMetrics + ctx context.Context devices []*DeviceInformation controlProtocol protocol.Protocol - incomingDevicesWg sync.WaitGroup + incomingDevicesCh chan bool progressLock sync.Mutex progress map[string]*migrator.MigrationProgress } diff --git a/pkg/storage/devicegroup/device_group_from.go b/pkg/storage/devicegroup/device_group_from.go index 53ad055..5fb0bf2 100644 --- a/pkg/storage/devicegroup/device_group_from.go +++ b/pkg/storage/devicegroup/device_group_from.go @@ -17,6 +17,7 @@ func NewFromProtocol(ctx context.Context, pro protocol.Protocol, tweakDeviceSchema func(index int, name string, schema string) string, eventHandler func(e *packets.Event), + customDataHandler func(data []byte), log types.Logger, met metrics.SiloMetrics) (*DeviceGroup, error) { @@ -32,6 +33,45 @@ func NewFromProtocol(ctx context.Context, devices := make([]*config.DeviceSchema, len(dgi.Devices)) + // Setup something to listen for custom data... + handleCustomDataEvent := func() error { + // This is our control channel, and we're expecting a DeviceGroupInfo + id, evData, err := pro.WaitForCommand(0, packets.CommandEvent) + if err != nil { + return err + } + ev, err := packets.DecodeEvent(evData) + if err != nil { + return err + } + if ev.Type != packets.EventCustom || ev.CustomType != 0 { + return err + } + + if customDataHandler != nil { + customDataHandler(ev.CustomPayload) + } + + // Reply with ack + eack := packets.EncodeEventResponse() + _, err = pro.SendPacket(0, id, eack, protocol.UrgencyUrgent) + if err != nil { + return err + } + return nil + } + + // Listen for custom data events + go func() { + for { + err := handleCustomDataEvent() + if err != nil && !errors.Is(err, context.Canceled) { + log.Debug().Err(err).Msg("handleCustomDataEvenet returned") + return + } + } + }() + // First create the devices we need using the schemas sent... for index, di := range dgi.Devices { // We may want to tweak schemas here eg autoStart = false on sync. Or modify pathnames. @@ -52,8 +92,9 @@ func NewFromProtocol(ctx context.Context, } dg.controlProtocol = pro + dg.ctx = ctx - dg.incomingDevicesWg.Add(len(dg.devices)) + dg.incomingDevicesCh = make(chan bool, len(dg.devices)) // We need to create the FromProtocol for each device, and associated goroutines here. 
for index, di := range dgi.Devices { @@ -77,26 +118,38 @@ func NewFromProtocol(ctx context.Context, return nil, err } go func() { - _ = from.HandleReadAt() + err := from.HandleReadAt() + if err != nil && !errors.Is(err, context.Canceled) { + log.Debug().Err(err).Msg("HandleReadAt returned") + } }() go func() { - _ = from.HandleWriteAt() + err := from.HandleWriteAt() + if err != nil && !errors.Is(err, context.Canceled) { + log.Debug().Err(err).Msg("HandleWriteAt returned") + } }() go func() { - _ = from.HandleDirtyList(func(dirtyBlocks []uint) { + err := from.HandleDirtyList(func(dirtyBlocks []uint) { // Tell the waitingCache about it d.WaitingCacheLocal.DirtyBlocks(dirtyBlocks) }) + if err != nil && !errors.Is(err, context.Canceled) { + log.Debug().Err(err).Msg("HandleDirtyList returned") + } }() go func() { - from.HandleEvent(func(p *packets.Event) { + err := from.HandleEvent(func(p *packets.Event) { if p.Type == packets.EventCompleted { - dg.incomingDevicesWg.Done() + dg.incomingDevicesCh <- true } if d.EventHandler != nil { d.EventHandler(p) } }) + if err != nil && !errors.Is(err, context.Canceled) { + log.Debug().Err(err).Msg("HandleEvent returned") + } }() } @@ -104,33 +157,13 @@ func NewFromProtocol(ctx context.Context, } // Wait for completion events from all devices here. -func (dg *DeviceGroup) WaitForCompletion() { - dg.incomingDevicesWg.Wait() -} - -func (dg *DeviceGroup) HandleCustomData(cb func(customData []byte)) error { - for { - // This is our control channel, and we're expecting a DeviceGroupInfo - id, evData, err := dg.controlProtocol.WaitForCommand(0, packets.CommandEvent) - if err != nil { - return err - } - ev, err := packets.DecodeEvent(evData) - if err != nil { - return err - } - - if ev.Type != packets.EventCustom || ev.CustomType != 0 { - return errors.New("unexpected event") - } - - cb(ev.CustomPayload) - - // Reply with ack - eack := packets.EncodeEventResponse() - _, err = dg.controlProtocol.SendPacket(0, id, eack, protocol.UrgencyUrgent) - if err != nil { - return err +func (dg *DeviceGroup) WaitForCompletion() error { + for range dg.devices { + select { + case <-dg.incomingDevicesCh: + case <-dg.ctx.Done(): + return dg.ctx.Err() } } + return nil } diff --git a/pkg/storage/devicegroup/device_group_test.go b/pkg/storage/devicegroup/device_group_test.go index 42c9b49..32609b7 100644 --- a/pkg/storage/devicegroup/device_group_test.go +++ b/pkg/storage/devicegroup/device_group_test.go @@ -255,10 +255,18 @@ func TestDeviceGroupMigrate(t *testing.T) { return s } + // TransferAuthority + var tawg sync.WaitGroup + tawg.Add(1) + cdh := func(data []byte) { + assert.Equal(t, []byte("Hello"), data) + tawg.Done() + } + wg.Add(1) go func() { var err error - dg2, err = NewFromProtocol(ctx, prDest, tweak, nil, nil, nil) + dg2, err = NewFromProtocol(ctx, prDest, tweak, nil, cdh, nil, nil) assert.NoError(t, err) wg.Done() }() @@ -271,16 +279,6 @@ func TestDeviceGroupMigrate(t *testing.T) { wg.Wait() // TransferAuthority - var tawg sync.WaitGroup - tawg.Add(1) - go func() { - err := dg2.HandleCustomData(func(data []byte) { - assert.Equal(t, []byte("Hello"), data) - tawg.Done() - }) - assert.ErrorIs(t, err, context.Canceled) - }() - tawg.Add(1) time.AfterFunc(100*time.Millisecond, func() { dg.SendCustomData([]byte("Hello")) diff --git a/pkg/storage/devicegroup/device_group_to.go b/pkg/storage/devicegroup/device_group_to.go index 47175ac..3f834ac 100644 --- a/pkg/storage/devicegroup/device_group_to.go +++ b/pkg/storage/devicegroup/device_group_to.go @@ -243,7 +243,9 @@ func (dg 
*DeviceGroup) MigrateAll(maxConcurrency int, progressHandler func(p map cfg.ProgressHandler = func(p *migrator.MigrationProgress) { dg.progressLock.Lock() dg.progress[d.Schema.Name] = p - progressHandler(dg.progress) + if progressHandler != nil { + progressHandler(dg.progress) + } dg.progressLock.Unlock() } mig, err := migrator.NewMigrator(d.DirtyRemote, d.To, d.Orderer, cfg)
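For illustration only (not part of the patch series): a minimal receiver-side sketch of the reworked flow, where custom data is delivered through the callback passed to `NewFromProtocol` (the separate `HandleCustomData` loop is gone) and `WaitForCompletion` now returns an error and honours context cancellation. The signatures and import paths come from the diffs above; the helper name `receiveGroup`, the pass-through `tweak` function and the logging are assumptions, and constructing the `protocol.Protocol` (`prDest`) is outside the scope of this sketch.

```go
package example

import (
	"context"
	"fmt"

	"github.com/loopholelabs/silo/pkg/storage/devicegroup"
	"github.com/loopholelabs/silo/pkg/storage/protocol"
)

// receiveGroup builds a DeviceGroup from an incoming protocol and waits for all
// devices to complete. Illustrative sketch only.
func receiveGroup(ctx context.Context, prDest protocol.Protocol) error {
	// Pass schemas through unchanged; this is the point where autoStart or
	// pathnames could be tweaked for the receiving side.
	tweak := func(index int, name string, schema string) string {
		return schema
	}

	// Custom data (e.g. a transfer-of-authority payload) is now delivered here.
	customData := func(data []byte) {
		fmt.Printf("received custom data: %q\n", data)
	}

	// nil event handler, logger and metrics, as in the updated tests.
	dg, err := devicegroup.NewFromProtocol(ctx, prDest, tweak, nil, customData, nil, nil)
	if err != nil {
		return err
	}

	// Blocks until every device has received its completion event, or until ctx
	// is cancelled, in which case the context's error is returned.
	return dg.WaitForCompletion()
}
```

On the sending side this pairs with `SendCustomData` (as exercised in the updated test), and `MigrateAll`'s progress handler may now be nil, since the final hunk above guards the callback before invoking it.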