diff --git a/Makefile b/Makefile index 9c8a00e2d..a2a54ab7c 100644 --- a/Makefile +++ b/Makefile @@ -178,7 +178,7 @@ test: test-unit test-all: check test-race test-cover test-unit: - @VERSION=$(VERSION) go test -mod=readonly -tags='ledger test_ledger_mock' ./... + @VERSION=$(VERSION) go test -mod=readonly -tags='ledger test_ledger_mock' `go list ./... | grep -v dex` test-race: @VERSION=$(VERSION) go test -mod=readonly -race -tags='ledger test_ledger_mock' ./... diff --git a/app/app.go b/app/app.go index d25bc5583..0d105a861 100644 --- a/app/app.go +++ b/app/app.go @@ -193,10 +193,6 @@ import ( dexkeeper "github.com/neutron-org/neutron/v4/x/dex/keeper" dextypes "github.com/neutron-org/neutron/v4/x/dex/types" - "github.com/neutron-org/neutron/v4/x/ibcswap" - ibcswapkeeper "github.com/neutron-org/neutron/v4/x/ibcswap/keeper" - ibcswaptypes "github.com/neutron-org/neutron/v4/x/ibcswap/types" - globalfeekeeper "github.com/neutron-org/neutron/v4/x/globalfee/keeper" gmpmiddleware "github.com/neutron-org/neutron/v4/x/gmp" @@ -276,7 +272,6 @@ var ( globalfee.AppModule{}, feemarket.AppModuleBasic{}, dex.AppModuleBasic{}, - ibcswap.AppModuleBasic{}, oracle.AppModuleBasic{}, marketmap.AppModuleBasic{}, dynamicfees.AppModuleBasic{}, @@ -289,7 +284,7 @@ var ( auctiontypes.ModuleName: nil, ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, icatypes.ModuleName: nil, - wasmtypes.ModuleName: {}, + wasmtypes.ModuleName: {authtypes.Burner}, interchainqueriesmoduletypes.ModuleName: nil, feetypes.ModuleName: nil, feeburnertypes.ModuleName: nil, @@ -298,7 +293,6 @@ var ( tokenfactorytypes.ModuleName: {authtypes.Minter, authtypes.Burner}, crontypes.ModuleName: nil, dextypes.ModuleName: {authtypes.Minter, authtypes.Burner}, - ibcswaptypes.ModuleName: {authtypes.Burner}, oracletypes.ModuleName: nil, marketmaptypes.ModuleName: nil, feemarkettypes.FeeCollectorName: nil, @@ -370,7 +364,6 @@ type App struct { CronKeeper cronkeeper.Keeper PFMKeeper *pfmkeeper.Keeper DexKeeper dexkeeper.Keeper - SwapKeeper ibcswapkeeper.Keeper GlobalFeeKeeper globalfeekeeper.Keeper PFMModule packetforward.AppModule @@ -748,15 +741,6 @@ func New( dexModule := dex.NewAppModule(appCodec, app.DexKeeper, app.BankKeeper) - app.SwapKeeper = ibcswapkeeper.NewKeeper( - appCodec, - app.MsgServiceRouter(), - app.IBCKeeper.ChannelKeeper, - app.BankKeeper, - ) - - swapModule := ibcswap.NewAppModule(app.SwapKeeper) - wasmDir := filepath.Join(homePath, "wasm") wasmConfig, err := wasm.ReadWasmConfig(appOpts) if err != nil { @@ -921,7 +905,6 @@ func New( pfmkeeper.DefaultRefundTransferPacketTimeoutTimestamp, ) - ibcStack = ibcswap.NewIBCMiddleware(ibcStack, app.SwapKeeper) ibcStack = gmpmiddleware.NewIBCMiddleware(ibcStack) ibcRouter.AddRoute(icacontrollertypes.SubModuleName, icaControllerStack). 
@@ -972,7 +955,6 @@ func New( globalfee.NewAppModule(app.GlobalFeeKeeper, app.GetSubspace(globalfee.ModuleName), app.AppCodec(), app.keys[globalfee.ModuleName]), feemarket.NewAppModule(appCodec, *app.FeeMarkerKeeper), dynamicfees.NewAppModule(appCodec, *app.DynamicFeesKeeper), - swapModule, dexModule, marketmapModule, oracleModule, @@ -1022,7 +1004,6 @@ func New( oracletypes.ModuleName, globalfee.ModuleName, feemarkettypes.ModuleName, - ibcswaptypes.ModuleName, dextypes.ModuleName, consensusparamtypes.ModuleName, ) @@ -1059,7 +1040,6 @@ func New( oracletypes.ModuleName, globalfee.ModuleName, feemarkettypes.ModuleName, - ibcswaptypes.ModuleName, dextypes.ModuleName, consensusparamtypes.ModuleName, ) @@ -1101,7 +1081,6 @@ func New( feemarkettypes.ModuleName, oracletypes.ModuleName, marketmaptypes.ModuleName, - ibcswaptypes.ModuleName, dextypes.ModuleName, dynamicfeestypes.ModuleName, consensusparamtypes.ModuleName, diff --git a/app/proposals_allowlisting.go b/app/proposals_allowlisting.go index f2cba406d..9b360b0ea 100644 --- a/app/proposals_allowlisting.go +++ b/app/proposals_allowlisting.go @@ -75,6 +75,8 @@ func isSdkMessageWhitelisted(msg sdk.Msg) bool { *feeburnertypes.MsgUpdateParams, *feerefundertypes.MsgUpdateParams, *crontypes.MsgUpdateParams, + *crontypes.MsgAddSchedule, + *crontypes.MsgRemoveSchedule, *contractmanagertypes.MsgUpdateParams, *dextypes.MsgUpdateParams, *banktypes.MsgUpdateParams, diff --git a/contracts/neutron_chain_manager.wasm b/contracts/neutron_chain_manager.wasm index 912789846..5b1fd4f49 100644 Binary files a/contracts/neutron_chain_manager.wasm and b/contracts/neutron_chain_manager.wasm differ diff --git a/go.mod b/go.mod index 837db6d47..d4407fad5 100644 --- a/go.mod +++ b/go.mod @@ -11,10 +11,10 @@ require ( cosmossdk.io/store v1.1.0 cosmossdk.io/x/evidence v0.1.1 cosmossdk.io/x/feegrant v0.1.1 - cosmossdk.io/x/tx v0.13.4 + cosmossdk.io/x/tx v0.13.5 cosmossdk.io/x/upgrade v0.1.4 github.com/CosmWasm/wasmd v0.51.0 - github.com/CosmWasm/wasmvm/v2 v2.0.3 + github.com/CosmWasm/wasmvm/v2 v2.1.2 github.com/cometbft/cometbft v0.38.11 github.com/cosmos/admin-module/v2 v2.0.0-20240430142959-8b3328d1b1a2 github.com/cosmos/cosmos-db v1.0.2 @@ -23,7 +23,7 @@ require ( github.com/cosmos/gogoproto v1.7.0 github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.0.2 github.com/cosmos/ibc-go/modules/capability v1.0.1 - github.com/cosmos/ibc-go/v8 v8.5.0 + github.com/cosmos/ibc-go/v8 v8.5.1 github.com/cosmos/ics23/go v0.11.0 github.com/cosmos/interchain-security/v5 v5.1.1 github.com/gogo/protobuf v1.3.3 @@ -33,13 +33,12 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 github.com/hashicorp/go-metrics v0.5.3 - github.com/iancoleman/orderedmap v0.3.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.20.2 + github.com/prometheus/client_golang v1.20.4 github.com/rs/zerolog v1.33.0 github.com/skip-mev/block-sdk/v2 v2.1.5 github.com/skip-mev/feemarket v1.1.1 - github.com/skip-mev/slinky v1.0.10 + github.com/skip-mev/slinky v1.0.12 github.com/spf13/cast v1.7.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 @@ -47,7 +46,7 @@ require ( github.com/stretchr/testify v1.9.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/grpc v1.66.0 + google.golang.org/grpc v1.66.2 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 ) @@ -89,7 +88,7 @@ require ( github.com/cosmos/btcutil v1.0.5 
// indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect - github.com/cosmos/iavl v1.1.2 // indirect + github.com/cosmos/iavl v1.2.0 // indirect github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -142,6 +141,7 @@ require ( github.com/hashicorp/yamux v0.1.1 // indirect github.com/hdevalence/ed25519consensus v0.1.0 // indirect github.com/huandu/skiplist v1.2.0 // indirect + github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -178,6 +178,7 @@ require ( github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/shamaton/msgpack/v2 v2.2.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect @@ -218,7 +219,7 @@ require ( replace ( github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 - github.com/CosmWasm/wasmd => github.com/neutron-org/wasmd v0.51.2-neutron + github.com/CosmWasm/wasmd => github.com/neutron-org/wasmd v0.53.0-neutron github.com/cosmos/admin-module/v2 => github.com/neutron-org/admin-module/v2 v2.0.2 github.com/cosmos/cosmos-sdk => github.com/neutron-org/cosmos-sdk v0.50.8-neutron // explicitely replace iavl to v1.2.0 cause sometimes go mod tidy uses not right version diff --git a/go.sum b/go.sum index 4e52f6f9f..47b7d96af 100644 --- a/go.sum +++ b/go.sum @@ -212,8 +212,8 @@ cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= cosmossdk.io/x/nft v0.1.1 h1:pslAVS8P5NkW080+LWOamInjDcq+v2GSCo+BjN9sxZ8= cosmossdk.io/x/nft v0.1.1/go.mod h1:Kac6F6y2gsKvoxU+fy8uvxRTi4BIhLOor2zgCNQwVgY= -cosmossdk.io/x/tx v0.13.4 h1:Eg0PbJgeO0gM8p5wx6xa0fKR7hIV6+8lC56UrsvSo0Y= -cosmossdk.io/x/tx v0.13.4/go.mod h1:BkFqrnGGgW50Y6cwTy+JvgAhiffbGEKW6KF9ufcDpvk= +cosmossdk.io/x/tx v0.13.5 h1:FdnU+MdmFWn1pTsbfU0OCf2u6mJ8cqc1H4OMG418MLw= +cosmossdk.io/x/tx v0.13.5/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -225,8 +225,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CosmWasm/wasmvm/v2 v2.0.3 h1:G9jpwDk+qFUfDkXCigpWPn9JTGM0H7egKzWQnMEONwE= -github.com/CosmWasm/wasmvm/v2 v2.0.3/go.mod h1:su9lg5qLr7adV95eOfzjZWkGiky8WNaNIHDr7Fpu7Ck= +github.com/CosmWasm/wasmvm/v2 v2.1.2 h1:GkJ5bAsRlLHfIQVg/FY1VHwLyBwlCjAhDea0B8L+e20= +github.com/CosmWasm/wasmvm/v2 v2.1.2/go.mod h1:bMhLQL4Yp9CzJi9A83aR7VO9wockOsSlZbT4ztOl6bg= github.com/DataDog/datadog-go v3.2.0+incompatible 
h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= @@ -373,10 +373,12 @@ github.com/cosmos/iavl v1.2.0 h1:kVxTmjTh4k0Dh1VNL046v6BXqKziqMDzxo93oh3kOfM= github.com/cosmos/iavl v1.2.0/go.mod h1:HidWWLVAtODJqFD6Hbne2Y0q3SdxByJepHUOeoH4LiI= github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.0.2 h1:dyLNlDElY6+5zW/BT/dO/3Ad9FpQblfh+9dQpYQodbA= github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.0.2/go.mod h1:82hPO/tRawbuFad2gPwChvpZ0JEIoNi91LwVneAYCeM= +github.com/cosmos/ibc-go/modules/apps/callbacks v0.2.1-0.20231113120333-342c00b0f8bd h1:Lx+/5dZ/nN6qPXP2Ofog6u1fmlkCFA1ElcOconnofEM= +github.com/cosmos/ibc-go/modules/apps/callbacks v0.2.1-0.20231113120333-342c00b0f8bd/go.mod h1:JWfpWVKJKiKtd53/KbRoKfxWl8FsT2GPcNezTOk0o5Q= github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= -github.com/cosmos/ibc-go/v8 v8.5.0 h1:OjaSXz480JT8ZuMrASxGgS7XzloZ2NuuJPwZB/fKDgE= -github.com/cosmos/ibc-go/v8 v8.5.0/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= +github.com/cosmos/ibc-go/v8 v8.5.1 h1:3JleEMKBjRKa3FeTKt4fjg22za/qygLBo7mDkoYTNBs= +github.com/cosmos/ibc-go/v8 v8.5.1/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= github.com/cosmos/interchain-security/v5 v5.0.0-20240802125602-fa1e09444aae h1:/EWV9qryltapge0v4ctvl2jV3Nne5nsbd+GYblj/jWA= @@ -847,8 +849,8 @@ github.com/neutron-org/admin-module/v2 v2.0.2 h1:XDDFWjvkVBKRf3lBFCazT1zAXZ3dHX8 github.com/neutron-org/admin-module/v2 v2.0.2/go.mod h1:RfOyabXsdJ5btcOKyKPZDYiZhtuKFubbJMOb8EJZtvA= github.com/neutron-org/cosmos-sdk v0.50.8-neutron h1:L+4obYi/KkkmS05gBlXNF+FhipHYTl0iO3EkmpMBXkE= github.com/neutron-org/cosmos-sdk v0.50.8-neutron/go.mod h1:Zb+DgHtiByNwgj71IlJBXwOq6dLhtyAq3AgqpXm/jHo= -github.com/neutron-org/wasmd v0.51.2-neutron h1:+Ih6AzySHeB+ArGmmmpXVQNaiX/fvK5ZSbbaMY7+IEE= -github.com/neutron-org/wasmd v0.51.2-neutron/go.mod h1:7TSaj5HoolghujuVWeExqmcUKgpcYWEySGLSODbnnwY= +github.com/neutron-org/wasmd v0.53.0-neutron h1:Dv1VP1+QjYeb6RMo03sxw0Pe42JU0MPxefwNaG22KVs= +github.com/neutron-org/wasmd v0.53.0-neutron/go.mod h1:FJl/aWjdpGof3usAMFQpDe07Rkx77PUzp0cygFMOvtw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -922,8 +924,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= 
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -970,6 +972,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shamaton/msgpack/v2 v2.2.0 h1:IP1m01pHwCrMa6ZccP9B3bqxEMKMSmMVAVKk54g3L/Y= +github.com/shamaton/msgpack/v2 v2.2.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -983,8 +987,8 @@ github.com/skip-mev/chaintestutil v0.0.0-20240514161515-056d7ba45610 h1:4JlsiRVt github.com/skip-mev/chaintestutil v0.0.0-20240514161515-056d7ba45610/go.mod h1:kB8gFZX07CyJnw8q9iEZijI3qJTIe1K/Y++P5VGkrcg= github.com/skip-mev/feemarket v1.1.1 h1:L34K7N2J6o635kzNYRAvQ93+hAFtSiJ2t03jmaNx0zw= github.com/skip-mev/feemarket v1.1.1/go.mod h1:DUa6djUsTeMOrbrcIZqWSVxU9IZNCXp96ruaojyBNpc= -github.com/skip-mev/slinky v1.0.10 h1:QBd/jBxUcV2dq3VERhf5h42cAA0s2awPZGWpHgh0t20= -github.com/skip-mev/slinky v1.0.10/go.mod h1:8mxMdQ8MY8QAxgxLvUKTfDwX6XCAUeqZwkU/r+ZsELU= +github.com/skip-mev/slinky v1.0.12 h1:qmZHB6c5fgDhO/pv67YcZc2M25t3gZcceVmJtA9zjOo= +github.com/skip-mev/slinky v1.0.12/go.mod h1:8mxMdQ8MY8QAxgxLvUKTfDwX6XCAUeqZwkU/r+ZsELU= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -1672,8 +1676,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/proto/neutron/cron/genesis.proto b/proto/neutron/cron/genesis.proto index 61b27069a..eba407f97 100644 --- 
a/proto/neutron/cron/genesis.proto +++ b/proto/neutron/cron/genesis.proto @@ -8,7 +8,7 @@ import "neutron/cron/schedule.proto"; option go_package = "github.com/neutron-org/neutron/v4/x/cron/types"; -// GenesisState defines the cron module's genesis state. +// Defines the cron module's genesis state. message GenesisState { repeated Schedule scheduleList = 2 [(gogoproto.nullable) = false]; Params params = 1 [(gogoproto.nullable) = false]; } diff --git a/proto/neutron/cron/params.proto b/proto/neutron/cron/params.proto index 9bea16a5b..c3c5cf452 100644 --- a/proto/neutron/cron/params.proto +++ b/proto/neutron/cron/params.proto @@ -5,7 +5,7 @@ import "gogoproto/gogo.proto"; option go_package = "github.com/neutron-org/neutron/v4/x/cron/types"; -// Params defines the parameters for the module. +// Defines the parameters for the module. message Params { option (gogoproto.goproto_stringer) = false; // Security address that can remove schedules diff --git a/proto/neutron/cron/query.proto b/proto/neutron/cron/query.proto index 401e9ce9d..60ee505e0 100644 --- a/proto/neutron/cron/query.proto +++ b/proto/neutron/cron/query.proto @@ -10,7 +10,7 @@ import "neutron/cron/schedule.proto"; option go_package = "github.com/neutron-org/neutron/v4/x/cron/types"; -// Query defines the gRPC querier service. +// Defines the gRPC querier service. service Query { // Queries the parameters of the module. rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { @@ -30,25 +30,31 @@ service Query { // this line is used by starport scaffolding # 2 } +// The request type for the Query/Params RPC method. message QueryParamsRequest {} +// The response type for the Query/Params RPC method. message QueryParamsResponse { // params holds all the parameters of this module. Params params = 1 [(gogoproto.nullable) = false]; } +// The request type for the Query/Schedule RPC method. message QueryGetScheduleRequest { string name = 1; } +// The response type for the Query/Schedule RPC method. message QueryGetScheduleResponse { Schedule schedule = 1 [(gogoproto.nullable) = false]; } +// The request type for the Query/Schedules RPC method. message QuerySchedulesRequest { cosmos.base.query.v1beta1.PageRequest pagination = 1; } +// The response type for the Query/Schedules RPC method.
message QuerySchedulesResponse { repeated Schedule schedules = 1 [(gogoproto.nullable) = false]; cosmos.base.query.v1beta1.PageResponse pagination = 2; diff --git a/proto/neutron/cron/schedule.proto b/proto/neutron/cron/schedule.proto index 5df38de34..b6147aed4 100644 --- a/proto/neutron/cron/schedule.proto +++ b/proto/neutron/cron/schedule.proto @@ -5,25 +5,38 @@ import "gogoproto/gogo.proto"; option go_package = "github.com/neutron-org/neutron/v4/x/cron/types"; +// Defines when messages will be executed in the block +enum ExecutionStage { + // Execution at the end of the block + EXECUTION_STAGE_END_BLOCKER = 0; + // Execution at the beginning of the block + EXECUTION_STAGE_BEGIN_BLOCKER = 1; +} + +// Defines the schedule for execution message Schedule { // Name of schedule string name = 1; // Period in blocks uint64 period = 2; - // Msgs that will be executed every period amount of time + // Msgs that will be executed every certain number of blocks, specified in the `period` field repeated MsgExecuteContract msgs = 3 [(gogoproto.nullable) = false]; // Last execution's block height uint64 last_execute_height = 4; + // Stage when messages will be executed + ExecutionStage execution_stage = 5; } +// Defines the contract and the message to pass message MsgExecuteContract { - // Contract is the address of the smart contract + // The address of the smart contract string contract = 1; - // Msg is json encoded message to be passed to the contract + // JSON encoded message to be passed to the contract string msg = 2; } +// Defines the number of current schedules message ScheduleCount { - // Count is the number of current schedules + // The number of current schedules int32 count = 1; } diff --git a/proto/neutron/cron/tx.proto b/proto/neutron/cron/tx.proto index 6bb9d3bf7..fcb9b383a 100644 --- a/proto/neutron/cron/tx.proto +++ b/proto/neutron/cron/tx.proto @@ -6,32 +6,72 @@ import "cosmos/msg/v1/msg.proto"; import "cosmos_proto/cosmos.proto"; import "gogoproto/gogo.proto"; import "neutron/cron/params.proto"; +import "neutron/cron/schedule.proto"; // this line is used by starport scaffolding # proto/tx/import option go_package = "github.com/neutron-org/neutron/v4/x/cron/types"; -// Msg defines the Msg service. +// Defines the Msg service. service Msg { option (cosmos.msg.v1.service) = true; + // Adds new schedule. + rpc AddSchedule(MsgAddSchedule) returns (MsgAddScheduleResponse); + // Removes schedule. + rpc RemoveSchedule(MsgRemoveSchedule) returns (MsgRemoveScheduleResponse); + // Updates the module parameters. rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); // this line is used by starport scaffolding # proto/tx/rpc } +// The MsgAddSchedule request type. +message MsgAddSchedule { + option (amino.name) = "cron/MsgAddSchedule"; + option (cosmos.msg.v1.signer) = "authority"; + + // The address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Name of the schedule + string name = 2; + // Period in blocks + uint64 period = 3; + // Msgs that will be executed every certain number of blocks, specified in the `period` field + repeated MsgExecuteContract msgs = 4 [(gogoproto.nullable) = false]; + // Stage when messages will be executed + ExecutionStage execution_stage = 5; +} + +// Defines the response structure for executing a MsgAddSchedule message. +message MsgAddScheduleResponse {} + +// The MsgRemoveSchedule request type. 
+message MsgRemoveSchedule { + option (amino.name) = "cron/MsgRemoveSchedule"; + option (cosmos.msg.v1.signer) = "authority"; + + // The address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Name of the schedule + string name = 2; +} + +// Defines the response structure for executing a MsgRemoveSchedule message. +message MsgRemoveScheduleResponse {} + // this line is used by starport scaffolding # proto/tx/message -// MsgUpdateParams is the MsgUpdateParams request type. +// The MsgUpdateParams request type. // // Since: 0.47 message MsgUpdateParams { option (amino.name) = "cron/MsgUpdateParams"; option (cosmos.msg.v1.signer) = "authority"; - // Authority is the address of the governance account. + // The address of the governance account. string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; - // params defines the x/cron parameters to update. + // Defines the x/cron parameters to update. // // NOTE: All parameters must be supplied. Params params = 2 [ @@ -40,8 +80,7 @@ message MsgUpdateParams { ]; } -// MsgUpdateParamsResponse defines the response structure for executing a -// MsgUpdateParams message. +// Defines the response structure for executing a MsgUpdateParams message. // // Since: 0.47 message MsgUpdateParamsResponse {} diff --git a/proto/neutron/cron/v1/schedule.proto b/proto/neutron/cron/v1/schedule.proto new file mode 100644 index 000000000..8995431fe --- /dev/null +++ b/proto/neutron/cron/v1/schedule.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; +package neutron.cron.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/neutron-org/neutron/v4/x/cron/types/v1"; + +// Defines the schedule for execution +message Schedule { + // Name of schedule + string name = 1; + // Period in blocks + uint64 period = 2; + // Msgs that will be executed every certain number of blocks, specified in the `period` field + repeated MsgExecuteContract msgs = 3 [(gogoproto.nullable) = false]; + // Last execution's block height + uint64 last_execute_height = 4; +} + +// Defines the contract and the message to pass +message MsgExecuteContract { + // The address of the smart contract + string contract = 1; + // JSON encoded message to be passed to the contract + string msg = 2; +} + +// Defines the number of current schedules +message ScheduleCount { + // The number of current schedules + int32 count = 1; +} diff --git a/tests/feemarket/go.sum b/tests/feemarket/go.sum index e47067c01..e2c9ef1fc 100644 --- a/tests/feemarket/go.sum +++ b/tests/feemarket/go.sum @@ -1050,6 +1050,7 @@ github.com/skip-mev/chaintestutil v0.0.0-20240514161515-056d7ba45610 h1:4JlsiRVt github.com/skip-mev/chaintestutil v0.0.0-20240514161515-056d7ba45610/go.mod h1:kB8gFZX07CyJnw8q9iEZijI3qJTIe1K/Y++P5VGkrcg= github.com/skip-mev/feemarket v1.1.0 h1:3z/3Mplmk4t1C/IjghC+OE361L9n8dR3Xr7bXIcS7ec= github.com/skip-mev/feemarket v1.1.0/go.mod h1:CVsCaHxJDK4y271c1Dan6Z8G2QaOyWJLoSBnDEPon40= +github.com/skip-mev/feemarket v1.1.1/go.mod h1:DUa6djUsTeMOrbrcIZqWSVxU9IZNCXp96ruaojyBNpc= github.com/skip-mev/feemarket/tests/e2e v1.10.0 h1:oKAZSo+rynd2b7+T8/U+4C+h//rrTdjLICG2Awjk8YA= github.com/skip-mev/feemarket/tests/e2e v1.10.0/go.mod h1:57BURopGhr+L0zDkhj1E9jzP9W8rMzRb3b+MT+trlB4= github.com/skip-mev/interchaintest/v8 v8.0.1-0.20240611183342-72ec508eb966 h1:X5BD7m4QieHlORqGho1Af8r0O1GSWBRYO330xyu2kzQ= diff --git a/tests/ibc/gmp_swap_forward_test.go b/tests/ibc/gmp_swap_forward_test.go deleted file mode 100644 index c337c953c..000000000 --- 
a/tests/ibc/gmp_swap_forward_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package ibc_test - -import ( - "encoding/json" - - "cosmossdk.io/math" - - "github.com/neutron-org/neutron/v4/x/dex/types" - "github.com/neutron-org/neutron/v4/x/gmp" - swaptypes "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -// TestGMPSwapAndForward_Success asserts that the swap middleware works as intended when the original message is sent via GMP -func (s *IBCTestSuite) TestGMPSwapAndForward_Success() { - // Send an IBC transfer from provider to Neutron, so we can initialize a pool with the IBC denom token + native Neutron token - s.IBCTransferProviderToNeutron(s.providerAddr, s.neutronAddr, nativeDenom, ibcTransferAmount, "") - - // Assert that the funds are gone from the acc on provider and present in the acc on Neutron - newProviderBalNative := genesisWalletAmount.Sub(ibcTransferAmount) - s.assertProviderBalance(s.providerAddr, nativeDenom, newProviderBalNative) - - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, ibcTransferAmount) - - // deposit stake<>ibcTransferToken to initialize the pool on Neutron - depositAmount := math.NewInt(100_000) - postDepositNeutronBalNative := genesisWalletAmount.Sub(depositAmount) - s.neutronDeposit( - nativeDenom, - s.providerToNeutronDenom, - depositAmount, - depositAmount, - 0, - 1, - s.neutronAddr) - - // Compose the IBC transfer memo metadata to be used in the swap and forward - swapAmount := math.NewInt(100000) - expectedOut := math.NewInt(99990) - - swapMetadata := swaptypes.PacketMetadata{ - Swap: &swaptypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: s.neutronAddr.String(), - Receiver: s.neutronAddr.String(), - TokenIn: s.providerToNeutronDenom, - TokenOut: nativeDenom, - AmountIn: swapAmount, - TickIndexInToOut: 2, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - }, - } - swapMetadataBz, err := json.Marshal(swapMetadata) - - s.Require().NoError(err) - - gmpMetadata := gmp.Message{ - SourceChain: "axelar", - SourceAddress: "alice", - Payload: swapMetadataBz, - Type: gmp.TypeGeneralMessageWithToken, - } - - gmpMetadataBz, err := json.Marshal(gmpMetadata) - s.Require().NoError(err) - - // Send an IBC transfer from chainA to chainB with GMP payload containing the swap metadata - - s.IBCTransferProviderToNeutron(s.providerAddr, s.neutronAddr, nativeDenom, ibcTransferAmount, string(gmpMetadataBz)) - - // Check that the funds are moved out of the acc on providerChain - s.assertProviderBalance( - s.providerAddr, - nativeDenom, - newProviderBalNative.Sub(ibcTransferAmount), - ) - - // Check that the swap funds are now present in the acc on Neutron - s.assertNeutronBalance(s.neutronAddr, nativeDenom, postDepositNeutronBalNative.Add(expectedOut)) - - // Check that the overrideReceiver did not keep anything - overrideAddr := s.ReceiverOverrideAddr(s.neutronTransferPath.EndpointA.ChannelID, s.providerAddr.String()) - s.assertNeutronBalance(overrideAddr, s.providerToNeutronDenom, math.ZeroInt()) - s.assertNeutronBalance(overrideAddr, s.providerToNeutronDenom, math.ZeroInt()) - - // Check that nothing is credited to the original creator - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, math.ZeroInt()) -} diff --git a/tests/ibc/ibc_setup_test.go b/tests/ibc/ibc_setup_test.go index 54264e685..51477e833 100644 --- a/tests/ibc/ibc_setup_test.go +++ b/tests/ibc/ibc_setup_test.go @@ -5,7 +5,6 @@ import ( "cosmossdk.io/math" sdk "github.com/cosmos/cosmos-sdk/types" - 
"github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8/packetforward" transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types" clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" //nolint:staticcheck channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" @@ -20,12 +19,10 @@ import ( appparams "github.com/neutron-org/neutron/v4/app/params" "github.com/neutron-org/neutron/v4/testutil" - dextypes "github.com/neutron-org/neutron/v4/x/dex/types" ) var ( nativeDenom = appparams.DefaultDenom - ibcTransferAmount = math.NewInt(100_000) genesisWalletAmount, _ = math.NewIntFromString("10000000000000000000") ) @@ -338,38 +335,3 @@ func (s *IBCTestSuite) assertChainBBalance(addr sdk.AccAddress, denom string, ex func (s *IBCTestSuite) assertChainCBalance(addr sdk.AccAddress, denom string, expectedAmt math.Int) { s.assertBalance(s.bundleC.App.GetTestBankKeeper(), s.bundleC.Chain, addr, denom, expectedAmt) } - -func (s *IBCTestSuite) ReceiverOverrideAddr(channel, sender string) sdk.AccAddress { - addr, err := packetforward.GetReceiver(channel, sender) - if err != nil { - panic("Cannot calc receiver override: " + err.Error()) - } - return sdk.MustAccAddressFromBech32(addr) -} - -func (s *IBCTestSuite) neutronDeposit( - token0 string, - token1 string, - depositAmount0 math.Int, - depositAmount1 math.Int, - tickIndex int64, - fee uint64, - creator sdk.AccAddress, -) { - // create deposit msg - msgDeposit := dextypes.NewMsgDeposit( - creator.String(), - creator.String(), - token0, - token1, - []math.Int{depositAmount0}, - []math.Int{depositAmount1}, - []int64{tickIndex}, - []uint64{fee}, - []*dextypes.DepositOptions{{DisableAutoswap: false}}, - ) - - // execute deposit msg - _, err := s.neutronChain.SendMsgs(msgDeposit) - s.Assert().NoError(err, "Deposit Failed") -} diff --git a/tests/ibc/swap_forward_test.go b/tests/ibc/swap_forward_test.go deleted file mode 100644 index 2385da68d..000000000 --- a/tests/ibc/swap_forward_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package ibc_test - -import ( - "encoding/json" - "time" - - "cosmossdk.io/math" - pfmtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8/packetforward/types" - - "github.com/neutron-org/neutron/v4/x/dex/types" - swaptypes "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -// TestSwapAndForward_Fails asserts that the IBC swap middleware fails gracefully when provided with a package-forward memo -func (s *IBCTestSuite) TestSwapAndForward_Fails() { - // Send an IBC transfer from provider chain to neutron, so we can initialize a pool with the IBC denom token + native Neutron token - s.IBCTransferProviderToNeutron( - s.providerAddr, - s.neutronAddr, - nativeDenom, - ibcTransferAmount, - "", - ) - newProviderBalNative := genesisWalletAmount.Sub(ibcTransferAmount) - - // deposit stake<>ibcTransferToken to initialize the pool on Neutron - depositAmount := math.NewInt(100_000) - s.neutronDeposit( - nativeDenom, - s.providerToNeutronDenom, - depositAmount, - depositAmount, - 0, - 1, - s.neutronAddr) - - postDepositNeutronBalNative := genesisWalletAmount.Sub(depositAmount) - - // Compose the IBC transfer memo metadata to be used in the swap and forward - swapAmount := math.NewInt(100000) - chainBAddr := s.bundleB.Chain.SenderAccount.GetAddress() - - retries := uint8(0) - - forwardMetadata := pfmtypes.PacketMetadata{ - Forward: &pfmtypes.ForwardMetadata{ - Receiver: chainBAddr.String(), - Port: s.neutronChainBPath.EndpointA.ChannelConfig.PortID, - Channel: 
s.neutronChainBPath.EndpointA.ChannelID, - Timeout: pfmtypes.Duration(5 * time.Minute), - Retries: &retries, - Next: nil, - }, - } - - bz, err := json.Marshal(forwardMetadata) - s.Assert().NoError(err) - - nextJSON := new(swaptypes.JSONObject) - err = json.Unmarshal(bz, nextJSON) - s.Assert().NoError(err) - - metadata := swaptypes.PacketMetadata{ - Swap: &swaptypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: s.neutronAddr.String(), - Receiver: s.neutronAddr.String(), - TokenIn: s.providerToNeutronDenom, - TokenOut: nativeDenom, - AmountIn: swapAmount, - TickIndexInToOut: 2, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nextJSON, - }, - } - - metadataBz, err := json.Marshal(metadata) - s.Require().NoError(err) - - // Send (failing) IBC transfer with PFM data - s.IBCTransferProviderToNeutron( - s.providerAddr, - s.neutronAddr, - nativeDenom, - ibcTransferAmount, - string(metadataBz), - ) - - // Check that the funds are not present in the account on Neutron - s.assertNeutronBalance(s.neutronAddr, nativeDenom, postDepositNeutronBalNative) - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, math.ZeroInt()) - - // Check that the refund takes place and the funds are moved back to the account on Gaia - s.assertProviderBalance(s.providerAddr, nativeDenom, newProviderBalNative) -} diff --git a/tests/ibc/swap_test.go b/tests/ibc/swap_test.go deleted file mode 100644 index bd8f47ca7..000000000 --- a/tests/ibc/swap_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package ibc_test - -import ( - "encoding/json" - - "cosmossdk.io/math" - - dextypes "github.com/neutron-org/neutron/v4/x/dex/types" - swaptypes "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -// TestIBCSwapMiddleware_Success asserts that the IBC swap middleware works as intended with Neutron running as a -// consumer chain connected to the Cosmos Hub. 
-func (s *IBCTestSuite) TestIBCSwapMiddleware_Success() { - // Send an IBC transfer from provider to Neutron, so we can initialize a pool with the IBC denom token + native Neutron token - s.IBCTransferProviderToNeutron( - s.providerAddr, - s.neutronAddr, - nativeDenom, - ibcTransferAmount, - "", - ) - - // Assert that the funds are gone from the acc on provider and present in the acc on Neutron - newProviderBalNative := genesisWalletAmount.Sub(ibcTransferAmount) - s.assertProviderBalance(s.providerAddr, nativeDenom, newProviderBalNative) - - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, ibcTransferAmount) - - // deposit stake<>ibcTransferToken to initialize the pool on Neutron - depositAmount := math.NewInt(100_000) - s.neutronDeposit( - nativeDenom, - s.providerToNeutronDenom, - depositAmount, - depositAmount, - 0, - 1, - s.neutronAddr) - - // Assert that the deposit was successful and the funds are moved out of the Neutron user acc - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, math.ZeroInt()) - postDepositNeutronBalNative := genesisWalletAmount.Sub(depositAmount) - s.assertNeutronBalance(s.neutronAddr, nativeDenom, postDepositNeutronBalNative) - - // Send an IBC transfer from providerChain to Neutron with packet memo containing the swap metadata - swapAmount := math.NewInt(100000) - expectedOut := math.NewInt(99_990) - - metadata := swaptypes.PacketMetadata{ - Swap: &swaptypes.SwapMetadata{ - MsgPlaceLimitOrder: &dextypes.MsgPlaceLimitOrder{ - Creator: s.neutronAddr.String(), - Receiver: s.neutronAddr.String(), - TokenIn: s.providerToNeutronDenom, - TokenOut: nativeDenom, - AmountIn: swapAmount, - TickIndexInToOut: 2, - OrderType: dextypes.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - - metadataBz, err := json.Marshal(metadata) - s.Require().NoError(err) - - s.IBCTransferProviderToNeutron( - s.providerAddr, - s.neutronAddr, - nativeDenom, - ibcTransferAmount, - string(metadataBz), - ) - - // Check that the funds are moved out of the acc on providerChain - s.assertProviderBalance( - s.providerAddr, - nativeDenom, - newProviderBalNative.Sub(ibcTransferAmount), - ) - - // Check that the swap funds are now present in the acc on Neutron - s.assertNeutronBalance(s.neutronAddr, nativeDenom, postDepositNeutronBalNative.Add(expectedOut)) - - // Check that the overrideReceiver did not keep anything - overrideAddr := s.ReceiverOverrideAddr(s.neutronTransferPath.EndpointA.ChannelID, s.providerAddr.String()) - s.assertNeutronBalance(overrideAddr, s.providerToNeutronDenom, math.ZeroInt()) - s.assertNeutronBalance(overrideAddr, s.providerToNeutronDenom, math.ZeroInt()) - - // Check that nothing credited to the original creator - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, math.ZeroInt()) -} - -// TestIBCSwapMiddleware_FailRefund asserts that the IBC swap middleware works as intended with Neutron running as a -// consumer chain connected to the Cosmos Hub. The swap should fail and a refund to the src chain should take place. 
-func (s *IBCTestSuite) TestIBCSwapMiddleware_FailRefund() { - // Compose the swap metadata, this swap will fail because there is no pool initialized for this pair - swapAmount := math.NewInt(100000) - metadata := swaptypes.PacketMetadata{ - Swap: &swaptypes.SwapMetadata{ - MsgPlaceLimitOrder: &dextypes.MsgPlaceLimitOrder{ - Creator: s.neutronAddr.String(), - Receiver: s.neutronAddr.String(), - TokenIn: s.providerToNeutronDenom, - TokenOut: nativeDenom, - AmountIn: swapAmount, - TickIndexInToOut: 1, - OrderType: dextypes.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - - metadataBz, err := json.Marshal(metadata) - s.Require().NoError(err) - - // Send (failing) IBC transfer with swap metadata - s.IBCTransferProviderToNeutron( - s.providerAddr, - s.neutronAddr, - nativeDenom, - ibcTransferAmount, - string(metadataBz), - ) - - // Check that the funds are not present in the account on Neutron - s.assertNeutronBalance(s.neutronAddr, nativeDenom, genesisWalletAmount) - s.assertNeutronBalance(s.neutronAddr, s.providerToNeutronDenom, math.ZeroInt()) - - // Check that the refund takes place and the funds are moved back to the account on Gaia - s.assertProviderBalance(s.providerAddr, nativeDenom, genesisWalletAmount) -} - -func (s *IBCTestSuite) TestIBCSwapMiddleware_FailWithRefundAddr() { - // Compose the swap metadata, this swap will fail because there is no pool initialized for this pair - refundAddr := s.neutronChain.SenderAccounts[1].SenderAccount.GetAddress() - swapAmount := math.NewInt(100000) - metadata := swaptypes.PacketMetadata{ - Swap: &swaptypes.SwapMetadata{ - MsgPlaceLimitOrder: &dextypes.MsgPlaceLimitOrder{ - Creator: s.neutronAddr.String(), - Receiver: s.neutronAddr.String(), - TokenIn: s.providerToNeutronDenom, - TokenOut: nativeDenom, - AmountIn: swapAmount, - TickIndexInToOut: 1, - OrderType: dextypes.LimitOrderType_FILL_OR_KILL, - }, - NeutronRefundAddress: refundAddr.String(), - Next: nil, - }, - } - - metadataBz, err := json.Marshal(metadata) - s.Require().NoError(err) - - // Send (failing) IBC transfer with swap metadata - s.IBCTransferProviderToNeutron( - s.providerAddr, - s.neutronAddr, - nativeDenom, - ibcTransferAmount, - string(metadataBz), - ) - - // Check that the funds have been moved to the refund address - s.assertNeutronBalance(refundAddr, nativeDenom, genesisWalletAmount) - s.assertNeutronBalance(refundAddr, s.providerToNeutronDenom, ibcTransferAmount) - - // Check that no refund takes place and the funds are not in the account on provider - s.assertProviderBalance(s.providerAddr, nativeDenom, genesisWalletAmount.Sub(ibcTransferAmount)) -} diff --git a/third_party/proto/ibc/core/client/v1/genesis.proto b/third_party/proto/ibc/core/client/v1/genesis.proto new file mode 100644 index 000000000..a16d5a709 --- /dev/null +++ b/third_party/proto/ibc/core/client/v1/genesis.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package ibc.core.client.v1; + +option go_package = "github.com/cosmos/ibc-go/v8/modules/core/02-client/types"; + +import "ibc/core/client/v1/client.proto"; +import "gogoproto/gogo.proto"; + +// GenesisState defines the ibc client submodule's genesis state. 
+message GenesisState { + // client states with their corresponding identifiers + repeated IdentifiedClientState clients = 1 + [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "IdentifiedClientStates"]; + // consensus states from each client + repeated ClientConsensusStates clients_consensus = 2 + [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "ClientsConsensusStates"]; + // metadata from each client + repeated IdentifiedGenesisMetadata clients_metadata = 3 [(gogoproto.nullable) = false]; + Params params = 4 [(gogoproto.nullable) = false]; + // Deprecated: create_localhost has been deprecated. + // The localhost client is automatically created at genesis. + bool create_localhost = 5 [deprecated = true]; + // the sequence for the next generated client identifier + uint64 next_client_sequence = 6; +} + +// GenesisMetadata defines the genesis type for metadata that clients may return +// with ExportMetadata +message GenesisMetadata { + option (gogoproto.goproto_getters) = false; + + // store key of metadata without clientID-prefix + bytes key = 1; + // metadata value + bytes value = 2; +} + +// IdentifiedGenesisMetadata has the client metadata with the corresponding +// client id. +message IdentifiedGenesisMetadata { + string client_id = 1; + repeated GenesisMetadata client_metadata = 2 [(gogoproto.nullable) = false]; +} diff --git a/third_party/proto/ibc/core/commitment/v1/commitment.proto b/third_party/proto/ibc/core/commitment/v1/commitment.proto new file mode 100644 index 000000000..b4753be2d --- /dev/null +++ b/third_party/proto/ibc/core/commitment/v1/commitment.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package ibc.core.commitment.v1; + +option go_package = "github.com/cosmos/ibc-go/v8/modules/core/23-commitment/types"; + +import "gogoproto/gogo.proto"; +import "cosmos/ics23/v1/proofs.proto"; + +// MerkleRoot defines a merkle root hash. +// In the Cosmos SDK, the AppHash of a block header becomes the root. +message MerkleRoot { + option (gogoproto.goproto_getters) = false; + + bytes hash = 1; +} + +// MerklePrefix is merkle path prefixed to the key. +// The constructed key from the Path and the key will be append(Path.KeyPath, +// append(Path.KeyPrefix, key...)) +message MerklePrefix { + bytes key_prefix = 1; +} + +// MerklePath is the path used to verify commitment proofs, which can be an +// arbitrary structured object (defined by a commitment type). +// MerklePath is represented from root-to-leaf +message MerklePath { + repeated string key_path = 1; +} + +// MerkleProof is a wrapper type over a chain of CommitmentProofs. +// It demonstrates membership or non-membership for an element or set of +// elements, verifiable in conjunction with a known commitment root. Proofs +// should be succinct. 
+// MerkleProofs are ordered from leaf-to-root +message MerkleProof { + repeated cosmos.ics23.v1.CommitmentProof proofs = 1; +} diff --git a/wasmbinding/bindings/msg.go b/wasmbinding/bindings/msg.go index 2aee5622a..8c57b202a 100644 --- a/wasmbinding/bindings/msg.go +++ b/wasmbinding/bindings/msg.go @@ -196,9 +196,10 @@ type ForceTransfer struct { // AddSchedule adds new schedule to the cron module type AddSchedule struct { - Name string `json:"name"` - Period uint64 `json:"period"` - Msgs []MsgExecuteContract `json:"msgs"` + Name string `json:"name"` + Period uint64 `json:"period"` + Msgs []MsgExecuteContract `json:"msgs"` + ExecutionStage string `json:"execution_stage"` } // AddScheduleResponse holds response AddSchedule diff --git a/wasmbinding/message_plugin.go b/wasmbinding/message_plugin.go index 30d53717d..8e00248d9 100644 --- a/wasmbinding/message_plugin.go +++ b/wasmbinding/message_plugin.go @@ -73,7 +73,8 @@ func CustomMessageDecorator( Adminserver: adminmodulekeeper.NewMsgServerImpl(*adminKeeper), Bank: bankKeeper, TokenFactory: tokenFactoryKeeper, - CronKeeper: cronKeeper, + CronMsgServer: cronkeeper.NewMsgServerImpl(*cronKeeper), + CronQueryServer: cronKeeper, AdminKeeper: adminKeeper, ContractmanagerKeeper: contractmanagerKeeper, DexMsgServer: dexkeeper.NewMsgServerImpl(*dexKeeper), @@ -90,7 +91,8 @@ type CustomMessenger struct { Adminserver admintypes.MsgServer Bank *bankkeeper.BaseKeeper TokenFactory *tokenfactorykeeper.Keeper - CronKeeper *cronkeeper.Keeper + CronMsgServer crontypes.MsgServer + CronQueryServer crontypes.QueryServer AdminKeeper *adminmodulekeeper.Keeper ContractmanagerKeeper *contractmanagerkeeper.Keeper DexMsgServer dextypes.MsgServer @@ -989,6 +991,8 @@ func (m *CustomMessenger) addSchedule(ctx sdk.Context, contractAddr sdk.AccAddre return nil, nil, nil, errors.Wrap(sdkerrors.ErrUnauthorized, "only admin can add schedule") } + authority := authtypes.NewModuleAddress(admintypes.ModuleName) + msgs := make([]crontypes.MsgExecuteContract, 0, len(addSchedule.Msgs)) for _, msg := range addSchedule.Msgs { msgs = append(msgs, crontypes.MsgExecuteContract{ @@ -997,13 +1001,20 @@ func (m *CustomMessenger) addSchedule(ctx sdk.Context, contractAddr sdk.AccAddre }) } - err := m.CronKeeper.AddSchedule(ctx, addSchedule.Name, addSchedule.Period, msgs) + _, err := m.CronMsgServer.AddSchedule(ctx, &crontypes.MsgAddSchedule{ + Authority: authority.String(), + Name: addSchedule.Name, + Period: addSchedule.Period, + Msgs: msgs, + ExecutionStage: crontypes.ExecutionStage(crontypes.ExecutionStage_value[addSchedule.ExecutionStage]), + }) if err != nil { ctx.Logger().Error("failed to addSchedule", "from_address", contractAddr.String(), + "name", addSchedule.Name, "error", err, ) - return nil, nil, nil, errors.Wrap(err, "marshal json failed") + return nil, nil, nil, errors.Wrapf(err, "failed to add %s schedule", addSchedule.Name) } ctx.Logger().Debug("schedule added", @@ -1016,12 +1027,30 @@ func (m *CustomMessenger) addSchedule(ctx sdk.Context, contractAddr sdk.AccAddre } func (m *CustomMessenger) removeSchedule(ctx sdk.Context, contractAddr sdk.AccAddress, removeSchedule *bindings.RemoveSchedule) ([]sdk.Event, [][]byte, [][]*types.Any, error) { - params := m.CronKeeper.GetParams(ctx) - if !m.isAdmin(ctx, contractAddr) && contractAddr.String() != params.SecurityAddress { + params, err := m.CronQueryServer.Params(ctx, &crontypes.QueryParamsRequest{}) + if err != nil { + ctx.Logger().Error("failed to removeSchedule", "error", err) + return nil, nil, nil, errors.Wrap(err, "failed to 
removeSchedule") + } + + if !m.isAdmin(ctx, contractAddr) && contractAddr.String() != params.Params.SecurityAddress { return nil, nil, nil, errors.Wrap(sdkerrors.ErrUnauthorized, "only admin or security dao can remove schedule") } - m.CronKeeper.RemoveSchedule(ctx, removeSchedule.Name) + authority := authtypes.NewModuleAddress(admintypes.ModuleName) + + _, err = m.CronMsgServer.RemoveSchedule(ctx, &crontypes.MsgRemoveSchedule{ + Authority: authority.String(), + Name: removeSchedule.Name, + }) + if err != nil { + ctx.Logger().Error("failed to removeSchedule", + "from_address", contractAddr.String(), + "name", removeSchedule.Name, + "error", err, + ) + return nil, nil, nil, errors.Wrapf(err, "failed to remove %s schedule", removeSchedule.Name) + } ctx.Logger().Debug("schedule removed", "from_address", contractAddr.String(), diff --git a/wasmbinding/stargate_allowlist.go b/wasmbinding/stargate_allowlist.go index 13d8ab761..1e591374a 100644 --- a/wasmbinding/stargate_allowlist.go +++ b/wasmbinding/stargate_allowlist.go @@ -41,7 +41,8 @@ func AcceptedStargateQueries() wasmkeeper.AcceptedQueries { "/ibc.applications.interchain_accounts.controller.v1.Query/InterchainAccount": &icacontrollertypes.QueryInterchainAccountResponse{}, // transfer - "/ibc.applications.transfer.v1.Query/DenomTrace": &ibctransfertypes.QueryDenomTraceResponse{}, + "/ibc.applications.transfer.v1.Query/DenomTrace": &ibctransfertypes.QueryDenomTraceResponse{}, + "/ibc.applications.transfer.v1.Query/EscrowAddress": &ibctransfertypes.QueryEscrowAddressResponse{}, // auth "/cosmos.auth.v1beta1.Query/Account": &authtypes.QueryAccountResponse{}, diff --git a/wasmbinding/test/custom_message_test.go b/wasmbinding/test/custom_message_test.go index 446c7159f..7ed2fc34a 100644 --- a/wasmbinding/test/custom_message_test.go +++ b/wasmbinding/test/custom_message_test.go @@ -3,9 +3,15 @@ package test import ( "encoding/json" "fmt" + "strings" "testing" + transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" + ibctesting "github.com/cosmos/ibc-go/v8/testing" + contractmanagertypes "github.com/neutron-org/neutron/v4/x/contractmanager/types" + types2 "github.com/neutron-org/neutron/v4/x/cron/types" "cosmossdk.io/math" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" @@ -23,6 +29,8 @@ import ( adminkeeper "github.com/cosmos/admin-module/v2/x/adminmodule/keeper" + cronkeeper "github.com/neutron-org/neutron/v4/x/cron/keeper" + "github.com/neutron-org/neutron/v4/app/params" "github.com/CosmWasm/wasmd/x/wasm/keeper" @@ -67,7 +75,8 @@ func (suite *CustomMessengerTestSuite) SetupTest() { suite.messenger.Adminserver = adminkeeper.NewMsgServerImpl(suite.neutron.AdminmoduleKeeper) suite.messenger.Bank = &suite.neutron.BankKeeper suite.messenger.TokenFactory = suite.neutron.TokenFactoryKeeper - suite.messenger.CronKeeper = &suite.neutron.CronKeeper + suite.messenger.CronMsgServer = cronkeeper.NewMsgServerImpl(suite.neutron.CronKeeper) + suite.messenger.CronQueryServer = suite.neutron.CronKeeper suite.messenger.AdminKeeper = &suite.neutron.AdminmoduleKeeper suite.messenger.ContractmanagerKeeper = &suite.neutron.ContractManagerKeeper suite.contractOwner = keeper.RandomAccountAddress(suite.T()) @@ -75,7 +84,7 @@ func (suite *CustomMessengerTestSuite) SetupTest() { suite.contractKeeper = keeper.NewDefaultPermissionKeeper(&suite.neutron.WasmKeeper) err := suite.messenger.TokenFactory.SetParams(suite.ctx, tokenfactorytypes.NewParams( - 
sdk.NewCoins(sdk.NewInt64Coin(params.DefaultDenom, 10_000_000)), + sdk.NewCoins(sdk.NewInt64Coin(params.DefaultDenom, 100)), 0, FeeCollectorAddress, tokenfactorytypes.DefaultWhitelistedHooks, @@ -718,11 +727,109 @@ func (suite *CustomMessengerTestSuite) TestAddRemoveSchedule() { }, } + schedule, ok := suite.neutron.CronKeeper.GetSchedule(suite.ctx, "schedule1") + suite.True(ok) + suite.Equal(types2.ExecutionStage_EXECUTION_STAGE_END_BLOCKER, schedule.ExecutionStage) + // Dispatch AddSchedule message _, err = suite.executeNeutronMsg(suite.contractAddress, msg) suite.NoError(err) } +func (suite *CustomMessengerTestSuite) TestBurnTokens() { + // add NTRN to the contract + senderAddress := suite.ChainA.SenderAccounts[0].SenderAccount.GetAddress() + coinsAmnt := sdk.NewCoins(sdk.NewCoin(params.DefaultDenom, math.NewInt(int64(10_000_000)))) + bankKeeper := suite.neutron.BankKeeper + err := bankKeeper.SendCoins(suite.ctx, senderAddress, suite.contractAddress, coinsAmnt) + suite.NoError(err) + + suite.ConfigureTransferChannel() + + // add IBC denom to the contract + // Create Transfer Msg + transferMsg := transfertypes.NewMsgTransfer(suite.TransferPath.EndpointB.ChannelConfig.PortID, + suite.TransferPath.EndpointB.ChannelID, + sdk.NewCoin(params.DefaultDenom, math.NewInt(100)), + suite.ChainB.SenderAccounts[0].SenderAccount.GetAddress().String(), + strings.TrimSpace(suite.contractAddress.String()), + clienttypes.NewHeight(1, 110), + 0, + "", + ) + + // Send message from chainB to chainA + res, err := suite.TransferPath.EndpointB.Chain.SendMsgs(transferMsg) + suite.Require().NoError(err) + + // Relay transfer msg to Neutron chain + packet, err := ibctesting.ParsePacketFromEvents(res.GetEvents()) + suite.Require().NoError(err) + + suite.Require().NoError(suite.TransferPath.RelayPacket(packet)) + // ----------------------------------- + + // Add tf token to the contract + // Create denom for minting + fullMsg := bindings.NeutronMsg{ + CreateDenom: &bindings.CreateDenom{ + Subdenom: "tfdenom", + }, + } + + _, err = suite.executeNeutronMsg(suite.contractAddress, fullMsg) + suite.NoError(err) + + tfDenom := fmt.Sprintf("factory/%s/%s", suite.contractAddress.String(), fullMsg.CreateDenom.Subdenom) + + amount, ok := math.NewIntFromString("808010808") + require.True(suite.T(), ok) + + fullMsg = bindings.NeutronMsg{ + MintTokens: &bindings.MintTokens{ + Denom: tfDenom, + Amount: amount, + MintToAddress: suite.contractAddress.String(), + }, + } + + _, err = suite.executeNeutronMsg(suite.contractAddress, fullMsg) + suite.NoError(err) + + type testCase struct { + Name string + CoinToBurn sdk.Coin + } + + ibcTokenDenomHash, err := suite.neutron.TransferKeeper.DenomHash( + suite.ctx, + &transfertypes.QueryDenomHashRequest{Trace: ibctesting.TransferPort + "/" + suite.TransferPath.EndpointA.ChannelID + "/" + params.DefaultDenom}) + suite.Require().NoError(err) + + testcases := []testCase{ + {Name: "burn NTRN", CoinToBurn: sdk.NewCoin(params.DefaultDenom, math.NewInt(1000))}, + {Name: "burn tf denom", CoinToBurn: sdk.NewCoin(tfDenom, math.NewInt(1000))}, + {Name: "burn ibc denom", CoinToBurn: sdk.NewCoin("ibc/"+ibcTokenDenomHash.Hash, math.NewInt(50))}, + } + + for _, tc := range testcases { + suite.Run(tc.Name, func() { + balanceBeforeBurn := bankKeeper.GetBalance(suite.ctx, suite.contractAddress, tc.CoinToBurn.Denom) + + // Craft Burn message + msg := types.CosmosMsg{ + Bank: &types.BankMsg{Burn: &types.BurnMsg{Amount: types.Array[types.Coin]{types.Coin{Amount: tc.CoinToBurn.Amount.String(), Denom: 
tc.CoinToBurn.Denom}}}}, + } + + // Dispatch Burn message + _, err = suite.executeMsg(suite.contractAddress, msg) + suite.NoError(err) + + suite.Require().Equal(balanceBeforeBurn.Sub(tc.CoinToBurn), bankKeeper.GetBalance(suite.ctx, suite.contractAddress, tc.CoinToBurn.Denom)) + }) + } +} + func (suite *CustomMessengerTestSuite) TestResubmitFailureAck() { // Add failure packet := ibcchanneltypes.Packet{} @@ -804,6 +911,10 @@ func (suite *CustomMessengerTestSuite) executeCustomMsg(contractAddress sdk.AccA Custom: fullMsg, } + return suite.executeMsg(contractAddress, customMsg) +} + +func (suite *CustomMessengerTestSuite) executeMsg(contractAddress sdk.AccAddress, fullMsg types.CosmosMsg) (data []byte, err error) { type ExecuteMsg struct { ReflectMsg struct { Msgs []types.CosmosMsg `json:"msgs"` @@ -812,7 +923,7 @@ func (suite *CustomMessengerTestSuite) executeCustomMsg(contractAddress sdk.AccA execMsg := ExecuteMsg{ReflectMsg: struct { Msgs []types.CosmosMsg `json:"msgs"` - }(struct{ Msgs []types.CosmosMsg }{Msgs: []types.CosmosMsg{customMsg}})} + }(struct{ Msgs []types.CosmosMsg }{Msgs: []types.CosmosMsg{fullMsg}})} msg, err := json.Marshal(execMsg) suite.NoError(err) diff --git a/x/cron/genesis.go b/x/cron/genesis.go index 974790399..525840e26 100644 --- a/x/cron/genesis.go +++ b/x/cron/genesis.go @@ -11,7 +11,7 @@ import ( func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { // Set all the schedules for _, elem := range genState.ScheduleList { - err := k.AddSchedule(ctx, elem.Name, elem.Period, elem.Msgs) + err := k.AddSchedule(ctx, elem.Name, elem.Period, elem.Msgs, elem.ExecutionStage) if err != nil { panic(err) } diff --git a/x/cron/keeper/grpc_query_schedule_test.go b/x/cron/keeper/grpc_query_schedule_test.go index b778f8fba..a132fc8d4 100644 --- a/x/cron/keeper/grpc_query_schedule_test.go +++ b/x/cron/keeper/grpc_query_schedule_test.go @@ -133,8 +133,9 @@ func createNSchedule(t *testing.T, ctx sdk.Context, k *cronkeeper.Keeper, n int3 item.Period = 1000 item.Msgs = nil item.LastExecuteHeight = uint64(ctx.BlockHeight()) + item.ExecutionStage = types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER - err := k.AddSchedule(ctx, item.Name, item.Period, item.Msgs) + err := k.AddSchedule(ctx, item.Name, item.Period, item.Msgs, item.ExecutionStage) require.NoError(t, err) res[idx] = item diff --git a/x/cron/keeper/keeper.go b/x/cron/keeper/keeper.go index 2be2f49ee..070f18b2d 100644 --- a/x/cron/keeper/keeper.go +++ b/x/cron/keeper/keeper.go @@ -66,10 +66,9 @@ func (k *Keeper) Logger(ctx sdk.Context) log.Logger { // ExecuteReadySchedules gets all schedules that are due for execution (with limit that is equal to Params.Limit) // and executes messages in each one -// NOTE that errors in contract calls rollback all already executed messages -func (k *Keeper) ExecuteReadySchedules(ctx sdk.Context) { +func (k *Keeper) ExecuteReadySchedules(ctx sdk.Context, executionStage types.ExecutionStage) { telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), LabelExecuteReadySchedules) - schedules := k.getSchedulesReadyForExecution(ctx) + schedules := k.getSchedulesReadyForExecution(ctx, executionStage) for _, schedule := range schedules { err := k.executeSchedule(ctx, schedule) @@ -77,9 +76,15 @@ func (k *Keeper) ExecuteReadySchedules(ctx sdk.Context) { } } -// AddSchedule adds new schedule to execution for every block `period`. +// AddSchedule adds a new schedule to be executed every certain number of blocks, specified in the `period`. 
// First schedule execution is supposed to be on `now + period` block. -func (k *Keeper) AddSchedule(ctx sdk.Context, name string, period uint64, msgs []types.MsgExecuteContract) error { +func (k *Keeper) AddSchedule( + ctx sdk.Context, + name string, + period uint64, + msgs []types.MsgExecuteContract, + executionStage types.ExecutionStage, +) error { if k.scheduleExists(ctx, name) { return fmt.Errorf("schedule already exists with name=%v", name) } @@ -89,7 +94,9 @@ func (k *Keeper) AddSchedule(ctx sdk.Context, name string, period uint64, msgs [ Period: period, Msgs: msgs, LastExecuteHeight: uint64(ctx.BlockHeight()), // let's execute newly added schedule on `now + period` block + ExecutionStage: executionStage, } + k.storeSchedule(ctx, schedule) k.changeTotalCount(ctx, 1) @@ -141,7 +148,7 @@ func (k *Keeper) GetScheduleCount(ctx sdk.Context) int32 { return k.getScheduleCount(ctx) } -func (k *Keeper) getSchedulesReadyForExecution(ctx sdk.Context) []types.Schedule { +func (k *Keeper) getSchedulesReadyForExecution(ctx sdk.Context, executionStage types.ExecutionStage) []types.Schedule { params := k.GetParams(ctx) store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) count := uint64(0) @@ -155,7 +162,7 @@ func (k *Keeper) getSchedulesReadyForExecution(ctx sdk.Context) []types.Schedule var schedule types.Schedule k.cdc.MustUnmarshal(iterator.Value(), &schedule) - if k.intervalPassed(ctx, schedule) { + if k.intervalPassed(ctx, schedule) && schedule.ExecutionStage == executionStage { res = append(res, schedule) count++ diff --git a/x/cron/keeper/keeper_test.go b/x/cron/keeper/keeper_test.go index 8abe2060c..d58553c56 100644 --- a/x/cron/keeper/keeper_test.go +++ b/x/cron/keeper/keeper_test.go @@ -44,7 +44,7 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { schedules := []types.Schedule{ { Name: "1_unready1", - Period: 3, + Period: 10, Msgs: []types.MsgExecuteContract{ { Contract: "1_neutron", @@ -52,10 +52,11 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { }, }, LastExecuteHeight: 4, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, }, { Name: "2_ready1", - Period: 3, + Period: 4, Msgs: []types.MsgExecuteContract{ { Contract: "2_neutron", @@ -63,10 +64,11 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { }, }, LastExecuteHeight: 0, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, }, { Name: "3_ready2", - Period: 3, + Period: 4, Msgs: []types.MsgExecuteContract{ { Contract: "3_neutron", @@ -74,12 +76,14 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { }, }, LastExecuteHeight: 0, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, }, { Name: "4_unready2", - Period: 3, + Period: 10, Msgs: []types.MsgExecuteContract{}, LastExecuteHeight: 4, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, }, { Name: "5_ready3", @@ -91,22 +95,34 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { }, }, LastExecuteHeight: 0, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, + }, + { + Name: "6_ready4", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "6_neutron", + Msg: "6_msg", + }, + }, + LastExecuteHeight: 0, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER, }, } for _, item := range schedules { ctx = ctx.WithBlockHeight(int64(item.LastExecuteHeight)) - err := k.AddSchedule(ctx, item.Name, item.Period, item.Msgs) + err := k.AddSchedule(ctx, item.Name, item.Period, item.Msgs, item.ExecutionStage) require.NoError(t, err) } count := 
k.GetScheduleCount(ctx) - require.Equal(t, count, int32(5)) + require.Equal(t, count, int32(6)) ctx = ctx.WithBlockHeight(5) - accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr) - accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr) + accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr).AnyTimes() wasmMsgServer.EXPECT().ExecuteContract(gomock.Any(), &wasmtypes.MsgExecuteContract{ Sender: testutil.TestOwnerAddress, Contract: "2_neutron", @@ -120,25 +136,26 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { Funds: sdk.NewCoins(), }).Return(&wasmtypes.MsgExecuteContractResponse{}, nil) - k.ExecuteReadySchedules(ctx) + k.ExecuteReadySchedules(ctx, types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER) unready1, _ := k.GetSchedule(ctx, "1_unready1") ready1, _ := k.GetSchedule(ctx, "2_ready1") ready2, _ := k.GetSchedule(ctx, "3_ready2") unready2, _ := k.GetSchedule(ctx, "4_unready2") ready3, _ := k.GetSchedule(ctx, "5_ready3") + ready4, _ := k.GetSchedule(ctx, "6_ready4") require.Equal(t, uint64(4), unready1.LastExecuteHeight) require.Equal(t, uint64(5), ready1.LastExecuteHeight) require.Equal(t, uint64(5), ready2.LastExecuteHeight) require.Equal(t, uint64(4), unready2.LastExecuteHeight) require.Equal(t, uint64(0), ready3.LastExecuteHeight) + require.Equal(t, uint64(0), ready4.LastExecuteHeight) // let's make another call at the next height // Notice that now only one ready schedule left because we got limit of 2 at once ctx = ctx.WithBlockHeight(6) - accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr) wasmMsgServer.EXPECT().ExecuteContract(gomock.Any(), &wasmtypes.MsgExecuteContract{ Sender: testutil.TestOwnerAddress, Contract: "5_neutron", @@ -146,19 +163,46 @@ func TestKeeperExecuteReadySchedules(t *testing.T) { Funds: sdk.NewCoins(), }).Return(&wasmtypes.MsgExecuteContractResponse{}, nil) - k.ExecuteReadySchedules(ctx) + k.ExecuteReadySchedules(ctx, types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER) unready1, _ = k.GetSchedule(ctx, "1_unready1") ready1, _ = k.GetSchedule(ctx, "2_ready1") ready2, _ = k.GetSchedule(ctx, "3_ready2") unready2, _ = k.GetSchedule(ctx, "4_unready2") ready3, _ = k.GetSchedule(ctx, "5_ready3") + ready4, _ = k.GetSchedule(ctx, "6_ready4") require.Equal(t, uint64(4), unready1.LastExecuteHeight) require.Equal(t, uint64(5), ready1.LastExecuteHeight) require.Equal(t, uint64(5), ready2.LastExecuteHeight) require.Equal(t, uint64(4), unready2.LastExecuteHeight) require.Equal(t, uint64(6), ready3.LastExecuteHeight) + require.Equal(t, uint64(0), ready4.LastExecuteHeight) + + ctx = ctx.WithBlockHeight(7) + + wasmMsgServer.EXPECT().ExecuteContract(gomock.Any(), &wasmtypes.MsgExecuteContract{ + Sender: testutil.TestOwnerAddress, + Contract: "6_neutron", + Msg: []byte("6_msg"), + Funds: sdk.NewCoins(), + }).Return(&wasmtypes.MsgExecuteContractResponse{}, nil) + + k.ExecuteReadySchedules(ctx, types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER) + + unready1, _ = k.GetSchedule(ctx, "1_unready1") + ready1, _ = k.GetSchedule(ctx, "2_ready1") + ready2, _ = k.GetSchedule(ctx, "3_ready2") + unready2, _ = k.GetSchedule(ctx, "4_unready2") + ready3, _ = k.GetSchedule(ctx, "5_ready3") + ready4, _ = k.GetSchedule(ctx, "6_ready4") + + require.Equal(t, uint64(4), unready1.LastExecuteHeight) + require.Equal(t, uint64(5), ready1.LastExecuteHeight) + require.Equal(t, uint64(5), ready2.LastExecuteHeight) + require.Equal(t, uint64(4), unready2.LastExecuteHeight) + require.Equal(t, uint64(6), ready3.LastExecuteHeight) + 
require.Equal(t, uint64(7), ready4.LastExecuteHeight) } func TestAddSchedule(t *testing.T) { @@ -183,11 +227,19 @@ func TestAddSchedule(t *testing.T) { Contract: "c", Msg: "m", }, - }) + }, types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER) + require.NoError(t, err) + + err = k.AddSchedule(ctx, "b", 7, []types.MsgExecuteContract{ + { + Contract: "c", + Msg: "m", + }, + }, types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER) require.NoError(t, err) // second time with same name returns error - err = k.AddSchedule(ctx, "a", 5, []types.MsgExecuteContract{}) + err = k.AddSchedule(ctx, "a", 5, []types.MsgExecuteContract{}, types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER) require.Error(t, err) scheduleA, found := k.GetSchedule(ctx, "a") @@ -197,6 +249,12 @@ func TestAddSchedule(t *testing.T) { require.Equal(t, scheduleA.Msgs, []types.MsgExecuteContract{ {Contract: "c", Msg: "m"}, }) + require.Equal(t, scheduleA.ExecutionStage, types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER) + + schedules := k.GetAllSchedules(ctx) + require.Len(t, schedules, 2) + require.Equal(t, schedules[0].ExecutionStage, types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER) + require.Equal(t, schedules[1].ExecutionStage, types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER) // remove schedule works k.RemoveSchedule(ctx, "a") @@ -223,9 +281,10 @@ func TestGetAllSchedules(t *testing.T) { Period: 5, Msgs: nil, LastExecuteHeight: uint64(ctx.BlockHeight()), + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER, } expectedSchedules = append(expectedSchedules, s) - err := k.AddSchedule(ctx, s.Name, s.Period, s.Msgs) + err := k.AddSchedule(ctx, s.Name, s.Period, s.Msgs, s.ExecutionStage) require.NoError(t, err) } diff --git a/x/cron/keeper/migrations.go b/x/cron/keeper/migrations.go new file mode 100644 index 000000000..0166a8da9 --- /dev/null +++ b/x/cron/keeper/migrations.go @@ -0,0 +1,22 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + v2 "github.com/neutron-org/neutron/v4/x/cron/migrations/v2" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return v2.MigrateStore(ctx, m.keeper.cdc, m.keeper.storeKey) +} diff --git a/x/cron/keeper/msg_server.go b/x/cron/keeper/msg_server.go index e49e578ab..c42a0d1c4 100644 --- a/x/cron/keeper/msg_server.go +++ b/x/cron/keeper/msg_server.go @@ -11,30 +11,66 @@ import ( ) type msgServer struct { - Keeper + keeper Keeper } // NewMsgServerImpl returns an implementation of the MsgServer interface // for the provided Keeper. 
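+// A minimal wiring sketch, assuming the module's RegisterServices receives a
+// module.Configurator named cfg and the AppModule keeps its keeper in am.keeper:
+//
+//	types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper))
+//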
func NewMsgServerImpl(keeper Keeper) types.MsgServer { - return &msgServer{Keeper: keeper} + return &msgServer{keeper: keeper} } var _ types.MsgServer = msgServer{} +// AddSchedule adds new schedule +func (k msgServer) AddSchedule(goCtx context.Context, req *types.MsgAddSchedule) (*types.MsgAddScheduleResponse, error) { + if err := req.Validate(); err != nil { + return nil, errors.Wrap(err, "failed to validate MsgAddSchedule") + } + + authority := k.keeper.GetAuthority() + if authority != req.Authority { + return nil, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid authority; expected %s, got %s", authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.keeper.AddSchedule(ctx, req.Name, req.Period, req.Msgs, req.ExecutionStage); err != nil { + return nil, errors.Wrap(err, "failed to add schedule") + } + + return &types.MsgAddScheduleResponse{}, nil +} + +// RemoveSchedule removes schedule +func (k msgServer) RemoveSchedule(goCtx context.Context, req *types.MsgRemoveSchedule) (*types.MsgRemoveScheduleResponse, error) { + if err := req.Validate(); err != nil { + return nil, errors.Wrap(err, "failed to validate MsgRemoveSchedule") + } + + authority := k.keeper.GetAuthority() + if authority != req.Authority { + return nil, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid authority; expected %s, got %s", authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + k.keeper.RemoveSchedule(ctx, req.Name) + + return &types.MsgRemoveScheduleResponse{}, nil +} + // UpdateParams updates the module parameters -func (k Keeper) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { +func (k msgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { if err := req.Validate(); err != nil { return nil, errors.Wrap(err, "failed to validate MsgUpdateParams") } - authority := k.GetAuthority() + authority := k.keeper.GetAuthority() if authority != req.Authority { return nil, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid authority; expected %s, got %s", authority, req.Authority) } ctx := sdk.UnwrapSDKContext(goCtx) - if err := k.SetParams(ctx, req.Params); err != nil { + if err := k.keeper.SetParams(ctx, req.Params); err != nil { return nil, err } diff --git a/x/cron/keeper/msg_server_test.go b/x/cron/keeper/msg_server_test.go index 17a3655ac..ef10b1ffc 100644 --- a/x/cron/keeper/msg_server_test.go +++ b/x/cron/keeper/msg_server_test.go @@ -6,12 +6,171 @@ import ( "github.com/stretchr/testify/require" "github.com/neutron-org/neutron/v4/testutil" - "github.com/neutron-org/neutron/v4/testutil/cron/keeper" + testkeeper "github.com/neutron-org/neutron/v4/testutil/cron/keeper" + cronkeeper "github.com/neutron-org/neutron/v4/x/cron/keeper" "github.com/neutron-org/neutron/v4/x/cron/types" ) +func TestMsgAddScheduleValidate(t *testing.T) { + k, ctx := testkeeper.CronKeeper(t, nil, nil) + msgServer := cronkeeper.NewMsgServerImpl(*k) + + tests := []struct { + name string + msg types.MsgAddSchedule + expectedErr string + }{ + { + "empty authority", + types.MsgAddSchedule{ + Authority: "", + Name: "name", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "contract", + Msg: "msg", + }, + }, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, + }, + "authority is invalid", + }, + { + "invalid authority", + types.MsgAddSchedule{ + Authority: "invalid authority", + Name: "name", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: 
"contract", + Msg: "msg", + }, + }, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, + }, + "authority is invalid", + }, + { + "invalid name", + types.MsgAddSchedule{ + Authority: testutil.TestOwnerAddress, + Name: "", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "contract", + Msg: "msg", + }, + }, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, + }, + "name is invalid", + }, + { + "invalid period", + types.MsgAddSchedule{ + Authority: testutil.TestOwnerAddress, + Name: "name", + Period: 0, + Msgs: []types.MsgExecuteContract{ + { + Contract: "contract", + Msg: "msg", + }, + }, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, + }, + "period is invalid", + }, + { + "empty msgs", + types.MsgAddSchedule{ + Authority: testutil.TestOwnerAddress, + Name: "name", + Period: 3, + Msgs: []types.MsgExecuteContract{}, + ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER, + }, + "msgs should not be empty", + }, + { + "invalid execution stage", + types.MsgAddSchedule{ + Authority: testutil.TestOwnerAddress, + Name: "name", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "contract", + Msg: "msg", + }, + }, + ExecutionStage: 7, + }, + "execution stage is invalid", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + resp, err := msgServer.AddSchedule(ctx, &tt.msg) + require.ErrorContains(t, err, tt.expectedErr) + require.Nil(t, resp) + }) + } +} + +func TestMsgRemoveScheduleValidate(t *testing.T) { + k, ctx := testkeeper.CronKeeper(t, nil, nil) + msgServer := cronkeeper.NewMsgServerImpl(*k) + + tests := []struct { + name string + msg types.MsgRemoveSchedule + expectedErr string + }{ + { + "empty authority", + types.MsgRemoveSchedule{ + Authority: "", + Name: "name", + }, + "authority is invalid", + }, + { + "invalid authority", + types.MsgRemoveSchedule{ + Authority: "invalid authority", + Name: "name", + }, + "authority is invalid", + }, + { + "invalid name", + types.MsgRemoveSchedule{ + Authority: testutil.TestOwnerAddress, + Name: "", + }, + "name is invalid", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + resp, err := msgServer.RemoveSchedule(ctx, &tt.msg) + require.ErrorContains(t, err, tt.expectedErr) + require.Nil(t, resp) + }) + } +} + func TestMsgUpdateParamsValidate(t *testing.T) { - k, ctx := keeper.CronKeeper(t, nil, nil) + k, ctx := testkeeper.CronKeeper(t, nil, nil) + msgServer := cronkeeper.NewMsgServerImpl(*k) tests := []struct { name string @@ -57,7 +216,7 @@ func TestMsgUpdateParamsValidate(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - resp, err := k.UpdateParams(ctx, &tt.msg) + resp, err := msgServer.UpdateParams(ctx, &tt.msg) require.ErrorContains(t, err, tt.expectedErr) require.Nil(t, resp) }) diff --git a/x/cron/migrations/v2/store.go b/x/cron/migrations/v2/store.go new file mode 100644 index 000000000..f3311aba0 --- /dev/null +++ b/x/cron/migrations/v2/store.go @@ -0,0 +1,56 @@ +package v2 + +import ( + "cosmossdk.io/errors" + "cosmossdk.io/store/prefix" + storetypes "cosmossdk.io/store/types" + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/neutron-org/neutron/v4/x/cron/types" +) + +// MigrateStore performs in-place store migrations. +// The migration adds execution stage for schedules. 
+func MigrateStore(ctx sdk.Context, cdc codec.BinaryCodec, storeKey storetypes.StoreKey) error { + return migrateSchedules(ctx, cdc, storeKey) +} + +type migrationUpdate struct { + key []byte + val []byte +} + +func migrateSchedules(ctx sdk.Context, cdc codec.BinaryCodec, storeKey storetypes.StoreKey) error { + ctx.Logger().Info("Migrating cron Schedules...") + + store := prefix.NewStore(ctx.KVStore(storeKey), types.ScheduleKey) + iterator := storetypes.KVStorePrefixIterator(store, []byte{}) + schedulesToUpdate := make([]migrationUpdate, 0) + + for ; iterator.Valid(); iterator.Next() { + var schedule types.Schedule + cdc.MustUnmarshal(iterator.Value(), &schedule) + // Set execution in EndBlocker + schedule.ExecutionStage = types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER + + schedulesToUpdate = append(schedulesToUpdate, migrationUpdate{ + key: iterator.Key(), + val: cdc.MustMarshal(&schedule), + }) + } + + err := iterator.Close() + if err != nil { + return errors.Wrap(err, "iterator failed to close during migration") + } + + // Store the updated Schedules + for _, v := range schedulesToUpdate { + store.Set(v.key, v.val) + } + + ctx.Logger().Info("Finished migrating cron Schedules...") + + return nil +} diff --git a/x/cron/migrations/v2/store_test.go b/x/cron/migrations/v2/store_test.go new file mode 100644 index 000000000..6b6f46355 --- /dev/null +++ b/x/cron/migrations/v2/store_test.go @@ -0,0 +1,60 @@ +package v2_test + +import ( + "testing" + + "cosmossdk.io/store/prefix" + "github.com/stretchr/testify/suite" + + "github.com/neutron-org/neutron/v4/testutil" + v2 "github.com/neutron-org/neutron/v4/x/cron/migrations/v2" + "github.com/neutron-org/neutron/v4/x/cron/types" + v1types "github.com/neutron-org/neutron/v4/x/cron/types/v1" +) + +type V2CronMigrationTestSuite struct { + testutil.IBCConnectionTestSuite +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(V2CronMigrationTestSuite)) +} + +func (suite *V2CronMigrationTestSuite) TestScheduleUpgrade() { + var ( + app = suite.GetNeutronZoneApp(suite.ChainA) + storeKey = app.GetKey(types.StoreKey) + ctx = suite.ChainA.GetContext() + cdc = app.AppCodec() + ) + + schedule := v1types.Schedule{ + Name: "name", + Period: 3, + Msgs: []v1types.MsgExecuteContract{ + { + Contract: "contract", + Msg: "msg", + }, + }, + LastExecuteHeight: 1, + } + + store := prefix.NewStore(ctx.KVStore(storeKey), types.ScheduleKey) + bz := cdc.MustMarshal(&schedule) + store.Set(types.GetScheduleKey(schedule.Name), bz) + + // Run migration + suite.NoError(v2.MigrateStore(ctx, cdc, storeKey)) + + // Check Schedule has correct ExecutionStage + newSchedule, _ := app.CronKeeper.GetSchedule(ctx, schedule.Name) + suite.Equal(newSchedule.Name, schedule.Name) + suite.Equal(newSchedule.Period, schedule.Period) + for i, msg := range newSchedule.Msgs { + suite.Equal(msg.Contract, schedule.Msgs[i].Contract) + suite.Equal(msg.Msg, schedule.Msgs[i].Msg) + } + suite.Equal(newSchedule.LastExecuteHeight, schedule.LastExecuteHeight) + suite.Equal(newSchedule.ExecutionStage, types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER) +} diff --git a/x/cron/module.go b/x/cron/module.go index 60dc09b91..f9957651e 100644 --- a/x/cron/module.go +++ b/x/cron/module.go @@ -26,8 +26,10 @@ import ( ) var ( - _ appmodule.AppModule = AppModule{} - _ module.AppModuleBasic = AppModuleBasic{} + _ appmodule.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} + _ appmodule.HasBeginBlocker = AppModule{} + _ appmodule.HasEndBlocker = AppModule{} ) // 
---------------------------------------------------------------------------- @@ -155,10 +157,13 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw func (AppModule) ConsensusVersion() uint64 { return types.ConsensusVersion } // BeginBlock contains the logic that is automatically triggered at the beginning of each block -func (am AppModule) BeginBlock(_ sdk.Context) {} +func (am AppModule) BeginBlock(ctx context.Context) error { + am.keeper.ExecuteReadySchedules(sdk.UnwrapSDKContext(ctx), types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER) + return nil +} // EndBlock contains the logic that is automatically triggered at the end of each block -func (am AppModule) EndBlock(ctx context.Context) ([]abci.ValidatorUpdate, error) { - am.keeper.ExecuteReadySchedules(sdk.UnwrapSDKContext(ctx)) - return []abci.ValidatorUpdate{}, nil +func (am AppModule) EndBlock(ctx context.Context) error { + am.keeper.ExecuteReadySchedules(sdk.UnwrapSDKContext(ctx), types.ExecutionStage_EXECUTION_STAGE_END_BLOCKER) + return nil } diff --git a/x/cron/types/codec.go b/x/cron/types/codec.go index a52eff2c6..6772e97b9 100644 --- a/x/cron/types/codec.go +++ b/x/cron/types/codec.go @@ -15,6 +15,8 @@ func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { registry.RegisterImplementations( (*sdk.Msg)(nil), &MsgUpdateParams{}, + &MsgAddSchedule{}, + &MsgRemoveSchedule{}, ) msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) } diff --git a/x/cron/types/genesis.pb.go b/x/cron/types/genesis.pb.go index 2d3546994..274598ad7 100644 --- a/x/cron/types/genesis.pb.go +++ b/x/cron/types/genesis.pb.go @@ -23,7 +23,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// GenesisState defines the cron module's genesis state. +// Defines the cron module's genesis state. type GenesisState struct { ScheduleList []Schedule `protobuf:"bytes,2,rep,name=scheduleList,proto3" json:"scheduleList"` Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` diff --git a/x/cron/types/params.pb.go b/x/cron/types/params.pb.go index c71904ce8..e927ec42a 100644 --- a/x/cron/types/params.pb.go +++ b/x/cron/types/params.pb.go @@ -23,7 +23,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Params defines the parameters for the module. +// Defines the parameters for the module. type Params struct { // Security address that can remove schedules SecurityAddress string `protobuf:"bytes,1,opt,name=security_address,json=securityAddress,proto3" json:"security_address,omitempty"` diff --git a/x/cron/types/query.pb.go b/x/cron/types/query.pb.go index c34c6c3f9..912a44eaa 100644 --- a/x/cron/types/query.pb.go +++ b/x/cron/types/query.pb.go @@ -30,6 +30,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// The request type for the Query/Params RPC method. type QueryParamsRequest struct { } @@ -66,6 +67,7 @@ func (m *QueryParamsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo +// The response type for the Query/Params RPC method. type QueryParamsResponse struct { // params holds all the parameters of this module. 
Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` @@ -111,6 +113,7 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } +// The request type for the Query/Schedule RPC method. type QueryGetScheduleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -155,6 +158,7 @@ func (m *QueryGetScheduleRequest) GetName() string { return "" } +// The response type for the Query/Params RPC method. type QueryGetScheduleResponse struct { Schedule Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule"` } @@ -199,6 +203,7 @@ func (m *QueryGetScheduleResponse) GetSchedule() Schedule { return Schedule{} } +// The request type for the Query/Schedules RPC method. type QuerySchedulesRequest struct { Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` } @@ -243,6 +248,7 @@ func (m *QuerySchedulesRequest) GetPagination() *query.PageRequest { return nil } +// The response type for the Query/Params RPC method. type QuerySchedulesResponse struct { Schedules []Schedule `protobuf:"bytes,1,rep,name=schedules,proto3" json:"schedules"` Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` diff --git a/x/cron/types/schedule.pb.go b/x/cron/types/schedule.pb.go index cda29cf72..845a1059a 100644 --- a/x/cron/types/schedule.pb.go +++ b/x/cron/types/schedule.pb.go @@ -23,15 +23,46 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Defines when messages will be executed in the block +type ExecutionStage int32 + +const ( + // Execution at the end of the block + ExecutionStage_EXECUTION_STAGE_END_BLOCKER ExecutionStage = 0 + // Execution at the beginning of the block + ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER ExecutionStage = 1 +) + +var ExecutionStage_name = map[int32]string{ + 0: "EXECUTION_STAGE_END_BLOCKER", + 1: "EXECUTION_STAGE_BEGIN_BLOCKER", +} + +var ExecutionStage_value = map[string]int32{ + "EXECUTION_STAGE_END_BLOCKER": 0, + "EXECUTION_STAGE_BEGIN_BLOCKER": 1, +} + +func (x ExecutionStage) String() string { + return proto.EnumName(ExecutionStage_name, int32(x)) +} + +func (ExecutionStage) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_49ace1b59de613ef, []int{0} +} + +// Defines the schedule for execution type Schedule struct { // Name of schedule Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Period in blocks Period uint64 `protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty"` - // Msgs that will be executed every period amount of time + // Msgs that will be executed every certain number of blocks, specified in the `period` field Msgs []MsgExecuteContract `protobuf:"bytes,3,rep,name=msgs,proto3" json:"msgs"` // Last execution's block height LastExecuteHeight uint64 `protobuf:"varint,4,opt,name=last_execute_height,json=lastExecuteHeight,proto3" json:"last_execute_height,omitempty"` + // Stage when messages will be executed + ExecutionStage ExecutionStage `protobuf:"varint,5,opt,name=execution_stage,json=executionStage,proto3,enum=neutron.cron.ExecutionStage" json:"execution_stage,omitempty"` } func (m *Schedule) Reset() { *m = Schedule{} } @@ -95,10 +126,18 @@ func (m *Schedule) GetLastExecuteHeight() uint64 { return 0 } +func (m *Schedule) GetExecutionStage() ExecutionStage { + if m != nil { + return m.ExecutionStage + } + return 
ExecutionStage_EXECUTION_STAGE_END_BLOCKER +} + +// Defines the contract and the message to pass type MsgExecuteContract struct { - // Contract is the address of the smart contract + // The address of the smart contract Contract string `protobuf:"bytes,1,opt,name=contract,proto3" json:"contract,omitempty"` - // Msg is json encoded message to be passed to the contract + // JSON encoded message to be passed to the contract Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } @@ -149,8 +188,9 @@ func (m *MsgExecuteContract) GetMsg() string { return "" } +// Defines the number of current schedules type ScheduleCount struct { - // Count is the number of current schedules + // The number of current schedules Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` } @@ -195,6 +235,7 @@ func (m *ScheduleCount) GetCount() int32 { } func init() { + proto.RegisterEnum("neutron.cron.ExecutionStage", ExecutionStage_name, ExecutionStage_value) proto.RegisterType((*Schedule)(nil), "neutron.cron.Schedule") proto.RegisterType((*MsgExecuteContract)(nil), "neutron.cron.MsgExecuteContract") proto.RegisterType((*ScheduleCount)(nil), "neutron.cron.ScheduleCount") @@ -203,27 +244,32 @@ func init() { func init() { proto.RegisterFile("neutron/cron/schedule.proto", fileDescriptor_49ace1b59de613ef) } var fileDescriptor_49ace1b59de613ef = []byte{ - // 309 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xfb, 0x30, - 0x18, 0xc6, 0x9b, 0xff, 0xba, 0xb1, 0xe5, 0xaf, 0xa0, 0x71, 0x48, 0x99, 0x10, 0x4b, 0x41, 0xe8, - 0xc5, 0x14, 0xd4, 0x93, 0xc7, 0x0e, 0x61, 0x17, 0x2f, 0xf5, 0xe6, 0x65, 0x74, 0x59, 0x48, 0x0b, - 0x6b, 0x53, 0x9a, 0x54, 0xe6, 0xb7, 0xf0, 0x33, 0xf8, 0x69, 0x76, 0xdc, 0xd1, 0x93, 0x48, 0xfb, - 0x45, 0x24, 0x69, 0x26, 0x82, 0x97, 0xf0, 0x7b, 0x78, 0xde, 0x27, 0xef, 0xfb, 0x26, 0xf0, 0xa2, - 0x64, 0x8d, 0xaa, 0x45, 0x19, 0x51, 0x7d, 0x48, 0x9a, 0xb1, 0x75, 0xb3, 0x61, 0xa4, 0xaa, 0x85, - 0x12, 0xe8, 0xc8, 0x9a, 0x44, 0x9b, 0xb3, 0x29, 0x17, 0x5c, 0x18, 0x23, 0xd2, 0xd4, 0xd7, 0x04, - 0xef, 0x00, 0x8e, 0x9f, 0x6c, 0x0c, 0x21, 0xe8, 0x96, 0x69, 0xc1, 0x3c, 0xe0, 0x83, 0x70, 0x92, - 0x18, 0x46, 0xe7, 0x70, 0x54, 0xb1, 0x3a, 0x17, 0x6b, 0xef, 0x9f, 0x0f, 0x42, 0x37, 0xb1, 0x0a, - 0xdd, 0x43, 0xb7, 0x90, 0x5c, 0x7a, 0x03, 0x7f, 0x10, 0xfe, 0xbf, 0xf1, 0xc9, 0xef, 0x5e, 0xe4, - 0x51, 0xf2, 0x87, 0x2d, 0xa3, 0x8d, 0x62, 0x73, 0x51, 0xaa, 0x3a, 0xa5, 0x2a, 0x76, 0x77, 0x9f, - 0x97, 0x4e, 0x62, 0x32, 0x88, 0xc0, 0xb3, 0x4d, 0x2a, 0xd5, 0x92, 0xf5, 0x35, 0xcb, 0x8c, 0xe5, - 0x3c, 0x53, 0x9e, 0x6b, 0x1a, 0x9c, 0x6a, 0xcb, 0xa6, 0x17, 0xc6, 0x08, 0x62, 0x88, 0xfe, 0xde, - 0x88, 0x66, 0x70, 0x4c, 0x2d, 0xdb, 0x89, 0x7f, 0x34, 0x3a, 0x81, 0x83, 0x42, 0x72, 0x33, 0xf2, - 0x24, 0xd1, 0x18, 0x5c, 0xc1, 0xe3, 0xc3, 0x9e, 0x73, 0xd1, 0x94, 0x0a, 0x4d, 0xe1, 0x90, 0x6a, - 0x30, 0xd9, 0x61, 0xd2, 0x8b, 0x78, 0xb1, 0x6b, 0x31, 0xd8, 0xb7, 0x18, 0x7c, 0xb5, 0x18, 0xbc, - 0x75, 0xd8, 0xd9, 0x77, 0xd8, 0xf9, 0xe8, 0xb0, 0xf3, 0x4c, 0x78, 0xae, 0xb2, 0x66, 0x45, 0xa8, - 0x28, 0x22, 0xbb, 0xec, 0xb5, 0xa8, 0xf9, 0x81, 0xa3, 0x97, 0xbb, 0x68, 0xdb, 0x7f, 0x83, 0x7a, - 0xad, 0x98, 0x5c, 0x8d, 0xcc, 0x03, 0xdf, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x19, 0x38, - 0x8d, 0xa3, 0x01, 0x00, 0x00, + // 393 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xc1, 0xaa, 0xd3, 0x40, + 0x18, 0x85, 0x33, 0x36, 0x2d, 0xed, 0xa8, 0xb5, 0x8e, 0x45, 0x42, 0xab, 0x69, 
0x2c, 0x08, 0x41, + 0x30, 0x81, 0xea, 0xca, 0x9d, 0x89, 0x43, 0x5b, 0xd4, 0x16, 0xd2, 0x0a, 0xe2, 0x26, 0xa4, 0xe9, + 0x30, 0x09, 0x34, 0x99, 0x92, 0x99, 0x48, 0x7d, 0x0b, 0x1f, 0xab, 0xcb, 0x2e, 0x5d, 0x89, 0xb4, + 0x2b, 0xdf, 0xe2, 0x92, 0x49, 0x5a, 0x6e, 0xef, 0xdd, 0x84, 0x73, 0x38, 0xdf, 0xe1, 0x9f, 0xf9, + 0x33, 0xb0, 0x9f, 0x92, 0x5c, 0x64, 0x2c, 0xb5, 0xc3, 0xe2, 0xc3, 0xc3, 0x88, 0xac, 0xf3, 0x0d, + 0xb1, 0xb6, 0x19, 0x13, 0x0c, 0x3d, 0xaa, 0x42, 0xab, 0x08, 0x7b, 0x5d, 0xca, 0x28, 0x93, 0x81, + 0x5d, 0xa8, 0x92, 0x19, 0xfe, 0x07, 0xb0, 0xb9, 0xa8, 0x6a, 0x08, 0x41, 0x35, 0x0d, 0x12, 0xa2, + 0x01, 0x03, 0x98, 0x2d, 0x4f, 0x6a, 0xf4, 0x1c, 0x36, 0xb6, 0x24, 0x8b, 0xd9, 0x5a, 0x7b, 0x60, + 0x00, 0x53, 0xf5, 0x2a, 0x87, 0x3e, 0x40, 0x35, 0xe1, 0x94, 0x6b, 0x35, 0xa3, 0x66, 0x3e, 0x1c, + 0x19, 0xd6, 0xed, 0x59, 0xd6, 0x57, 0x4e, 0xf1, 0x8e, 0x84, 0xb9, 0x20, 0x2e, 0x4b, 0x45, 0x16, + 0x84, 0xc2, 0x51, 0xf7, 0x7f, 0x07, 0x8a, 0x27, 0x3b, 0xc8, 0x82, 0xcf, 0x36, 0x01, 0x17, 0x3e, + 0x29, 0x19, 0x3f, 0x22, 0x31, 0x8d, 0x84, 0xa6, 0xca, 0x01, 0x4f, 0x8b, 0xa8, 0x6a, 0x4f, 0x64, + 0x80, 0x30, 0x7c, 0x52, 0xa2, 0x31, 0x4b, 0x7d, 0x2e, 0x02, 0x4a, 0xb4, 0xba, 0x01, 0xcc, 0xf6, + 0xe8, 0xc5, 0xf5, 0x58, 0x7c, 0x86, 0x16, 0x05, 0xe3, 0xb5, 0xc9, 0x95, 0x1f, 0x3a, 0x10, 0xdd, + 0x3f, 0x18, 0xea, 0xc1, 0x66, 0x58, 0xe9, 0xea, 0xe2, 0x17, 0x8f, 0x3a, 0xb0, 0x96, 0x70, 0x2a, + 0x6f, 0xde, 0xf2, 0x0a, 0x39, 0x7c, 0x0d, 0x1f, 0x9f, 0xd7, 0xe5, 0xb2, 0x3c, 0x15, 0xa8, 0x0b, + 0xeb, 0x61, 0x21, 0x64, 0xb7, 0xee, 0x95, 0xe6, 0xcd, 0x12, 0xb6, 0xaf, 0x0f, 0x83, 0x06, 0xb0, + 0x8f, 0xbf, 0x63, 0xf7, 0xdb, 0x72, 0x3a, 0x9f, 0xf9, 0x8b, 0xe5, 0xc7, 0x31, 0xf6, 0xf1, 0xec, + 0x93, 0xef, 0x7c, 0x99, 0xbb, 0x9f, 0xb1, 0xd7, 0x51, 0xd0, 0x2b, 0xf8, 0xf2, 0x2e, 0xe0, 0xe0, + 0xf1, 0x74, 0x76, 0x41, 0x80, 0x33, 0xd9, 0x1f, 0x75, 0x70, 0x38, 0xea, 0xe0, 0xdf, 0x51, 0x07, + 0xbf, 0x4f, 0xba, 0x72, 0x38, 0xe9, 0xca, 0x9f, 0x93, 0xae, 0xfc, 0xb0, 0x68, 0x2c, 0xa2, 0x7c, + 0x65, 0x85, 0x2c, 0xb1, 0xab, 0x95, 0xbc, 0x65, 0x19, 0x3d, 0x6b, 0xfb, 0xe7, 0x7b, 0x7b, 0x57, + 0xbe, 0x11, 0xf1, 0x6b, 0x4b, 0xf8, 0xaa, 0x21, 0xff, 0xfe, 0xbb, 0x9b, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x2e, 0xa9, 0x20, 0xa0, 0x40, 0x02, 0x00, 0x00, } func (m *Schedule) Marshal() (dAtA []byte, err error) { @@ -246,6 +292,11 @@ func (m *Schedule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExecutionStage != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.ExecutionStage)) + i-- + dAtA[i] = 0x28 + } if m.LastExecuteHeight != 0 { i = encodeVarintSchedule(dAtA, i, uint64(m.LastExecuteHeight)) i-- @@ -378,6 +429,9 @@ func (m *Schedule) Size() (n int) { if m.LastExecuteHeight != 0 { n += 1 + sovSchedule(uint64(m.LastExecuteHeight)) } + if m.ExecutionStage != 0 { + n += 1 + sovSchedule(uint64(m.ExecutionStage)) + } return n } @@ -549,6 +603,25 @@ func (m *Schedule) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutionStage", wireType) + } + m.ExecutionStage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExecutionStage |= ExecutionStage(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipSchedule(dAtA[iNdEx:]) diff --git a/x/cron/types/tx.go b/x/cron/types/tx.go index eb48677cb..27b4ceec4 100644 --- a/x/cron/types/tx.go +++ b/x/cron/types/tx.go @@ -1,10 +1,95 @@ 
package types import ( - errorsmod "cosmossdk.io/errors" + "cosmossdk.io/errors" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) +var _ sdk.Msg = &MsgAddSchedule{} + +func (msg *MsgAddSchedule) Route() string { + return RouterKey +} + +func (msg *MsgAddSchedule) Type() string { + return "add-schedule" +} + +func (msg *MsgAddSchedule) GetSigners() []sdk.AccAddress { + authority, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { // should never happen as valid basic rejects invalid addresses + panic(err.Error()) + } + return []sdk.AccAddress{authority} +} + +func (msg *MsgAddSchedule) GetSignBytes() []byte { + return ModuleCdc.MustMarshalJSON(msg) +} + +func (msg *MsgAddSchedule) Validate() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "authority is invalid") + } + + if msg.Name == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "name is invalid") + } + + if msg.Period == 0 { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "period is invalid") + } + + if len(msg.Msgs) == 0 { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "msgs should not be empty") + } + + if _, ok := ExecutionStage_name[int32(msg.ExecutionStage)]; !ok { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "execution stage is invalid") + } + + return nil +} + +//---------------------------------------------------------------- + +var _ sdk.Msg = &MsgRemoveSchedule{} + +func (msg *MsgRemoveSchedule) Route() string { + return RouterKey +} + +func (msg *MsgRemoveSchedule) Type() string { + return "remove-schedule" +} + +func (msg *MsgRemoveSchedule) GetSigners() []sdk.AccAddress { + authority, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { // should never happen as valid basic rejects invalid addresses + panic(err.Error()) + } + return []sdk.AccAddress{authority} +} + +func (msg *MsgRemoveSchedule) GetSignBytes() []byte { + return ModuleCdc.MustMarshalJSON(msg) +} + +func (msg *MsgRemoveSchedule) Validate() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "authority is invalid") + } + + if msg.Name == "" { + return errors.Wrap(sdkerrors.ErrInvalidRequest, "name is invalid") + } + + return nil +} + +//---------------------------------------------------------------- + var _ sdk.Msg = &MsgUpdateParams{} func (msg *MsgUpdateParams) Route() string { @@ -29,11 +114,11 @@ func (msg *MsgUpdateParams) GetSignBytes() []byte { func (msg *MsgUpdateParams) Validate() error { if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { - return errorsmod.Wrap(err, "authority is invalid") + return errors.Wrap(err, "authority is invalid") } if _, err := sdk.AccAddressFromBech32(msg.Params.SecurityAddress); err != nil { - return errorsmod.Wrap(err, "security_address is invalid") + return errors.Wrap(err, "security_address is invalid") } return nil diff --git a/x/cron/types/tx.pb.go b/x/cron/types/tx.pb.go index 217bb879c..141735a4e 100644 --- a/x/cron/types/tx.pb.go +++ b/x/cron/types/tx.pb.go @@ -31,13 +31,224 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// MsgUpdateParams is the MsgUpdateParams request type. +// The MsgAddSchedule request type. +type MsgAddSchedule struct { + // The address of the governance account. 
+ Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // Name of the schedule + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Period in blocks + Period uint64 `protobuf:"varint,3,opt,name=period,proto3" json:"period,omitempty"` + // Msgs that will be executed every certain number of blocks, specified in the `period` field + Msgs []MsgExecuteContract `protobuf:"bytes,4,rep,name=msgs,proto3" json:"msgs"` + // Stage when messages will be executed + ExecutionStage ExecutionStage `protobuf:"varint,5,opt,name=execution_stage,json=executionStage,proto3,enum=neutron.cron.ExecutionStage" json:"execution_stage,omitempty"` +} + +func (m *MsgAddSchedule) Reset() { *m = MsgAddSchedule{} } +func (m *MsgAddSchedule) String() string { return proto.CompactTextString(m) } +func (*MsgAddSchedule) ProtoMessage() {} +func (*MsgAddSchedule) Descriptor() ([]byte, []int) { + return fileDescriptor_c9e0a673aba8d6fd, []int{0} +} +func (m *MsgAddSchedule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddSchedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddSchedule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddSchedule) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddSchedule.Merge(m, src) +} +func (m *MsgAddSchedule) XXX_Size() int { + return m.Size() +} +func (m *MsgAddSchedule) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddSchedule.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddSchedule proto.InternalMessageInfo + +func (m *MsgAddSchedule) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgAddSchedule) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MsgAddSchedule) GetPeriod() uint64 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *MsgAddSchedule) GetMsgs() []MsgExecuteContract { + if m != nil { + return m.Msgs + } + return nil +} + +func (m *MsgAddSchedule) GetExecutionStage() ExecutionStage { + if m != nil { + return m.ExecutionStage + } + return ExecutionStage_EXECUTION_STAGE_END_BLOCKER +} + +// Defines the response structure for executing a MsgAddSchedule message. 
+type MsgAddScheduleResponse struct { +} + +func (m *MsgAddScheduleResponse) Reset() { *m = MsgAddScheduleResponse{} } +func (m *MsgAddScheduleResponse) String() string { return proto.CompactTextString(m) } +func (*MsgAddScheduleResponse) ProtoMessage() {} +func (*MsgAddScheduleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c9e0a673aba8d6fd, []int{1} +} +func (m *MsgAddScheduleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddScheduleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddScheduleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddScheduleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddScheduleResponse.Merge(m, src) +} +func (m *MsgAddScheduleResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgAddScheduleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddScheduleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddScheduleResponse proto.InternalMessageInfo + +// The MsgRemoveSchedule request type. +type MsgRemoveSchedule struct { + // The address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // Name of the schedule + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *MsgRemoveSchedule) Reset() { *m = MsgRemoveSchedule{} } +func (m *MsgRemoveSchedule) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveSchedule) ProtoMessage() {} +func (*MsgRemoveSchedule) Descriptor() ([]byte, []int) { + return fileDescriptor_c9e0a673aba8d6fd, []int{2} +} +func (m *MsgRemoveSchedule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveSchedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveSchedule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveSchedule) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveSchedule.Merge(m, src) +} +func (m *MsgRemoveSchedule) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveSchedule) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveSchedule.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveSchedule proto.InternalMessageInfo + +func (m *MsgRemoveSchedule) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgRemoveSchedule) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Defines the response structure for executing a MsgRemoveSchedule message. 
+type MsgRemoveScheduleResponse struct { +} + +func (m *MsgRemoveScheduleResponse) Reset() { *m = MsgRemoveScheduleResponse{} } +func (m *MsgRemoveScheduleResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveScheduleResponse) ProtoMessage() {} +func (*MsgRemoveScheduleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c9e0a673aba8d6fd, []int{3} +} +func (m *MsgRemoveScheduleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveScheduleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveScheduleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveScheduleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveScheduleResponse.Merge(m, src) +} +func (m *MsgRemoveScheduleResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveScheduleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveScheduleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveScheduleResponse proto.InternalMessageInfo + +// The MsgUpdateParams request type. // // Since: 0.47 type MsgUpdateParams struct { - // Authority is the address of the governance account. + // The address of the governance account. Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` - // params defines the x/cron parameters to update. + // Defines the x/cron parameters to update. // // NOTE: All parameters must be supplied. Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` @@ -47,7 +258,7 @@ func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } func (*MsgUpdateParams) ProtoMessage() {} func (*MsgUpdateParams) Descriptor() ([]byte, []int) { - return fileDescriptor_c9e0a673aba8d6fd, []int{0} + return fileDescriptor_c9e0a673aba8d6fd, []int{4} } func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -90,8 +301,7 @@ func (m *MsgUpdateParams) GetParams() Params { return Params{} } -// MsgUpdateParamsResponse defines the response structure for executing a -// MsgUpdateParams message. +// Defines the response structure for executing a MsgUpdateParams message. 
// // Since: 0.47 type MsgUpdateParamsResponse struct { @@ -101,7 +311,7 @@ func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } func (*MsgUpdateParamsResponse) ProtoMessage() {} func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9e0a673aba8d6fd, []int{1} + return fileDescriptor_c9e0a673aba8d6fd, []int{5} } func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,6 +341,10 @@ func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo func init() { + proto.RegisterType((*MsgAddSchedule)(nil), "neutron.cron.MsgAddSchedule") + proto.RegisterType((*MsgAddScheduleResponse)(nil), "neutron.cron.MsgAddScheduleResponse") + proto.RegisterType((*MsgRemoveSchedule)(nil), "neutron.cron.MsgRemoveSchedule") + proto.RegisterType((*MsgRemoveScheduleResponse)(nil), "neutron.cron.MsgRemoveScheduleResponse") proto.RegisterType((*MsgUpdateParams)(nil), "neutron.cron.MsgUpdateParams") proto.RegisterType((*MsgUpdateParamsResponse)(nil), "neutron.cron.MsgUpdateParamsResponse") } @@ -138,28 +352,42 @@ func init() { func init() { proto.RegisterFile("neutron/cron/tx.proto", fileDescriptor_c9e0a673aba8d6fd) } var fileDescriptor_c9e0a673aba8d6fd = []byte{ - // 335 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0x4b, 0x2d, 0x2d, - 0x29, 0xca, 0xcf, 0xd3, 0x4f, 0x06, 0x11, 0x25, 0x15, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, - 0x3c, 0x50, 0x61, 0x3d, 0x90, 0xb0, 0x94, 0x60, 0x62, 0x6e, 0x66, 0x5e, 0xbe, 0x3e, 0x98, 0x84, - 0x28, 0x90, 0x12, 0x4f, 0xce, 0x2f, 0xce, 0xcd, 0x2f, 0xd6, 0xcf, 0x2d, 0x4e, 0xd7, 0x2f, 0x33, - 0x04, 0x51, 0x50, 0x09, 0x49, 0x88, 0x44, 0x3c, 0x98, 0xa7, 0x0f, 0xe1, 0x40, 0xa5, 0x44, 0xd2, - 0xf3, 0xd3, 0xf3, 0x21, 0xe2, 0x20, 0x16, 0x4c, 0x03, 0x8a, 0x0b, 0x0a, 0x12, 0x8b, 0x12, 0x73, - 0xa1, 0x1a, 0x94, 0x56, 0x33, 0x72, 0xf1, 0xfb, 0x16, 0xa7, 0x87, 0x16, 0xa4, 0x24, 0x96, 0xa4, - 0x06, 0x80, 0x65, 0x84, 0xcc, 0xb8, 0x38, 0x13, 0x4b, 0x4b, 0x32, 0xf2, 0x8b, 0x32, 0x4b, 0x2a, - 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0x24, 0x2e, 0x6d, 0xd1, 0x15, 0x81, 0xda, 0xe4, 0x98, - 0x92, 0x52, 0x94, 0x5a, 0x5c, 0x1c, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x1e, 0x84, 0x50, 0x2a, 0x64, - 0xce, 0xc5, 0x06, 0x31, 0x5b, 0x82, 0x49, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0xd9, 0x8b, - 0x7a, 0x10, 0xd3, 0x9d, 0x38, 0x4f, 0xdc, 0x93, 0x67, 0x58, 0xf1, 0x7c, 0x83, 0x16, 0x63, 0x10, - 0x54, 0xb9, 0x95, 0x7a, 0xd3, 0xf3, 0x0d, 0x5a, 0x08, 0x83, 0xba, 0x9e, 0x6f, 0xd0, 0x12, 0x01, - 0x3b, 0x15, 0xcd, 0x65, 0x4a, 0x92, 0x5c, 0xe2, 0x68, 0x42, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, - 0xc5, 0xa9, 0x46, 0x49, 0x5c, 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0x21, 0x5c, 0x3c, 0x28, 0x7e, 0x91, - 0x45, 0x75, 0x03, 0x9a, 0x6e, 0x29, 0x55, 0xbc, 0xd2, 0x30, 0xc3, 0xa5, 0x58, 0x1b, 0x40, 0xee, - 0x75, 0xf2, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, - 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xbd, 0xf4, 0xcc, - 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xa8, 0x89, 0xba, 0xf9, 0x45, 0xe9, 0x30, - 0xb6, 0x7e, 0x99, 0x89, 0x7e, 0x05, 0x34, 0xfe, 0x2b, 0x0b, 0x52, 0x8b, 0x93, 0xd8, 0xc0, 0xa1, - 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x60, 0xa1, 0xa6, 0xeb, 0x1c, 0x02, 0x00, 0x00, + // 554 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xbf, 0x6b, 0xdb, 0x40, + 0x14, 0xf6, 0xd9, 0x8e, 0xc1, 0xe7, 0xe0, 0x10, 0xd5, 0x75, 0x64, 0x25, 0x55, 0x8c, 0x68, 0x1b, + 0xd7, 0x10, 0x89, 0xba, 0xa5, 0x05, 0x6f, 0x71, 0x31, 0x74, 0x11, 0xb4, 0x72, 0xbb, 0x64, 0x09, + 0x8a, 0x74, 0x9c, 0x05, 0x95, 0x4e, 0xe8, 0x4e, 0xc6, 0xd9, 0x4a, 0xc7, 0x4c, 0xed, 0x5f, 0xd0, + 0xb5, 0xd0, 0xc5, 0x43, 0xff, 0x88, 0x8c, 0xa1, 0x53, 0xa7, 0x52, 0xec, 0xc1, 0xff, 0x46, 0xd1, + 0xaf, 0x44, 0x17, 0x41, 0x0a, 0x85, 0x2c, 0xa7, 0x7b, 0xef, 0xfb, 0xde, 0xd3, 0x77, 0xdf, 0x3d, + 0x0e, 0xde, 0xf7, 0x50, 0xc8, 0x02, 0xe2, 0x69, 0x56, 0xb4, 0xb0, 0xb9, 0xea, 0x07, 0x84, 0x11, + 0x61, 0x33, 0x4d, 0xab, 0x51, 0x5a, 0xda, 0x36, 0x5d, 0xc7, 0x23, 0x5a, 0xbc, 0x26, 0x04, 0x69, + 0xc7, 0x22, 0xd4, 0x25, 0x54, 0x73, 0x29, 0xd6, 0x66, 0x4f, 0xa3, 0x4f, 0x0a, 0x74, 0x12, 0xe0, + 0x24, 0x8e, 0xb4, 0x24, 0x48, 0xa1, 0x16, 0x26, 0x98, 0x24, 0xf9, 0x68, 0x97, 0x15, 0x70, 0x0a, + 0x7c, 0x33, 0x30, 0xdd, 0xac, 0x60, 0x97, 0x83, 0xa8, 0x35, 0x45, 0x76, 0xf8, 0x01, 0x25, 0xa0, + 0xf2, 0xb5, 0x0c, 0x9b, 0x3a, 0xc5, 0x47, 0xb6, 0x3d, 0x49, 0x01, 0xe1, 0x05, 0xac, 0x9b, 0x21, + 0x9b, 0x92, 0xc0, 0x61, 0x67, 0x22, 0xe8, 0x82, 0x5e, 0x7d, 0x24, 0xfe, 0xfc, 0x71, 0xd8, 0x4a, + 0x55, 0x1c, 0xd9, 0x76, 0x80, 0x28, 0x9d, 0xb0, 0xc0, 0xf1, 0xb0, 0x71, 0x4d, 0x15, 0x04, 0x58, + 0xf5, 0x4c, 0x17, 0x89, 0xe5, 0xa8, 0xc4, 0x88, 0xf7, 0x42, 0x1b, 0xd6, 0x7c, 0x14, 0x38, 0xc4, + 0x16, 0x2b, 0x5d, 0xd0, 0xab, 0x1a, 0x69, 0x24, 0x0c, 0x61, 0xd5, 0xa5, 0x98, 0x8a, 0xd5, 0x6e, + 0xa5, 0xd7, 0x18, 0x74, 0xd5, 0xbc, 0x51, 0xaa, 0x4e, 0xf1, 0x78, 0x8e, 0xac, 0x90, 0xa1, 0x57, + 0xc4, 0x63, 0x81, 0x69, 0xb1, 0x51, 0xf5, 0xe2, 0xf7, 0x7e, 0xc9, 0x88, 0x6b, 0x84, 0x31, 0xdc, + 0x42, 0x31, 0xec, 0x10, 0xef, 0x84, 0x32, 0x13, 0x23, 0x71, 0xa3, 0x0b, 0x7a, 0xcd, 0xc1, 0x1e, + 0xdf, 0x66, 0x9c, 0x91, 0x26, 0x11, 0xc7, 0x68, 0x22, 0x2e, 0x1e, 0x3e, 0xfe, 0xb4, 0x5e, 0xf4, + 0xaf, 0xe5, 0x9f, 0xaf, 0x17, 0xfd, 0x7b, 0xb1, 0x43, 0xbc, 0x1d, 0x8a, 0x08, 0xdb, 0x7c, 0xc6, + 0x40, 0xd4, 0x27, 0x1e, 0x45, 0xca, 0x39, 0x80, 0xdb, 0x3a, 0xc5, 0x06, 0x72, 0xc9, 0x0c, 0xdd, + 0x85, 0x7d, 0xc3, 0x27, 0x45, 0x8d, 0xed, 0x4c, 0x23, 0xff, 0x5b, 0x65, 0x17, 0x76, 0x0a, 0xc9, + 0x2b, 0xa5, 0xdf, 0x01, 0xdc, 0xd2, 0x29, 0x7e, 0xef, 0xdb, 0x26, 0x43, 0x6f, 0xe2, 0xe1, 0xf8, + 0x6f, 0x9d, 0x2f, 0x61, 0x2d, 0x19, 0xaf, 0x58, 0x69, 0x63, 0xd0, 0xe2, 0x5d, 0x4f, 0xba, 0x8f, + 0xea, 0xd1, 0x85, 0x7d, 0x5b, 0x2f, 0xfa, 0xc0, 0x48, 0xe9, 0xc3, 0x83, 0xe2, 0x61, 0x5a, 0xd9, + 0x61, 0xf2, 0xca, 0x94, 0x0e, 0xdc, 0xb9, 0x91, 0xca, 0x0e, 0x32, 0xf8, 0x52, 0x86, 0x15, 0x9d, + 0x62, 0xe1, 0x2d, 0x6c, 0xe4, 0x47, 0x76, 0xaf, 0x30, 0x40, 0x39, 0x54, 0x7a, 0x78, 0x1b, 0x9a, + 0xb5, 0x16, 0x8e, 0x61, 0xf3, 0xc6, 0x4d, 0xee, 0x17, 0xea, 0x78, 0x82, 0x74, 0xf0, 0x0f, 0xc2, + 0x55, 0xef, 0x77, 0x70, 0x93, 0xf3, 0xfe, 0x41, 0xa1, 0x30, 0x0f, 0x4b, 0x8f, 0x6e, 0x85, 0xb3, + 0xae, 0xd2, 0xc6, 0xc7, 0xc8, 0xdf, 0xd1, 0xeb, 0x8b, 0xa5, 0x0c, 0x2e, 0x97, 0x32, 0xf8, 0xb3, + 0x94, 0xc1, 0xe7, 0x95, 0x5c, 0xba, 0x5c, 0xc9, 0xa5, 0x5f, 0x2b, 0xb9, 0x74, 0xac, 0x62, 0x87, + 0x4d, 0xc3, 0x53, 0xd5, 0x22, 0xae, 0x96, 0x76, 0x3c, 0x24, 0x01, 0xce, 0xf6, 0xda, 0xec, 0xb9, + 0x36, 0x4f, 0x9f, 0xac, 0x33, 0x1f, 0xd1, 0xd3, 0x5a, 0xfc, 0x26, 0x3c, 0xfb, 0x1b, 0x00, 0x00, + 0xff, 0xff, 0xf7, 0xca, 0x79, 0x29, 0xcf, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
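// A hedged usage sketch for the new message types (field names taken from the structs
// above; the authority value and schedule contents below are placeholders):
//
//	msg := &types.MsgAddSchedule{
//		Authority:      authority, // must equal the module's configured authority
//		Name:           "daily_rebalance",
//		Period:         100,
//		Msgs:           []types.MsgExecuteContract{{Contract: contractAddr, Msg: `{"tick":{}}`}},
//		ExecutionStage: types.ExecutionStage_EXECUTION_STAGE_BEGIN_BLOCKER,
//	}
//
// A schedule added this way can later be dropped with MsgRemoveSchedule{Authority: ..., Name: ...}.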
@@ -174,6 +402,11 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type MsgClient interface { + // Adds new schedule. + AddSchedule(ctx context.Context, in *MsgAddSchedule, opts ...grpc.CallOption) (*MsgAddScheduleResponse, error) + // Removes schedule. + RemoveSchedule(ctx context.Context, in *MsgRemoveSchedule, opts ...grpc.CallOption) (*MsgRemoveScheduleResponse, error) + // Updates the module parameters. UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) } @@ -185,6 +418,24 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient { return &msgClient{cc} } +func (c *msgClient) AddSchedule(ctx context.Context, in *MsgAddSchedule, opts ...grpc.CallOption) (*MsgAddScheduleResponse, error) { + out := new(MsgAddScheduleResponse) + err := c.cc.Invoke(ctx, "/neutron.cron.Msg/AddSchedule", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RemoveSchedule(ctx context.Context, in *MsgRemoveSchedule, opts ...grpc.CallOption) (*MsgRemoveScheduleResponse, error) { + out := new(MsgRemoveScheduleResponse) + err := c.cc.Invoke(ctx, "/neutron.cron.Msg/RemoveSchedule", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { out := new(MsgUpdateParamsResponse) err := c.cc.Invoke(ctx, "/neutron.cron.Msg/UpdateParams", in, out, opts...) @@ -196,6 +447,11 @@ func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts // MsgServer is the server API for Msg service. type MsgServer interface { + // Adds new schedule. + AddSchedule(context.Context, *MsgAddSchedule) (*MsgAddScheduleResponse, error) + // Removes schedule. + RemoveSchedule(context.Context, *MsgRemoveSchedule) (*MsgRemoveScheduleResponse, error) + // Updates the module parameters. 
UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) } @@ -203,6 +459,12 @@ type MsgServer interface { type UnimplementedMsgServer struct { } +func (*UnimplementedMsgServer) AddSchedule(ctx context.Context, req *MsgAddSchedule) (*MsgAddScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddSchedule not implemented") +} +func (*UnimplementedMsgServer) RemoveSchedule(ctx context.Context, req *MsgRemoveSchedule) (*MsgRemoveScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveSchedule not implemented") +} func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") } @@ -211,6 +473,42 @@ func RegisterMsgServer(s grpc1.Server, srv MsgServer) { s.RegisterService(&_Msg_serviceDesc, srv) } +func _Msg_AddSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgAddSchedule) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).AddSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/neutron.cron.Msg/AddSchedule", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).AddSchedule(ctx, req.(*MsgAddSchedule)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RemoveSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRemoveSchedule) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RemoveSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/neutron.cron.Msg/RemoveSchedule", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RemoveSchedule(ctx, req.(*MsgRemoveSchedule)) + } + return interceptor(ctx, in, info, handler) +} + func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgUpdateParams) if err := dec(in); err != nil { @@ -233,6 +531,14 @@ var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "neutron.cron.Msg", HandlerType: (*MsgServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "AddSchedule", + Handler: _Msg_AddSchedule_Handler, + }, + { + MethodName: "RemoveSchedule", + Handler: _Msg_RemoveSchedule_Handler, + }, { MethodName: "UpdateParams", Handler: _Msg_UpdateParams_Handler, @@ -242,7 +548,7 @@ var _Msg_serviceDesc = grpc.ServiceDesc{ Metadata: "neutron/cron/tx.proto", } -func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { +func (m *MsgAddSchedule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -252,26 +558,47 @@ func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { +func (m *MsgAddSchedule) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MsgAddSchedule) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.ExecutionStage != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.ExecutionStage)) + i-- + dAtA[i] = 0x28 + } + if len(m.Msgs) > 0 { + for iNdEx := len(m.Msgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Msgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i -= size - i = encodeVarintTx(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 + if m.Period != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Period)) + i-- + dAtA[i] = 0x18 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTx(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } if len(m.Authority) > 0 { i -= len(m.Authority) copy(dAtA[i:], m.Authority) @@ -282,7 +609,7 @@ func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { +func (m *MsgAddScheduleResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -292,12 +619,12 @@ func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *MsgAddScheduleResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MsgAddScheduleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -305,46 +632,633 @@ func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func encodeVarintTx(dAtA []byte, offset int, v uint64) int { - offset -= sovTx(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *MsgRemoveSchedule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *MsgUpdateParams) Size() (n int) { - if m == nil { - return 0 - } + +func (m *MsgRemoveSchedule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveSchedule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Authority) - if l > 0 { - n += 1 + l + sovTx(uint64(l)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTx(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 } - l = m.Params.Size() - n += 1 + l + sovTx(uint64(l)) - return n + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *MsgUpdateParamsResponse) Size() (n int) { - if m == nil { - return 0 +func (m *MsgRemoveScheduleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *MsgRemoveScheduleResponse) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveScheduleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - return n + return len(dAtA) - i, nil } -func sovTx(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTx(x uint64) (n int) { - return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgAddSchedule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Period != 0 { + n += 1 + sovTx(uint64(m.Period)) + } + if len(m.Msgs) > 0 { + for _, e := range m.Msgs { + l = e.Size() + n += 1 + l + sovTx(uint64(l)) + } + } + if m.ExecutionStage != 0 { + n += 1 + sovTx(uint64(m.ExecutionStage)) + } + return n +} + +func (m *MsgAddScheduleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRemoveSchedule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgRemoveScheduleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgAddSchedule) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddSchedule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddSchedule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + m.Period = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Period |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msgs = append(m.Msgs, MsgExecuteContract{}) + if err := m.Msgs[len(m.Msgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutionStage", wireType) + } + m.ExecutionStage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExecutionStage |= ExecutionStage(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { 
+ return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAddScheduleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddScheduleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddScheduleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveSchedule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveSchedule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveSchedule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveScheduleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveScheduleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveScheduleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { l := len(dAtA) diff --git a/x/cron/types/v1/schedule.pb.go b/x/cron/types/v1/schedule.pb.go new file mode 100644 index 000000000..3a35f34e6 --- /dev/null +++ b/x/cron/types/v1/schedule.pb.go @@ -0,0 +1,842 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: neutron/cron/v1/schedule.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
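The tx.pb.go changes above add two new cron messages alongside MsgUpdateParams. Below is a minimal, illustrative sketch of constructing the new MsgAddSchedule; the addresses, schedule name, period and contract message are placeholders, and only the field names and types come from the generated code in this diff.

package main

import (
	"fmt"

	crontypes "github.com/neutron-org/neutron/v4/x/cron/types"
)

func main() {
	// All values are hypothetical; Authority must be whatever address the cron
	// MsgServer accepts as its authority (not shown in this hunk).
	msg := &crontypes.MsgAddSchedule{
		Authority: "neutron1authority...", // placeholder
		Name:      "example_schedule",
		Period:    100, // execute every 100 blocks
		Msgs: []crontypes.MsgExecuteContract{
			{Contract: "neutron1contract...", Msg: `{"tick":{}}`},
		},
		// ExecutionStage is left at its zero value; the enum constants live with the
		// generated types and are not shown in this hunk.
	}
	fmt.Println(msg.String())
}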
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Defines the schedule for execution +type Schedule struct { + // Name of schedule + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Period in blocks + Period uint64 `protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty"` + // Msgs that will be executed every certain number of blocks, specified in the `period` field + Msgs []MsgExecuteContract `protobuf:"bytes,3,rep,name=msgs,proto3" json:"msgs"` + // Last execution's block height + LastExecuteHeight uint64 `protobuf:"varint,4,opt,name=last_execute_height,json=lastExecuteHeight,proto3" json:"last_execute_height,omitempty"` +} + +func (m *Schedule) Reset() { *m = Schedule{} } +func (m *Schedule) String() string { return proto.CompactTextString(m) } +func (*Schedule) ProtoMessage() {} +func (*Schedule) Descriptor() ([]byte, []int) { + return fileDescriptor_cd4938034d592826, []int{0} +} +func (m *Schedule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Schedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Schedule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Schedule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schedule.Merge(m, src) +} +func (m *Schedule) XXX_Size() int { + return m.Size() +} +func (m *Schedule) XXX_DiscardUnknown() { + xxx_messageInfo_Schedule.DiscardUnknown(m) +} + +var xxx_messageInfo_Schedule proto.InternalMessageInfo + +func (m *Schedule) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schedule) GetPeriod() uint64 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *Schedule) GetMsgs() []MsgExecuteContract { + if m != nil { + return m.Msgs + } + return nil +} + +func (m *Schedule) GetLastExecuteHeight() uint64 { + if m != nil { + return m.LastExecuteHeight + } + return 0 +} + +// Defines the contract and the message to pass +type MsgExecuteContract struct { + // The address of the smart contract + Contract string `protobuf:"bytes,1,opt,name=contract,proto3" json:"contract,omitempty"` + // JSON encoded message to be passed to the contract + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *MsgExecuteContract) Reset() { *m = MsgExecuteContract{} } +func (m *MsgExecuteContract) String() string { return proto.CompactTextString(m) } +func (*MsgExecuteContract) ProtoMessage() {} +func (*MsgExecuteContract) Descriptor() ([]byte, []int) { + return fileDescriptor_cd4938034d592826, []int{1} +} +func (m *MsgExecuteContract) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgExecuteContract) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgExecuteContract.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgExecuteContract) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgExecuteContract.Merge(m, src) +} +func (m *MsgExecuteContract) XXX_Size() int { + return m.Size() +} +func (m *MsgExecuteContract) XXX_DiscardUnknown() { + xxx_messageInfo_MsgExecuteContract.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgExecuteContract proto.InternalMessageInfo + +func (m *MsgExecuteContract) GetContract() string { + if m 
!= nil { + return m.Contract + } + return "" +} + +func (m *MsgExecuteContract) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +// Defines the number of current schedules +type ScheduleCount struct { + // The number of current schedules + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *ScheduleCount) Reset() { *m = ScheduleCount{} } +func (m *ScheduleCount) String() string { return proto.CompactTextString(m) } +func (*ScheduleCount) ProtoMessage() {} +func (*ScheduleCount) Descriptor() ([]byte, []int) { + return fileDescriptor_cd4938034d592826, []int{2} +} +func (m *ScheduleCount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScheduleCount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScheduleCount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScheduleCount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScheduleCount.Merge(m, src) +} +func (m *ScheduleCount) XXX_Size() int { + return m.Size() +} +func (m *ScheduleCount) XXX_DiscardUnknown() { + xxx_messageInfo_ScheduleCount.DiscardUnknown(m) +} + +var xxx_messageInfo_ScheduleCount proto.InternalMessageInfo + +func (m *ScheduleCount) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +func init() { + proto.RegisterType((*Schedule)(nil), "neutron.cron.v1.Schedule") + proto.RegisterType((*MsgExecuteContract)(nil), "neutron.cron.v1.MsgExecuteContract") + proto.RegisterType((*ScheduleCount)(nil), "neutron.cron.v1.ScheduleCount") +} + +func init() { proto.RegisterFile("neutron/cron/v1/schedule.proto", fileDescriptor_cd4938034d592826) } + +var fileDescriptor_cd4938034d592826 = []byte{ + // 316 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0xc1, 0x4a, 0xc3, 0x30, + 0x18, 0xc7, 0x1b, 0xd7, 0x8d, 0x2d, 0x22, 0x6a, 0x1c, 0x52, 0x76, 0x88, 0x63, 0x22, 0xec, 0x62, + 0x42, 0xd5, 0xab, 0x97, 0x0d, 0x41, 0x10, 0x2f, 0xf5, 0xe6, 0x65, 0x6c, 0x59, 0x48, 0x07, 0x6b, + 0x53, 0x9a, 0xb4, 0xcc, 0xb7, 0xf0, 0x25, 0x7c, 0x97, 0x1d, 0x77, 0xf4, 0x24, 0xd2, 0xbe, 0x88, + 0x24, 0xcd, 0x3c, 0xe8, 0x25, 0xfc, 0xfe, 0xfc, 0xbf, 0x7f, 0xbe, 0xef, 0x4b, 0x20, 0x4e, 0x79, + 0xa1, 0x73, 0x99, 0x52, 0x66, 0x8e, 0x32, 0xa4, 0x8a, 0xc5, 0x7c, 0x59, 0xac, 0x39, 0xc9, 0x72, + 0xa9, 0x25, 0x3a, 0x76, 0x3e, 0x31, 0x3e, 0x29, 0xc3, 0x41, 0x5f, 0x48, 0x21, 0xad, 0x47, 0x0d, + 0x35, 0x65, 0xa3, 0x0f, 0x00, 0xbb, 0x2f, 0x2e, 0x89, 0x10, 0xf4, 0xd3, 0x79, 0xc2, 0x03, 0x30, + 0x04, 0xe3, 0x5e, 0x64, 0x19, 0x9d, 0xc3, 0x4e, 0xc6, 0xf3, 0x95, 0x5c, 0x06, 0x07, 0x43, 0x30, + 0xf6, 0x23, 0xa7, 0xd0, 0x3d, 0xf4, 0x13, 0x25, 0x54, 0xd0, 0x1a, 0xb6, 0xc6, 0x87, 0x37, 0x97, + 0xe4, 0x4f, 0x3b, 0xf2, 0xac, 0xc4, 0xc3, 0x86, 0xb3, 0x42, 0xf3, 0xa9, 0x4c, 0x75, 0x3e, 0x67, + 0x7a, 0xe2, 0x6f, 0xbf, 0x2e, 0xbc, 0xc8, 0xc6, 0x10, 0x81, 0x67, 0xeb, 0xb9, 0xd2, 0x33, 0xde, + 0xd4, 0xcc, 0x62, 0xbe, 0x12, 0xb1, 0x0e, 0x7c, 0xdb, 0xe3, 0xd4, 0x58, 0x2e, 0xfd, 0x68, 0x8d, + 0xd1, 0x04, 0xa2, 0xff, 0x37, 0xa2, 0x01, 0xec, 0x32, 0xc7, 0x6e, 0xe8, 0x5f, 0x8d, 0x4e, 0x60, + 0x2b, 0x51, 0xc2, 0x4e, 0xdd, 0x8b, 0x0c, 0x8e, 0xae, 0xe0, 0xd1, 0x7e, 0xd5, 0xa9, 0x2c, 0x52, + 0x8d, 0xfa, 0xb0, 0xcd, 0x0c, 0xd8, 0x6c, 0x3b, 0x6a, 0xc4, 0xe4, 0x69, 0x5b, 0x61, 0xb0, 0xab, + 0x30, 0xf8, 0xae, 0x30, 0x78, 0xaf, 0xb1, 0xb7, 0xab, 0xb1, 0xf7, 0x59, 0x63, 
0xef, 0x35, 0x14, + 0x2b, 0x1d, 0x17, 0x0b, 0xc2, 0x64, 0x42, 0xdd, 0xbe, 0xd7, 0x32, 0x17, 0x7b, 0xa6, 0xe5, 0x1d, + 0xdd, 0x34, 0xff, 0xa1, 0xdf, 0x32, 0xae, 0x68, 0x19, 0x2e, 0x3a, 0xf6, 0x99, 0x6f, 0x7f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x4d, 0x2d, 0x47, 0x23, 0xaf, 0x01, 0x00, 0x00, +} + +func (m *Schedule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Schedule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Schedule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastExecuteHeight != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.LastExecuteHeight)) + i-- + dAtA[i] = 0x20 + } + if len(m.Msgs) > 0 { + for iNdEx := len(m.Msgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Msgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSchedule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Period != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.Period)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSchedule(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgExecuteContract) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgExecuteContract) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgExecuteContract) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintSchedule(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x12 + } + if len(m.Contract) > 0 { + i -= len(m.Contract) + copy(dAtA[i:], m.Contract) + i = encodeVarintSchedule(dAtA, i, uint64(len(m.Contract))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScheduleCount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScheduleCount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScheduleCount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintSchedule(dAtA []byte, offset int, v uint64) int { + offset -= sovSchedule(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Schedule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSchedule(uint64(l)) + } + if m.Period != 0 { + n += 1 + sovSchedule(uint64(m.Period)) + } + if len(m.Msgs) > 0 { + for _, e := range m.Msgs { + l = e.Size() + n += 1 + l + sovSchedule(uint64(l)) + } + } + if m.LastExecuteHeight != 0 { + n += 1 + sovSchedule(uint64(m.LastExecuteHeight)) + } + 
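// Annotation (not generated code): sovSchedule(x), defined further down, is the number of
// varint bytes needed to encode x, i.e. (bits.Len64(x|1)+6)/7. For example, a
// LastExecuteHeight of 300 contributes 1 tag byte plus sovSchedule(300) = 2 payload bytes to n.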
return n +} + +func (m *MsgExecuteContract) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Contract) + if l > 0 { + n += 1 + l + sovSchedule(uint64(l)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovSchedule(uint64(l)) + } + return n +} + +func (m *ScheduleCount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != 0 { + n += 1 + sovSchedule(uint64(m.Count)) + } + return n +} + +func sovSchedule(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSchedule(x uint64) (n int) { + return sovSchedule(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Schedule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Schedule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Schedule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + m.Period = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Period |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msgs = append(m.Msgs, MsgExecuteContract{}) + if err := m.Msgs[len(m.Msgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastExecuteHeight", wireType) + } + m.LastExecuteHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastExecuteHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipSchedule(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSchedule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgExecuteContract) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgExecuteContract: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgExecuteContract: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Contract", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Contract = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedule(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSchedule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduleCount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduleCount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduleCount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedule(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSchedule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSchedule(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSchedule + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSchedule + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSchedule + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSchedule = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSchedule = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSchedule = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/dex/keeper/cancel_limit_order.go b/x/dex/keeper/cancel_limit_order.go index 8538313bb..2b1a3f668 100644 --- a/x/dex/keeper/cancel_limit_order.go +++ b/x/dex/keeper/cancel_limit_order.go @@ -6,6 +6,7 @@ import ( sdkerrors "cosmossdk.io/errors" sdk "github.com/cosmos/cosmos-sdk/types" + math_utils "github.com/neutron-org/neutron/v4/utils/math" "github.com/neutron-org/neutron/v4/x/dex/types" ) @@ -78,11 +79,22 @@ func (k Keeper) ExecuteCancelLimitOrder( makerAmountToReturn := tranche.RemoveTokenIn(trancheUser) _, takerAmountOut := tranche.Withdraw(trancheUser) - trancheUser.SharesWithdrawn = trancheUser.SharesOwned - - // Remove the canceled shares from the limitOrder + // Remove the canceled shares from the maker side of the limitOrder tranche.TotalMakerDenom = tranche.TotalMakerDenom.Sub(trancheUser.SharesOwned) - tranche.TotalTakerDenom = tranche.TotalTakerDenom.Sub(takerAmountOut) + + // Calculate total number of shares removed previously withdrawn by the user (denominated in takerDenom) + sharesWithdrawnTakerDenom := math_utils.NewPrecDecFromInt(trancheUser.SharesWithdrawn). + Quo(tranche.PriceTakerToMaker). 
+ TruncateInt() + + // Calculate the total amount removed including prior withdrawals (denominated in takerDenom) + totalAmountOutTakerDenom := sharesWithdrawnTakerDenom.Add(takerAmountOut) + + // Decrease the tranche TotalTakerDenom by the amount being removed + tranche.TotalTakerDenom = tranche.TotalTakerDenom.Sub(totalAmountOutTakerDenom) + + // Set TrancheUser to 100% shares withdrawn + trancheUser.SharesWithdrawn = trancheUser.SharesOwned if !makerAmountToReturn.IsPositive() && !takerAmountOut.IsPositive() { return sdk.Coin{}, sdk.Coin{}, nil, sdkerrors.Wrapf(types.ErrCancelEmptyLimitOrder, "%s", tranche.Key.TrancheKey) diff --git a/x/dex/keeper/integration_cancellimitorder_test.go b/x/dex/keeper/integration_cancellimitorder_test.go index c21e96274..793e2d7e6 100644 --- a/x/dex/keeper/integration_cancellimitorder_test.go +++ b/x/dex/keeper/integration_cancellimitorder_test.go @@ -442,3 +442,67 @@ func (s *DexTestSuite) TestCancelJITNextBlock() { }) s.False(found) } + +func (s *DexTestSuite) TestWithdrawThenCancel() { + s.fundAliceBalances(50, 0) + s.fundBobBalances(50, 0) + s.fundCarolBalances(0, 40) + + // // GIVEN alice and bob each limit sells 50 TokenA + trancheKey := s.aliceLimitSells("TokenA", 0, 50) + s.bobLimitSells("TokenA", 0, 50) + + s.carolLimitSells("TokenB", -1, 10, types.LimitOrderType_FILL_OR_KILL) + + // WHEN alice withdraws and cancels her limit order + s.aliceWithdrawsLimitSell(trancheKey) + s.aliceCancelsLimitSell(trancheKey) + s.assertAliceBalances(45, 5) + + s.bobWithdrawsLimitSell(trancheKey) + s.assertBobBalances(0, 5) + s.bobCancelsLimitSell(trancheKey) + s.assertBobBalances(45, 5) +} + +func (s *DexTestSuite) TestWithdrawThenCancel2() { + s.fundAliceBalances(50, 0) + s.fundBobBalances(50, 0) + s.fundCarolBalances(0, 40) + + // // GIVEN alice and bob each limit sells 50 TokenA + trancheKey := s.aliceLimitSells("TokenA", 0, 50) + s.bobLimitSells("TokenA", 0, 50) + + s.carolLimitSells("TokenB", -1, 10, types.LimitOrderType_FILL_OR_KILL) + + // WHEN alice withdraws and cancels her limit order + s.aliceWithdrawsLimitSell(trancheKey) + s.aliceCancelsLimitSell(trancheKey) + s.assertAliceBalances(45, 5) + + s.bobCancelsLimitSell(trancheKey) + s.assertBobBalances(45, 5) +} + +func (s *DexTestSuite) TestWithdrawThenCancelLowTick() { + s.fundAliceBalances(50, 0) + s.fundBobBalances(50, 0) + s.fundCarolBalances(0, 40) + + // // GIVEN alice and bob each limit sells 50 TokenA + trancheKey := s.aliceLimitSells("TokenA", 20000, 50) + s.bobLimitSells("TokenA", 20000, 50) + + s.carolLimitSells("TokenB", -20001, 10, types.LimitOrderType_FILL_OR_KILL) + + // WHEN alice withdraws and cancels her limit order + s.aliceWithdrawsLimitSell(trancheKey) + s.aliceCancelsLimitSell(trancheKey) + s.assertAliceBalancesInt(sdkmath.NewInt(13058413), sdkmath.NewInt(4999999)) + + s.bobWithdrawsLimitSell(trancheKey) + s.assertBobBalancesInt(sdkmath.ZeroInt(), sdkmath.NewInt(4999999)) + s.bobCancelsLimitSell(trancheKey) + s.assertBobBalancesInt(sdkmath.NewInt(13058413), sdkmath.NewInt(4999999)) +} diff --git a/x/dex/keeper/integration_withdrawfilled_test.go b/x/dex/keeper/integration_withdrawfilled_test.go index 8d8ba338b..97df676c0 100644 --- a/x/dex/keeper/integration_withdrawfilled_test.go +++ b/x/dex/keeper/integration_withdrawfilled_test.go @@ -386,8 +386,27 @@ func (s *DexTestSuite) TestWithdrawPartiallyGTTFilledCancelled() { s.False(found, "Alice's LimitOrderTrancheUser not removed") } -// testcancel unfilled +func (s *DexTestSuite) TestWithdrawInactive() { + s.fundAliceBalances(10, 0) + 
s.fundBobBalances(0, 20) -// test withdraw expired + // GIVEN Alice places an expiring limit order of A + trancheKey := s.aliceLimitSellsGoodTil("TokenA", 0, 10, time.Now()) + + // Bob trades through half of it + s.bobLimitSells("TokenB", -1, 5) + + // Alice withdraws the profits + s.aliceWithdrawsLimitSell(trancheKey) + s.assertAliceBalances(0, 5) + + // bob swap through more + s.bobLimitSells("TokenB", -1, 4) -// how does cancel withdraw work does it call into was filled + // WHEN it is purged + s.App.DexKeeper.PurgeExpiredLimitOrders(s.Ctx, time.Now()) + + // THEN alice can withdraw the expected amount + s.aliceWithdrawsLimitSell(trancheKey) + s.assertAliceBalances(1, 9) +} diff --git a/x/dex/keeper/msg_server.go b/x/dex/keeper/msg_server.go index 1211cc57b..2b3bca666 100644 --- a/x/dex/keeper/msg_server.go +++ b/x/dex/keeper/msg_server.go @@ -259,12 +259,6 @@ func (k MsgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParam return &types.MsgUpdateParamsResponse{}, nil } -func (k MsgServer) AssertNotPaused(goCtx context.Context) error { - ctx := sdk.UnwrapSDKContext(goCtx) - paused := k.GetParams(ctx).Paused - - if paused { - return types.ErrDexPaused - } - return nil +func (k MsgServer) AssertNotPaused(_ context.Context) error { + return types.ErrDexPaused } diff --git a/x/ibcswap/ibc_middleware.go b/x/ibcswap/ibc_middleware.go deleted file mode 100644 index 9f5b5edab..000000000 --- a/x/ibcswap/ibc_middleware.go +++ /dev/null @@ -1,428 +0,0 @@ -package ibcswap - -import ( - "encoding/json" - "errors" - "strings" - - sdkerrors "cosmossdk.io/errors" - "cosmossdk.io/math" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8/packetforward" - capabilitytypes "github.com/cosmos/ibc-go/modules/capability/types" - transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types" - clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" //nolint:staticcheck - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - porttypes "github.com/cosmos/ibc-go/v8/modules/core/05-port/types" - ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" - - "github.com/neutron-org/neutron/v4/x/ibcswap/keeper" - "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -var _ porttypes.Middleware = &IBCMiddleware{} - -// IBCMiddleware implements the ICS26 callbacks for the swap middleware given the -// swap keeper and the underlying application. -type IBCMiddleware struct { - app porttypes.IBCModule - keeper keeper.Keeper -} - -// NewIBCMiddleware creates a new IBCMiddleware given the keeper and underlying application. -func NewIBCMiddleware(app porttypes.IBCModule, k keeper.Keeper) IBCMiddleware { - return IBCMiddleware{ - app: app, - keeper: k, - } -} - -// OnChanOpenInit implements the IBCModule interface. -func (im IBCMiddleware) OnChanOpenInit( - ctx sdk.Context, - order channeltypes.Order, - connectionHops []string, - portID string, - channelID string, - chanCap *capabilitytypes.Capability, - counterparty channeltypes.Counterparty, - version string, -) (string, error) { - return im.app.OnChanOpenInit( - ctx, - order, - connectionHops, - portID, - channelID, - chanCap, - counterparty, - version, - ) -} - -// OnChanOpenTry implements the IBCModule interface. 
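The new bookkeeping in the x/dex/keeper/cancel_limit_order.go hunk above is easiest to see with numbers. The sketch below is illustrative only; the values are hypothetical (they roughly mirror the TestWithdrawThenCancel setup at tick 0, where PriceTakerToMaker is 1), and the local variables stand in for the tranche and trancheUser fields used in ExecuteCancelLimitOrder.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"

	math_utils "github.com/neutron-org/neutron/v4/utils/math"
)

func main() {
	sharesOwned := sdkmath.NewInt(50)                                    // maker placed 50 TokenA
	sharesWithdrawn := sdkmath.NewInt(5)                                 // shares already cashed out via Withdraw
	takerAmountOut := sdkmath.NewInt(0)                                  // nothing new to withdraw at cancel time
	priceTakerToMaker := math_utils.NewPrecDecFromInt(sdkmath.NewInt(1)) // tick 0 => price 1.0

	// Convert the previously withdrawn amount into taker denom and include it in the
	// amount removed from the tranche, as the hunk above now does.
	sharesWithdrawnTakerDenom := math_utils.NewPrecDecFromInt(sharesWithdrawn).
		Quo(priceTakerToMaker).
		TruncateInt()
	totalAmountOutTakerDenom := sharesWithdrawnTakerDenom.Add(takerAmountOut)

	fmt.Println("TotalMakerDenom -=", sharesOwned)              // 50
	fmt.Println("TotalTakerDenom -=", totalAmountOutTakerDenom) // 5; the pre-fix code subtracted only takerAmountOut (0)
}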
-func (im IBCMiddleware) OnChanOpenTry( - ctx sdk.Context, - order channeltypes.Order, - connectionHops []string, - portID, channelID string, - chanCap *capabilitytypes.Capability, - counterparty channeltypes.Counterparty, - counterpartyVersion string, -) (version string, err error) { - return im.app.OnChanOpenTry( - ctx, - order, - connectionHops, - portID, - channelID, - chanCap, - counterparty, - counterpartyVersion, - ) -} - -// OnChanOpenAck implements the IBCModule interface. -func (im IBCMiddleware) OnChanOpenAck( - ctx sdk.Context, - portID, channelID string, - counterpartyChannelID string, - counterpartyVersion string, -) error { - return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) -} - -// OnChanOpenConfirm implements the IBCModule interface. -func (im IBCMiddleware) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { - return im.app.OnChanOpenConfirm(ctx, portID, channelID) -} - -// OnChanCloseInit implements the IBCModule interface. -func (im IBCMiddleware) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { - return im.app.OnChanCloseInit(ctx, portID, channelID) -} - -// OnChanCloseConfirm implements the IBCModule interface. -func (im IBCMiddleware) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { - return im.app.OnChanCloseConfirm(ctx, portID, channelID) -} - -// OnRecvPacket checks the memo field on this packet and if the metadata inside's root key indicates this packet -// should be handled by the swap middleware it attempts to perform a swap. If the swap is successful -// the underlying application's OnRecvPacket callback is invoked. - -// For clarity, here is a breakdown of the steps -// 1. Check if this is a swap packet; if not pass it to next middleware -// 2. validate swapMetadata; ErrAck if invalid -// 3. Pass through the middleware stack to ibc-go/transfer#OnRecvPacket; transfer coins are sent to receiver -// 4. Do swap; handle failures - -func (im IBCMiddleware) OnRecvPacket( - ctx sdk.Context, - packet channeltypes.Packet, - relayer sdk.AccAddress, -) ibcexported.Acknowledgement { - var data transfertypes.FungibleTokenPacketData - if err := transfertypes.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { - return channeltypes.NewErrorAcknowledgement(err) - } - - m := &types.PacketMetadata{} - err := json.Unmarshal([]byte(data.Memo), m) - if err != nil || m.Swap == nil { - // Not a packet that should be handled by the swap middleware - return im.app.OnRecvPacket(ctx, packet, relayer) - } - - metadata := m.Swap - - if err := metadata.Validate(); err != nil { - return channeltypes.NewErrorAcknowledgement(err) - } - - if err := validateSwapPacket(packet, data, *metadata); err != nil { - return channeltypes.NewErrorAcknowledgement(err) - } - - // Use overrideReceiver so that users cannot ibcswap through arbitrary addresses. 
- // Instead generate a unique address for each user based on their channel and origin-address - originalCreator := m.Swap.Creator - overrideReceiver, err := packetforward.GetReceiver(packet.DestinationChannel, data.Sender) - if err != nil { - return channeltypes.NewErrorAcknowledgement(err) - } - metadata.Creator = overrideReceiver - // Update packet data to match the new receiver so that transfer middleware adds tokens to the expected address - packet = newPacketWithOverrideReceiver(packet, data, overrideReceiver) - - ack := im.app.OnRecvPacket(ctx, packet, relayer) - if ack == nil || !ack.Success() { - return ack - } - - // Attempt to perform a swap using a cacheCtx - cacheCtx, writeCache := ctx.CacheContext() - res, err := im.keeper.Swap(cacheCtx, originalCreator, metadata.MsgPlaceLimitOrder) - if err != nil { - return im.handleFailedSwap(ctx, packet, data, metadata, err) - } - - // If there is no next field set in the metadata return ack - if metadata.Next == nil { - writeCache() - return ack - } - - // We need to reset the packets memo field so that the root key in the metadata is the - // next field from the current metadata. - memoBz, err := json.Marshal(metadata.Next) - if err != nil { - return ack - } - - postSwapData := data - postSwapData.Memo = string(memoBz) - - // Override the packet data to include the token denom and amount that was received from the swap. - postSwapData.Denom = res.TakerCoinOut.Denom - postSwapData.Amount = res.TakerCoinOut.Amount.String() - - // After a successful swap funds are now in the receiver account from the MsgPlaceLimitOrder so, - // we need to override the packets receiver field before invoking the forward middlewares OnRecvPacket. - postSwapData.Receiver = m.Swap.Receiver - - dataBz, err := transfertypes.ModuleCdc.MarshalJSON(&postSwapData) - if err != nil { - return ack - } - - packet.Data = dataBz - - // The forward middleware should return a nil ack if the forward is initiated properly. - // If not an error occurred, and we return the original ack. - newAck := im.app.OnRecvPacket(cacheCtx, packet, relayer) - if newAck != nil { - return im.handleFailedSwap(ctx, packet, data, metadata, errors.New(string(newAck.Acknowledgement()))) - } - - writeCache() - return nil -} - -// OnAcknowledgementPacket implements the IBCModule interface. -func (im IBCMiddleware) OnAcknowledgementPacket( - ctx sdk.Context, - packet channeltypes.Packet, - acknowledgement []byte, - relayer sdk.AccAddress, -) error { - return im.app.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer) -} - -// OnTimeoutPacket implements the IBCModule interface. -func (im IBCMiddleware) OnTimeoutPacket( - ctx sdk.Context, - packet channeltypes.Packet, - relayer sdk.AccAddress, -) error { - return im.app.OnTimeoutPacket(ctx, packet, relayer) -} - -func (im IBCMiddleware) SendPacket( - ctx sdk.Context, - chanCap *capabilitytypes.Capability, - sourcePort string, - sourceChannel string, - timeoutHeight clienttypes.Height, - timeoutTimestamp uint64, - data []byte, -) (sequence uint64, err error) { - return im.keeper.SendPacket( - ctx, - chanCap, - sourcePort, - sourceChannel, - timeoutHeight, - timeoutTimestamp, - data, - ) -} - -// WriteAcknowledgement implements the ICS4 Wrapper interface. 
-func (im IBCMiddleware) WriteAcknowledgement( - ctx sdk.Context, - chanCap *capabilitytypes.Capability, - packet ibcexported.PacketI, - ack ibcexported.Acknowledgement, -) error { - return im.keeper.WriteAcknowledgement(ctx, chanCap, packet, ack) -} - -func (im IBCMiddleware) GetAppVersion( - ctx sdk.Context, - portID string, - channelID string, -) (string, bool) { - return im.keeper.GetAppVersion(ctx, portID, channelID) -} - -// handleFailedSwap will invoke the appropriate failover logic depending on if this swap was marked refundable -// or non-refundable in the SwapMetadata. -func (im IBCMiddleware) handleFailedSwap( - ctx sdk.Context, - packet channeltypes.Packet, - data transfertypes.FungibleTokenPacketData, - metadata *types.SwapMetadata, - err error, -) ibcexported.Acknowledgement { - swapErr := sdkerrors.Wrap(types.ErrSwapFailed, err.Error()) - im.keeper.Logger(ctx).Error( - "ibc swap failed", - "err", swapErr, - "creator", metadata.Creator, - "receiver", metadata.Receiver, - "tokenIn", metadata.TokenIn, - "tokenOut", metadata.TokenOut, - "AmountIn", metadata.AmountIn, - "TickIndexInToOut", metadata.TickIndexInToOut, - "OrderType", metadata.OrderType, - "refund address", metadata.NeutronRefundAddress, - ) - - // The current denom is from the sender chains perspective, we need to compose the appropriate denom for this side - denomOnThisChain := getDenomForThisChain(packet, data.Denom) - - if len(metadata.NeutronRefundAddress) != 0 { - return im.handleOnChainRefund(ctx, data, metadata, denomOnThisChain, err) - } - - return im.handleIBCRefund(ctx, packet, data, metadata, denomOnThisChain, err) -} - -// handleOnChainRefund will compose a successful ack to send back to the counterparty chain containing any error messages. -// Returning a successful ack ensures that a refund is not issued on the counterparty chain. -// See: https://github.com/cosmos/ibc-go/blob/3ecc7dd3aef5790ec5d906936a297b34adf1ee41/modules/apps/transfer/keeper/relay.go#L320 -func (im IBCMiddleware) handleOnChainRefund( - ctx sdk.Context, - data transfertypes.FungibleTokenPacketData, - metadata *types.SwapMetadata, - newDenom string, - swapErr error, -) ibcexported.Acknowledgement { - amount, ok := math.NewIntFromString(data.Amount) - if !ok { - wrappedErr := sdkerrors.Wrapf( - transfertypes.ErrInvalidAmount, - "unable to parse transfer amount (%s) into math.Int", - data.Amount, - ) - wrappedErr = sdkerrors.Wrap(swapErr, wrappedErr.Error()) - return channeltypes.NewResultAcknowledgement([]byte(wrappedErr.Error())) - } - - token := sdk.NewCoin(newDenom, amount) - err := im.keeper.SendCoins(ctx, metadata.Creator, metadata.NeutronRefundAddress, sdk.NewCoins(token)) - if err != nil { - wrappedErr := sdkerrors.Wrap(err, "failed to move funds to refund address") - wrappedErr = sdkerrors.Wrap(swapErr, wrappedErr.Error()) - return channeltypes.NewResultAcknowledgement([]byte(wrappedErr.Error())) - } - - return channeltypes.NewResultAcknowledgement([]byte(swapErr.Error())) -} - -// handleIBCRefund will either burn or transfer the funds back to the appropriate escrow account. -// When a packet comes in the transfer module's OnRecvPacket callback is invoked which either -// mints or unescrows funds on this side so if the swap fails an explicit refund is required. 
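Both refund paths above, and the swap validation further down, depend on rewriting the packet denom into this chain's representation, which getDenomForThisChain (below) does by either unwinding the counterparty prefix or adding the local one. A small, self-contained illustration using ibc-go's transfer types follows; the channel IDs and denoms are hypothetical.

package main

import (
	"fmt"

	transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types"
)

func main() {
	// A foreign token arriving on this chain gets prefixed with the destination
	// port/channel and hashed into an ibc/... denom.
	prefixed := transfertypes.GetDenomPrefix("transfer", "channel-0") + "uatom"
	fmt.Println(transfertypes.ParseDenomTrace(prefixed).IBCDenom())

	// A token that originated here and is returning has the counterparty prefix
	// stripped to recover the native denom.
	counterpartyPrefix := transfertypes.GetDenomPrefix("transfer", "channel-1")
	fmt.Println("transfer/channel-1/untrn"[len(counterpartyPrefix):]) // untrn
}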
-func (im IBCMiddleware) handleIBCRefund( - ctx sdk.Context, - packet channeltypes.Packet, - data transfertypes.FungibleTokenPacketData, - metadata *types.SwapMetadata, - newDenom string, - swapErr error, -) ibcexported.Acknowledgement { - data.Denom = newDenom - - err := im.keeper.RefundPacketToken(ctx, packet, data, metadata) - if err != nil { - wrappedErr := sdkerrors.Wrap(swapErr, err.Error()) - - // If the refund fails on this side we want to make sure that the refund does not happen on the counterparty, - // so we return a successful ack containing the error - return channeltypes.NewResultAcknowledgement([]byte(wrappedErr.Error())) - } - - return channeltypes.NewErrorAcknowledgement(swapErr) -} - -// getDenomForThisChain composes a new token denom by either unwinding or prefixing the specified token denom appropriately. -// This is necessary because the token denom in the packet data is from the perspective of the counterparty chain. -func getDenomForThisChain(packet channeltypes.Packet, denom string) string { - counterpartyPrefix := transfertypes.GetDenomPrefix(packet.SourcePort, packet.SourceChannel) - if strings.HasPrefix(denom, counterpartyPrefix) { - // unwind denom - unwoundDenom := denom[len(counterpartyPrefix):] - denomTrace := transfertypes.ParseDenomTrace(unwoundDenom) - if denomTrace.Path == "" { - // denom is now unwound back to native denom - return unwoundDenom - } - // denom is still IBC denom - return denomTrace.IBCDenom() - } - - // append port and channel from this chain to denom - prefixedDenom := transfertypes.GetDenomPrefix(packet.DestinationPort, packet.DestinationChannel) + denom - return transfertypes.ParseDenomTrace(prefixedDenom).IBCDenom() -} - -// Update the packet data to reflect the new receiver address that is used by the PFM -func newPacketWithOverrideReceiver(oldPacket channeltypes.Packet, data transfertypes.FungibleTokenPacketData, overrideReceiver string) channeltypes.Packet { - overrideData := transfertypes.FungibleTokenPacketData{ - Denom: data.Denom, - Amount: data.Amount, - Sender: data.Sender, - Receiver: overrideReceiver, // override receiver - } - overrideDataBz := transfertypes.ModuleCdc.MustMarshalJSON(&overrideData) - - return channeltypes.Packet{ - Sequence: oldPacket.Sequence, - SourcePort: oldPacket.SourcePort, - SourceChannel: oldPacket.SourceChannel, - DestinationPort: oldPacket.DestinationPort, - DestinationChannel: oldPacket.DestinationChannel, - Data: overrideDataBz, // override data - TimeoutHeight: oldPacket.TimeoutHeight, - TimeoutTimestamp: oldPacket.TimeoutTimestamp, - } -} - -func validateSwapPacket(packet channeltypes.Packet, transferData transfertypes.FungibleTokenPacketData, sm types.SwapMetadata) error { - denomOnNeutron := getDenomForThisChain(packet, transferData.Denom) - if denomOnNeutron != sm.TokenIn { - return sdkerrors.Wrap(types.ErrInvalidSwapMetadata, "Transfer Denom must match TokenIn") - } - - transferAmount, ok := math.NewIntFromString(transferData.Amount) - if !ok { - return sdkerrors.Wrapf( - transfertypes.ErrInvalidAmount, - "unable to parse transfer amount (%s) into math.Int", - transferData.Amount, - ) - } - - if transferAmount.LT(sm.AmountIn) { - return sdkerrors.Wrap(types.ErrInvalidSwapMetadata, "Transfer amount must be >= AmountIn") - } - - if sm.ContainsPFM() { - return sdkerrors.Wrap( - types.ErrInvalidSwapMetadata, - "ibcswap middleware cannot be used in conjunction with packet-forward-middleware", - ) - } - return nil -} diff --git a/x/ibcswap/keeper/keeper.go b/x/ibcswap/keeper/keeper.go deleted 
file mode 100644 index 33d5f1493..000000000 --- a/x/ibcswap/keeper/keeper.go +++ /dev/null @@ -1,207 +0,0 @@ -package keeper - -import ( - "fmt" - - sdkerrors "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/math" - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/gogoproto/proto" - capabilitytypes "github.com/cosmos/ibc-go/modules/capability/types" - transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types" - clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" //nolint:staticcheck - channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" - porttypes "github.com/cosmos/ibc-go/v8/modules/core/05-port/types" - ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" - - dextypes "github.com/neutron-org/neutron/v4/x/dex/types" - "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -// Keeper defines the swap middleware keeper. -type Keeper struct { - cdc codec.BinaryCodec - msgServiceRouter *baseapp.MsgServiceRouter - - ics4Wrapper porttypes.ICS4Wrapper - bankKeeper types.BankKeeper -} - -// NewKeeper creates a new swap Keeper instance. -func NewKeeper( - cdc codec.BinaryCodec, - msgServiceRouter *baseapp.MsgServiceRouter, - ics4Wrapper porttypes.ICS4Wrapper, - bankKeeper types.BankKeeper, -) Keeper { - return Keeper{ - cdc: cdc, - msgServiceRouter: msgServiceRouter, - - ics4Wrapper: ics4Wrapper, - bankKeeper: bankKeeper, - } -} - -// Logger returns a module-specific logger. -func (k Keeper) Logger(ctx sdk.Context) log.Logger { - return ctx.Logger().With("module", "x/"+ibcexported.ModuleName+"-"+types.ModuleName) -} - -// Swap calls into the base app's msg service router so that the appropriate handler is called when sending the swap msg. -func (k Keeper) Swap( - ctx sdk.Context, - originalCreator string, - msg *dextypes.MsgPlaceLimitOrder, -) (*dextypes.MsgPlaceLimitOrderResponse, error) { - swapHandler := k.msgServiceRouter.Handler(msg) - if swapHandler == nil { - return nil, sdkerrors.Wrap( - types.ErrMsgHandlerInvalid, - fmt.Sprintf("could not find the handler for %T", msg), - ) - } - - res, err := swapHandler(ctx, msg) - if err != nil { - return nil, err - } - - msgSwapRes := &dextypes.MsgPlaceLimitOrderResponse{} - // TODO: replace the Data field with MsgResponses? - if err := proto.Unmarshal(res.Data, msgSwapRes); err != nil { - return nil, err - } - - amountUnused := msg.AmountIn.Sub(msgSwapRes.CoinIn.Amount) - // If not all tokenIn is swapped and a temporary creator address has been used - // return the unused portion to the original creator address - if amountUnused.IsPositive() && originalCreator != msg.Creator { - overrrideCreatorAddr := sdk.MustAccAddressFromBech32(msg.Creator) - originalCreatorAddr := sdk.MustAccAddressFromBech32(originalCreator) - unusedCoin := sdk.NewCoin(msg.TokenIn, amountUnused) - - err := k.bankKeeper.SendCoins(ctx, overrrideCreatorAddr, originalCreatorAddr, sdk.Coins{unusedCoin}) - if err != nil { - return nil, err - } - } - - return msgSwapRes, nil -} - -// SendPacket wraps IBC ChannelKeeper's SendPacket function. 
-func (k Keeper) SendPacket( - ctx sdk.Context, - chanCap *capabilitytypes.Capability, - sourcePort string, - sourceChannel string, - timeoutHeight clienttypes.Height, - timeoutTimestamp uint64, - data []byte, -) (sequence uint64, err error) { - return k.ics4Wrapper.SendPacket( - ctx, - chanCap, - sourcePort, - sourceChannel, - timeoutHeight, - timeoutTimestamp, - data, - ) -} - -// WriteAcknowledgement wraps IBC ChannelKeeper's WriteAcknowledgement function. -func (k Keeper) WriteAcknowledgement( - ctx sdk.Context, - chanCap *capabilitytypes.Capability, - packet ibcexported.PacketI, - acknowledgement ibcexported.Acknowledgement, -) error { - return k.ics4Wrapper.WriteAcknowledgement(ctx, chanCap, packet, acknowledgement) -} - -// RefundPacketToken handles the burning or escrow lock up of vouchers when an asset should be refunded. -// This is only used in the case where we call into the transfer modules OnRecvPacket callback but then the swap fails. -func (k Keeper) RefundPacketToken( - ctx sdk.Context, - packet channeltypes.Packet, - data transfertypes.FungibleTokenPacketData, - metadata *types.SwapMetadata, -) error { - // parse the denomination from the full denom path - trace := transfertypes.ParseDenomTrace(data.Denom) - - // parse the transfer amount - transferAmount, ok := math.NewIntFromString(data.Amount) - if !ok { - return sdkerrors.Wrapf( - transfertypes.ErrInvalidAmount, - "unable to parse transfer amount (%s) into math.Int", - data.Amount, - ) - } - token := sdk.NewCoin(trace.IBCDenom(), transferAmount) - - // decode the creator address - receiver, err := sdk.AccAddressFromBech32(metadata.Creator) - if err != nil { - return err - } - - // if the sender chain is source that means a voucher was minted on Neutron when the ics20 transfer took place - if transfertypes.SenderChainIsSource(packet.SourcePort, packet.SourceChannel, data.Denom) { - // transfer coins from user account to transfer module - err = k.bankKeeper.SendCoinsFromAccountToModule( - ctx, - receiver, - types.ModuleName, - sdk.NewCoins(token), - ) - if err != nil { - return err - } - - // burn the coins - err = k.bankKeeper.BurnCoins(ctx, types.ModuleName, sdk.NewCoins(token)) - if err != nil { - return err - } - - return nil - } - - // transfer coins from user account to escrow address - escrowAddress := transfertypes.GetEscrowAddress( - packet.GetSourcePort(), - packet.GetSourceChannel(), - ) - err = k.bankKeeper.SendCoins(ctx, receiver, escrowAddress, sdk.NewCoins(token)) - if err != nil { - return err - } - - return nil -} - -// SendCoins wraps the BankKeepers SendCoins function so it can be invoked from the middleware. 
-func (k Keeper) SendCoins(ctx sdk.Context, fromAddr, toAddr string, amt sdk.Coins) error { - from, err := sdk.AccAddressFromBech32(fromAddr) - if err != nil { - return err - } - - to, err := sdk.AccAddressFromBech32(toAddr) - if err != nil { - return err - } - - return k.bankKeeper.SendCoins(ctx, from, to, amt) -} - -func (k Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { - return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) -} diff --git a/x/ibcswap/module.go b/x/ibcswap/module.go deleted file mode 100644 index 428ac1274..000000000 --- a/x/ibcswap/module.go +++ /dev/null @@ -1,144 +0,0 @@ -package ibcswap - -import ( - "encoding/json" - - "cosmossdk.io/core/appmodule" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/gorilla/mux" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/spf13/cobra" - - "github.com/neutron-org/neutron/v4/x/ibcswap/keeper" - "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -var ( - _ module.AppModuleBasic = AppModuleBasic{} - _ appmodule.AppModule = AppModule{} - _ module.AppModuleSimulation = AppModule{} -) - -// AppModuleBasic is the swap middleware AppModuleBasic. -type AppModuleBasic struct{} - -// Name implements AppModuleBasic interface. -func (AppModuleBasic) Name() string { - return types.ModuleName -} - -// RegisterLegacyAminoCodec implements AppModuleBasic interface. -func (AppModuleBasic) RegisterLegacyAminoCodec(_ *codec.LegacyAmino) {} - -// RegisterInterfaces registers module concrete types into protobuf Any. -func (AppModuleBasic) RegisterInterfaces(_ codectypes.InterfaceRegistry) {} - -// DefaultGenesis returns default genesis state as raw bytes for the swap module. -func (AppModuleBasic) DefaultGenesis(_ codec.JSONCodec) json.RawMessage { - return nil -} - -// ValidateGenesis performs genesis state validation for the swap module. -func (AppModuleBasic) ValidateGenesis( - _ codec.JSONCodec, - _ client.TxEncodingConfig, - _ json.RawMessage, -) error { - return nil -} - -// RegisterRESTRoutes implements AppModuleBasic interface. -func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) {} - -// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the swap module. -func (AppModuleBasic) RegisterGRPCGatewayRoutes(_ client.Context, _ *runtime.ServeMux) {} - -// GetTxCmd implements AppModuleBasic interface. -func (AppModuleBasic) GetTxCmd() *cobra.Command { - return nil -} - -// GetQueryCmd implements AppModuleBasic interface. -func (AppModuleBasic) GetQueryCmd() *cobra.Command { - return nil -} - -// AppModule implements the module.AppModule -type AppModule struct { - AppModuleBasic - keeper keeper.Keeper -} - -func NewAppModule(keeper keeper.Keeper) *AppModule { - return &AppModule{ - keeper: keeper, - } -} - -// RegisterInvariants implements the AppModule interface. -func (AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} - -// QuerierRoute implements the AppModule interface. -func (AppModule) QuerierRoute() string { - return "" -} - -// RegisterServices registers module services. -func (am AppModule) RegisterServices(_ module.Configurator) {} - -// InitGenesis performs genesis initialization for the ibc-router module. It returns -// no validator updates. 
-func (am AppModule) InitGenesis( - _ sdk.Context, - _ codec.JSONCodec, - _ json.RawMessage, -) []abci.ValidatorUpdate { - return []abci.ValidatorUpdate{} -} - -// ExportGenesis returns the exported genesis state as raw bytes for the swap module. -func (am AppModule) ExportGenesis(_ sdk.Context, _ codec.JSONCodec) json.RawMessage { - return nil -} - -// ConsensusVersion returns the consensus state breaking version for the swap module. -func (AppModule) ConsensusVersion() uint64 { return 1 } - -// BeginBlock implements the AppModule interface. -func (am AppModule) BeginBlock(_ sdk.Context) {} - -// EndBlock implements the AppModule interface. -func (am AppModule) EndBlock(_ sdk.Context) []abci.ValidatorUpdate { - return []abci.ValidatorUpdate{} -} - -// GenerateGenesisState implements the AppModuleSimulation interface. -func (AppModule) GenerateGenesisState(_ *module.SimulationState) {} - -// ProposalContents implements the AppModuleSimulation interface. -func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalMsg { - return nil -} - -// RegisterStoreDecoder implements the AppModuleSimulation interface. -func (am AppModule) RegisterStoreDecoder(_ simtypes.StoreDecoderRegistry) {} - -// WeightedOperations implements the AppModuleSimulation interface. -func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { - return nil -} - -// IsOnePerModuleType implements the depinject.OnePerModuleType interface. -func (am AppModule) IsOnePerModuleType() { // marker -} - -// IsAppModule implements the appmodule.AppModule interface. -func (am AppModule) IsAppModule() { // marker -} diff --git a/x/ibcswap/types/errors.go b/x/ibcswap/types/errors.go deleted file mode 100644 index 819589ae8..000000000 --- a/x/ibcswap/types/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package types - -import sdkerrors "cosmossdk.io/errors" - -var ( - ErrInvalidSwapMetadata = sdkerrors.Register(ModuleName, 2, "invalid swap metadata") - ErrSwapFailed = sdkerrors.Register(ModuleName, 3, "ibc swap failed") - ErrMsgHandlerInvalid = sdkerrors.Register(ModuleName, 4, "msg service handler not found") -) diff --git a/x/ibcswap/types/expected_keepers.go b/x/ibcswap/types/expected_keepers.go deleted file mode 100644 index 705463d97..000000000 --- a/x/ibcswap/types/expected_keepers.go +++ /dev/null @@ -1,14 +0,0 @@ -package types - -import ( - "context" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// BankKeeper defines the expected interface that the swap middleware needs in order to facilitate refunds. -type BankKeeper interface { - SendCoins(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) error - BurnCoins(ctx context.Context, moduleName string, amt sdk.Coins) error - SendCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error -} diff --git a/x/ibcswap/types/keys.go b/x/ibcswap/types/keys.go deleted file mode 100644 index 9a88fc669..000000000 --- a/x/ibcswap/types/keys.go +++ /dev/null @@ -1,8 +0,0 @@ -package types - -// ModuleName defines the name for the swap middleware. -const ModuleName = "swap-middleware" - -// ProcessedKey is used to signal to the swap middleware that a packet has already been processed by some other -// middleware and so invoking the transfer modules OnRecvPacket callback should be avoided. 
-type ProcessedKey struct{} diff --git a/x/ibcswap/types/swap.go b/x/ibcswap/types/swap.go deleted file mode 100644 index 9f5013a49..000000000 --- a/x/ibcswap/types/swap.go +++ /dev/null @@ -1,114 +0,0 @@ -package types - -import ( - "encoding/json" - - sdkerrors "cosmossdk.io/errors" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/iancoleman/orderedmap" - - dextypes "github.com/neutron-org/neutron/v4/x/dex/types" -) - -// PacketMetadata wraps the SwapMetadata. The root key in the incoming ICS20 transfer packet's memo needs to be set to the same -// value as the json tag in order for the swap middleware to process the swap. -type PacketMetadata struct { - Swap *SwapMetadata `json:"swap"` -} - -// SwapMetadata defines the parameters necessary to perform a swap utilizing the memo field from an incoming ICS20 -// transfer packet. The next field is a string so that you can nest any arbitrary metadata to be handled -// further in the middleware stack or on the counterparty. -type SwapMetadata struct { - *dextypes.MsgPlaceLimitOrder - // If a value is provided for NeutronRefundAddress and the swap fails the Transfer.Amount will be moved to this address for later recovery. - // If no NeutronRefundAddress is provided and a swap fails we will fail the ibc transfer and tokens will be refunded on the source chain. - NeutronRefundAddress string `json:"refund-address,omitempty"` - - // Using JSONObject so that objects for next property will not be mutated by golang's lexicographic key sort on map keys during Marshal. - // Supports primitives for Unmarshal/Marshal so that an escaped JSON-marshaled string is also valid. - Next *JSONObject `json:"next,omitempty"` -} - -// Validate ensures that all the required fields are present in the SwapMetadata and contain valid values. -func (sm SwapMetadata) Validate() error { - if err := sm.MsgPlaceLimitOrder.Validate(); err != nil { - return sdkerrors.Wrap(ErrInvalidSwapMetadata, err.Error()) - } - if sm.TokenIn == "" { - return sdkerrors.Wrap(ErrInvalidSwapMetadata, "limit order tokenIn cannot be an empty string") - } - if sm.TokenOut == "" { - return sdkerrors.Wrap(ErrInvalidSwapMetadata, "limit order tokenOut cannot be an empty string") - } - if sm.NeutronRefundAddress != "" { - _, err := sdk.AccAddressFromBech32(sm.NeutronRefundAddress) - if err != nil { - return sdkerrors.Wrapf(dextypes.ErrInvalidAddress, "%s is not a valid Neutron address", sm.NeutronRefundAddress) - } - } - - if !sm.OrderType.IsFoK() { - return sdkerrors.Wrap(ErrInvalidSwapMetadata, "Limit Order type must be FILL_OR_KILL") - } - - return nil -} - -// ContainsPFM checks if the Swapetadata is wrapping packet-forward-middleware -func (sm SwapMetadata) ContainsPFM() bool { - if sm.Next == nil { - return false - } - forward, _ := sm.Next.orderedMap.Get("forward") - - return forward != nil -} - -// JSONObject is a wrapper type to allow either a primitive type or a JSON object. -// In the case the value is a JSON object, OrderedMap type is used so that key order -// is retained across Unmarshal/Marshal. -type JSONObject struct { - obj bool - primitive []byte - orderedMap orderedmap.OrderedMap -} - -// NewJSONObject is a constructor used for tests. 
-// The usage of JSONObject in the middleware is only json Marshal/Unmarshal -func NewJSONObject(object bool, primitive []byte, orderedMap orderedmap.OrderedMap) *JSONObject { - return &JSONObject{ - obj: object, - primitive: primitive, - orderedMap: orderedMap, - } -} - -// UnmarshalJSON overrides the default json.Unmarshal behavior -func (o *JSONObject) UnmarshalJSON(b []byte) error { - if err := o.orderedMap.UnmarshalJSON(b); err != nil { - // If ordered map unmarshal fails, this is a primitive value - o.obj = false - // Attempt to unmarshal as string, this removes extra JSON escaping - var primitiveStr string - if err := json.Unmarshal(b, &primitiveStr); err != nil { - o.primitive = b - return nil - } - o.primitive = []byte(primitiveStr) - return nil - } - // This is a JSON object, now stored as an ordered map to retain key order. - o.obj = true - return nil -} - -// MarshalJSON overrides the default json.Marshal behavior -func (o *JSONObject) MarshalJSON() ([]byte, error) { - if o.obj { - // non-primitive, return marshaled ordered map. - return o.orderedMap.MarshalJSON() - } - // primitive, return raw bytes. - return o.primitive, nil -} diff --git a/x/ibcswap/types/swap_test.go b/x/ibcswap/types/swap_test.go deleted file mode 100644 index 50bc06772..000000000 --- a/x/ibcswap/types/swap_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package types_test - -import ( - "encoding/json" - "testing" - - "github.com/neutron-org/neutron/v4/app/config" - - "cosmossdk.io/math" - pfmtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8/packetforward/types" - "github.com/iancoleman/orderedmap" - "github.com/stretchr/testify/require" - - "github.com/neutron-org/neutron/v4/testutil/common/sample" - "github.com/neutron-org/neutron/v4/x/dex/types" - dextypes "github.com/neutron-org/neutron/v4/x/ibcswap/types" -) - -func init() { - _ = config.GetDefaultConfig() -} - -// TestPacketMetadata_Marshal asserts that the marshaling of the swap metadata works as intended. -func TestPacketMetadata_Marshal(t *testing.T) { - pm := dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "test-1", - Receiver: "test-1", - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err := json.Marshal(pm) - require.NoError(t, err) -} - -// TestPacketMetadata_MarshalWithNext asserts that the marshaling of the swap metadata works as intended with next field initialized. -func TestPacketMetadata_MarshalWithNext(t *testing.T) { - forwardMedata := &pfmtypes.PacketMetadata{ - Forward: &pfmtypes.ForwardMetadata{ - Receiver: "cosmos14zde8usc4ur04y3aqnufzzmv2uqdpwwttr5uwv", - Port: "transfer", - Channel: "channel-0", - Timeout: 0, - Retries: nil, - Next: nil, - }, - } - nextBz, err := json.Marshal(forwardMedata) - require.NoError(t, err) - - pm := dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "test-1", - Receiver: "test-1", - TokenIn: "token-a", - TokenOut: "token-b", - TickIndexInToOut: 0, - AmountIn: math.NewInt(123), - OrderType: types.LimitOrderType_FILL_OR_KILL, - // MaxAmountOut: math.NewInt(456), - }, - Next: dextypes.NewJSONObject(false, nextBz, orderedmap.OrderedMap{}), - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) -} - -// TestPacketMetadata_Unmarshal asserts that unmarshaling works as intended. 
-func TestPacketMetadata_Unmarshal(t *testing.T) { - metadata := "{\n \"swap\": {\n \"creator\": \"test-1\",\n \"TickIndexInToOut\": 0,\n \"orderType\": 1,\n \"receiver\": \"test-1\",\n \"tokenIn\": \"token-a\",\n \"tokenOut\": \"token-b\",\n \"AmountIn\": \"123\",\n \"next\": \"\"\n }\n}" - pm := &dextypes.PacketMetadata{} - err := json.Unmarshal([]byte(metadata), pm) - require.NoError(t, err) -} - -// TestPacketMetadata_UnmarshalStringNext asserts that unmarshaling works as intended when next is escaped json string. -func TestPacketMetadata_UnmarshalStringNext(t *testing.T) { - metadata := "{\n \"swap\": {\n \"creator\": \"test-1\",\n \"receiver\": \"test-1\",\n \"tokenIn\": \"token-a\",\n \"tokenOut\": \"token-b\",\n \"AmountIn\": \"123\",\n \"TickIndexInToOut\": 0,\n \"orderType\": 1,\n \"next\": \" {\\\"forward\\\":{\\\"receiver\\\":\\\"cosmos1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8\\\",\\\"port\\\":\\\"transfer\\\",\\\"channel\\\":\\\"channel-0\\\",\\\"timeout\\\":0,\\\"next\\\":{\\\"forward\\\":{\\\"receiver\\\":\\\"cosmos1l505zhahp24v5jsmps9vs5asah759fdce06sfp\\\",\\\"port\\\":\\\"transfer\\\",\\\"channel\\\":\\\"channel-0\\\",\\\"timeout\\\":0}}}}\"\n }\n}" - pm := &dextypes.PacketMetadata{} - err := json.Unmarshal([]byte(metadata), pm) - require.NoError(t, err) -} - -// TestPacketMetadata_UnmarshalJSONNext asserts that unmarshaling works as intended when next is a raw json object. -func TestPacketMetadata_UnmarshalJSONNext(t *testing.T) { - metadata := "{\"swap\":{\"creator\":\"test-1\",\"receiver\":\"test-1\",\"tokenIn\":\"token-a\",\"tokenOut\":\"token-b\",\"AmountIn\":\"123\",\"TickIndexInToOut\":0, \"orderType\": 1, \"tokenIn\":\"token-in\",\"next\":{\"forward\":{\"receiver\":\"cosmos14zde8usc4ur04y3aqnufzzmv2uqdpwwttr5uwv\",\"port\":\"transfer\",\"channel\":\"channel-0\"}}}}" - pm := &dextypes.PacketMetadata{} - err := json.Unmarshal([]byte(metadata), pm) - require.NoError(t, err) -} - -func TestSwapMetadata_ValidatePass(t *testing.T) { - pm := dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: sample.AccAddress(), - Receiver: sample.AccAddress(), - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err := json.Marshal(pm) - require.NoError(t, err) - - require.NoError(t, pm.Swap.Validate()) -} - -func TestSwapMetadata_ValidateFail(t *testing.T) { - pm := dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "", - Receiver: "test-1", - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err := json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) - - pm = dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "creator", - Receiver: "", - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) - - pm = dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "creator", - Receiver: "test-1", - TokenIn: "", - TokenOut: "token-b", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, 
- OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) - - pm = dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "creator", - Receiver: "receiver", - TokenIn: "token-a", - TokenOut: "", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) - - pm = dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "creator", - Receiver: "receiver", - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(0), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) - - pm = dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "creator", - Receiver: "receiver", - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(-1), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_FILL_OR_KILL, - }, - Next: nil, - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) - - pm = dextypes.PacketMetadata{ - &dextypes.SwapMetadata{ - MsgPlaceLimitOrder: &types.MsgPlaceLimitOrder{ - Creator: "creator", - Receiver: "receiver", - TokenIn: "token-a", - TokenOut: "token-b", - AmountIn: math.NewInt(123), - TickIndexInToOut: 0, - OrderType: types.LimitOrderType_GOOD_TIL_CANCELLED, - }, - Next: nil, - }, - } - _, err = json.Marshal(pm) - require.NoError(t, err) - require.Error(t, pm.Swap.Validate()) -}
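For reference, below is a minimal standalone sketch (not part of the diff) of the denom translation that the removed getDenomForThisChain helper performed, using only ibc-go's transfer types. The port/channel IDs in main are illustrative assumptions, not values from this changeset.

package main

import (
	"fmt"
	"strings"

	transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types"
)

// denomForThisChain mirrors the removed helper: the denom in an incoming ICS20 packet is
// written from the counterparty's perspective, so it is either unwound (a token returning
// to its home chain) or prefixed with the receiving chain's port/channel (a token moving
// one hop further away) before being hashed into an ibc/... denom.
func denomForThisChain(srcPort, srcChannel, dstPort, dstChannel, denom string) string {
	counterpartyPrefix := transfertypes.GetDenomPrefix(srcPort, srcChannel)
	if strings.HasPrefix(denom, counterpartyPrefix) {
		unwound := denom[len(counterpartyPrefix):]
		trace := transfertypes.ParseDenomTrace(unwound)
		if trace.Path == "" {
			// fully unwound back to the native denom
			return unwound
		}
		// still an IBC voucher after removing one hop
		return trace.IBCDenom()
	}
	// token is foreign here: prefix with this chain's port/channel and hash
	prefixed := transfertypes.GetDenomPrefix(dstPort, dstChannel) + denom
	return transfertypes.ParseDenomTrace(prefixed).IBCDenom()
}

func main() {
	// A voucher for untrn coming back over the counterparty's transfer/channel-5 unwinds to the native denom.
	fmt.Println(denomForThisChain("transfer", "channel-5", "transfer", "channel-1", "transfer/channel-5/untrn"))
	// A denom native to the counterparty gets prefixed with this chain's transfer/channel-1 and hashed.
	fmt.Println(denomForThisChain("transfer", "channel-5", "transfer", "channel-1", "uatom"))
}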