diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 00000000000..7948ea60520 --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,12 @@ +quiet: False +disable-version-string: True +with-expecter: True +mockname: "{{.InterfaceName}}" +filename: "{{.MockName}}.go" +dir: "{{ .InterfaceDirRelative }}/mocks" +outpkg: mocks +packages: + github.com/cosmos/gaia/v22/x/lsm/types: + config: + recursive: True + include-regex: ".*Keeper" diff --git a/Makefile b/Makefile index e21967e1fcc..2cd4903c40b 100644 --- a/Makefile +++ b/Makefile @@ -273,6 +273,13 @@ PACKAGES_E2E=$(shell cd tests/e2e && go list ./... | grep '/e2e') TEST_PACKAGES=./... TEST_TARGETS := test-unit test-unit-cover test-race test-e2e +mocks: gen-mocks format + +gen-mocks: + @echo "--> generating mocks" + @go install github.com/vektra/mockery/v2 + @go run github.com/vektra/mockery/v2 + test-unit: ARGS=-timeout=5m -tags='norace' test-unit: TEST_PACKAGES=$(PACKAGES_UNIT) test-unit-cover: ARGS=-timeout=5m -tags='norace' -coverprofile=coverage.txt -covermode=atomic diff --git a/app/keepers/keepers.go b/app/keepers/keepers.go index 1b07afe9ac2..030539d27b2 100644 --- a/app/keepers/keepers.go +++ b/app/keepers/keepers.go @@ -82,6 +82,9 @@ import ( "github.com/CosmWasm/wasmd/x/wasm" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + + lsmkeeper "github.com/cosmos/gaia/v22/x/lsm/keeper" + lsmtypes "github.com/cosmos/gaia/v22/x/lsm/types" ) type AppKeepers struct { @@ -98,6 +101,7 @@ type AppKeepers struct { SlashingKeeper slashingkeeper.Keeper MintKeeper mintkeeper.Keeper DistrKeeper distrkeeper.Keeper + LsmKeeper *lsmkeeper.Keeper GovKeeper *govkeeper.Keeper CrisisKeeper *crisiskeeper.Keeper UpgradeKeeper *upgradekeeper.Keeper @@ -282,6 +286,16 @@ func NewAppKeeper( ), ) + appKeepers.LsmKeeper = lsmkeeper.NewKeeper( + appCodec, + runtime.NewKVStoreService(appKeepers.keys[lsmtypes.StoreKey]), + appKeepers.AccountKeeper, + appKeepers.BankKeeper, + appKeepers.StakingKeeper, + appKeepers.DistrKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + appKeepers.FeeMarketKeeper = feemarketkeeper.NewKeeper( appCodec, appKeepers.keys[feemarkettypes.StoreKey], diff --git a/app/keepers/keys.go b/app/keepers/keys.go index e4cf9fe31fb..741669bcf34 100644 --- a/app/keepers/keys.go +++ b/app/keepers/keys.go @@ -31,6 +31,8 @@ import ( stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + + lsmtypes "github.com/cosmos/gaia/v22/x/lsm/types" ) func (appKeepers *AppKeepers) GenerateKeys() { @@ -62,6 +64,7 @@ func (appKeepers *AppKeepers) GenerateKeys() { consensusparamtypes.StoreKey, feemarkettypes.StoreKey, wasmtypes.StoreKey, + lsmtypes.StoreKey, ) // Define transient store keys diff --git a/app/modules.go b/app/modules.go index 59015ba9f7a..f7c32657613 100644 --- a/app/modules.go +++ b/app/modules.go @@ -62,6 +62,7 @@ import ( wasm "github.com/CosmWasm/wasmd/x/wasm" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + "github.com/cosmos/gaia/v22/x/lsm" "github.com/cosmos/gaia/v22/x/metaprotocols" metaprotocolstypes "github.com/cosmos/gaia/v22/x/metaprotocols/types" ) @@ -167,6 +168,7 @@ func simulationModules( authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), wasm.NewAppModule(appCodec, &app.AppKeepers.WasmKeeper, app.AppKeepers.StakingKeeper, app.AppKeepers.AccountKeeper, app.AppKeepers.BankKeeper, app.MsgServiceRouter(), 
app.GetSubspace(wasmtypes.ModuleName)), ibc.NewAppModule(app.IBCKeeper), + lsm.NewAppModule(appCodec, app.LsmKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), app.TransferModule, app.ICAModule, } diff --git a/go.mod b/go.mod index 840cf9ced01..4d534a14d96 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/cometbft/cometbft v0.38.15 github.com/cometbft/cometbft-db v0.14.1 github.com/cosmos/cosmos-db v1.1.0 + github.com/cosmos/cosmos-proto v1.0.0-beta.5 github.com/cosmos/cosmos-sdk v0.50.10 github.com/cosmos/go-bip39 v1.0.0 github.com/cosmos/gogoproto v1.7.0 @@ -31,6 +32,7 @@ require ( github.com/cosmos/ibc-go/modules/capability v1.0.1 github.com/cosmos/ibc-go/v8 v8.5.2 github.com/cosmos/interchain-security/v6 v6.0.0-20241209121224-cabebfa1c816 + github.com/golang/mock v1.6.0 github.com/google/gofuzz v1.2.0 github.com/gorilla/mux v1.8.1 github.com/ory/dockertest/v3 v3.11.0 @@ -42,6 +44,9 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 + github.com/vektra/mockery/v2 v2.43.2 + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/protobuf v1.35.2 ) require ( @@ -49,10 +54,10 @@ require ( cloud.google.com/go/compute/metadata v0.5.0 // indirect cloud.google.com/go/iam v1.1.9 // indirect cloud.google.com/go/storage v1.41.0 // indirect - github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/grpc v1.68.0 // indirect + google.golang.org/grpc v1.68.0 ) require ( @@ -81,6 +86,7 @@ require ( github.com/bytedance/sonic/loader v0.2.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chigopher/pathlib v0.19.1 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect @@ -94,7 +100,6 @@ require ( github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect github.com/containerd/continuity v0.4.3 // indirect github.com/cosmos/btcutil v1.0.5 // indirect - github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect github.com/cosmos/iavl v1.2.0 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect @@ -130,7 +135,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect github.com/google/flatbuffers v1.12.1 // indirect @@ -159,10 +163,12 @@ require ( github.com/hashicorp/yamux v0.1.1 // indirect github.com/hdevalence/ed25519consensus v0.1.0 // indirect github.com/huandu/skiplist v1.2.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jinzhu/copier v0.3.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect @@ -207,6 +213,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect 
github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect @@ -230,6 +237,7 @@ require ( golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect @@ -237,10 +245,9 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.24.0 // indirect google.golang.org/api v0.186.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.35.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index f82d546bc1d..2d831ab378a 100644 --- a/go.sum +++ b/go.sum @@ -360,6 +360,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A= +github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -860,6 +862,8 @@ github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3 github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= @@ -894,6 +898,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath 
v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -1302,6 +1308,8 @@ github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/X github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vektra/mockery/v2 v2.43.2 h1:OdivAsQL/uoQ55UnTt25tliRI8kaj5j6caHk9xaAUD0= +github.com/vektra/mockery/v2 v2.43.2/go.mod h1:XNTE9RIu3deGAGQRVjP1VZxGpQNm0YedZx4oDs3prr8= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1441,6 +1449,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1752,6 +1762,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/proto/gaia/lsm/module/v1/module.proto b/proto/gaia/lsm/module/v1/module.proto new file mode 100644 index 00000000000..f7b5ce9cbe3 --- /dev/null +++ b/proto/gaia/lsm/module/v1/module.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package gaia.lsm.module.v1; + +import "cosmos/app/v1alpha1/module.proto"; + +// Module is the config object of the staking module. +message Module { + option (cosmos.app.v1alpha1.module) = { + go_import : "github.com/cosmos/gaia/x/lsm" + }; + + // authority defines the custom module authority. If not set, defaults to the + // governance module. + string authority = 1; + + // bech32_prefix_validator is the bech32 validator prefix for the app. 
+ string bech32_prefix_validator = 2; + + // bech32_prefix_consensus is the bech32 consensus node prefix for the app. + string bech32_prefix_consensus = 3; +} diff --git a/proto/gaia/lsm/v1beta1/genesis.proto b/proto/gaia/lsm/v1beta1/genesis.proto new file mode 100644 index 00000000000..95913e778b9 --- /dev/null +++ b/proto/gaia/lsm/v1beta1/genesis.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; +package gaia.lsm.v1beta1; + +option go_package = "github.com/cosmos/gaia/x/lsm/types"; + +import "gogoproto/gogo.proto"; +import "gaia/lsm/v1beta1/lsm.proto"; +import "amino/amino.proto"; +import "google/protobuf/timestamp.proto"; + +// GenesisState defines the lsm module's genesis state. +message GenesisState { + // params defines all the parameters of the lsm module. + Params params = 1 + [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ]; + + // store tokenize share records to provide reward to record owners + repeated TokenizeShareRecord tokenize_share_records = 9 + [ (gogoproto.nullable) = false ]; + + // last tokenize share record id, used for next share record id calculation + uint64 last_tokenize_share_record_id = 10; + + // total number of liquid staked tokens at genesis + bytes total_liquid_staked_tokens = 11 [ + (gogoproto.customtype) = "cosmossdk.io/math.Int", + (gogoproto.moretags) = "yaml:\"total_liquid_staked_tokens\"", + (gogoproto.nullable) = false + ]; + + // tokenize shares locks at genesis + repeated TokenizeShareLock tokenize_share_locks = 12 + [ (gogoproto.nullable) = false ]; +} + +// TokenizeShareLock is required for specifying account locks at genesis +message TokenizeShareLock { + // Address of the account that is locked + string address = 1; + // Status of the lock (LOCKED or LOCK_EXPIRING) + string status = 2; + // Completion time if the lock is expiring + google.protobuf.Timestamp completion_time = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true, + (gogoproto.moretags) = "yaml:\"completion_time\"" + ]; +} diff --git a/proto/gaia/lsm/v1beta1/lsm.proto b/proto/gaia/lsm/v1beta1/lsm.proto new file mode 100644 index 00000000000..17398d6c9ab --- /dev/null +++ b/proto/gaia/lsm/v1beta1/lsm.proto @@ -0,0 +1,113 @@ +syntax = "proto3"; +package gaia.lsm.v1beta1; + +import "gogoproto/gogo.proto"; + +import "amino/amino.proto"; +import "cosmos/base/v1beta1/coin.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "github.com/cosmos/gaia/x/lsm/types"; + +// Params defines the parameters for the x/lsm module. 
+message Params { + option (amino.name) = "gaia/x/lsm/Params"; + option (gogoproto.equal) = true; + + // validator_bond_factor is required as a safety check for tokenizing shares + // and delegations from liquid staking providers + string validator_bond_factor = 7 [ + (gogoproto.moretags) = "yaml:\"validator_bond_factor\"", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true, + (cosmos_proto.scalar) = "cosmos.Dec" + ]; + // global_liquid_staking_cap represents a cap on the portion of stake that + // comes from liquid staking providers + string global_liquid_staking_cap = 8 [ + (gogoproto.moretags) = "yaml:\"global_liquid_staking_cap\"", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true, + (cosmos_proto.scalar) = "cosmos.Dec" + ]; + // validator_liquid_staking_cap represents a cap on the portion of stake that + // comes from liquid staking providers for a specific validator + string validator_liquid_staking_cap = 9 [ + (gogoproto.moretags) = "yaml:\"validator_liquid_staking_cap\"", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true, + (cosmos_proto.scalar) = "cosmos.Dec" + ]; +} + +// TokenizeShareRecord represents a tokenized delegation +message TokenizeShareRecord { + option (gogoproto.equal) = true; + + uint64 id = 1; + string owner = 2; + string module_account = 3; // module account take the role of delegator + string validator = + 4; // validator delegated to for tokenize share record creation +} + +// PendingTokenizeShareAuthorizations stores a list of addresses that have their +// tokenize share enablement in progress +message PendingTokenizeShareAuthorizations { repeated string addresses = 1; } + +// TokenizeShareLockStatus indicates whether the address is able to tokenize +// shares +enum TokenizeShareLockStatus { + option (gogoproto.goproto_enum_prefix) = false; + + // UNSPECIFIED defines an empty tokenize share lock status + TOKENIZE_SHARE_LOCK_STATUS_UNSPECIFIED = 0; + // LOCKED indicates the account is locked and cannot tokenize shares + TOKENIZE_SHARE_LOCK_STATUS_LOCKED = 1; + // UNLOCKED indicates the account is unlocked and can tokenize shares + TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED = 2; + // LOCK_EXPIRING indicates the account is unable to tokenize shares, but + // will be able to tokenize shortly (after 1 unbonding period) + TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING = 3; +} + +// TokenizeShareRecordReward represents the properties of tokenize share +message TokenizeShareRecordReward { + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = true; + + uint64 record_id = 1; + + repeated cosmos.base.v1beta1.DecCoin reward = 2 [ + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.DecCoins", + (gogoproto.nullable) = false + ]; +} + +// LiquidValidator is the storage layout for details about a validator's liquid +// stake. +message LiquidValidator { + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = true; + + // operator_address defines the address of the validator's operator; bech + // encoded in JSON. 
+ string operator_address = 1 + [ (cosmos_proto.scalar) = "cosmos.AddressString" ]; + + // Number of shares self bonded from the validator + string validator_bond_shares = 2 [ + (gogoproto.moretags) = "yaml:\"validator_bond_shares\"", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false + ]; + // Number of shares either tokenized or owned by a liquid staking provider + string liquid_shares = 3 [ + (gogoproto.moretags) = "yaml:\"liquid_shares\"", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false + ]; +} \ No newline at end of file diff --git a/proto/gaia/lsm/v1beta1/query.proto b/proto/gaia/lsm/v1beta1/query.proto new file mode 100644 index 00000000000..2d491cc57e8 --- /dev/null +++ b/proto/gaia/lsm/v1beta1/query.proto @@ -0,0 +1,198 @@ +syntax = "proto3"; +package gaia.lsm.v1beta1; + +import "cosmos/base/query/v1beta1/pagination.proto"; +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "gaia/lsm/v1beta1/lsm.proto"; +import "cosmos/base/v1beta1/coin.proto"; +import "cosmos/query/v1/query.proto"; +import "amino/amino.proto"; + +option go_package = "github.com/cosmos/gaia/x/lsm/types"; + +// Query defines the gRPC querier service. +service Query { + + // Query for individual tokenize share record information by share by id + rpc TokenizeShareRecordById(QueryTokenizeShareRecordByIdRequest) + returns (QueryTokenizeShareRecordByIdResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/tokenize_share_record_by_id/{id}"; + } + + // Query for individual tokenize share record information by share denom + rpc TokenizeShareRecordByDenom(QueryTokenizeShareRecordByDenomRequest) + returns (QueryTokenizeShareRecordByDenomResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/tokenize_share_record_by_denom/{denom}"; + } + + // Query tokenize share records by address + rpc TokenizeShareRecordsOwned(QueryTokenizeShareRecordsOwnedRequest) + returns (QueryTokenizeShareRecordsOwnedResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/tokenize_share_record_owned/{owner}"; + } + + // Query for all tokenize share records + rpc AllTokenizeShareRecords(QueryAllTokenizeShareRecordsRequest) + returns (QueryAllTokenizeShareRecordsResponse) { + option (google.api.http).get = "/gaia/lsm/v1beta1/tokenize_share_records"; + } + + // Query for last tokenize share record id + rpc LastTokenizeShareRecordId(QueryLastTokenizeShareRecordIdRequest) + returns (QueryLastTokenizeShareRecordIdResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/last_tokenize_share_record_id"; + } + + // Query for total tokenized staked assets + rpc TotalTokenizeSharedAssets(QueryTotalTokenizeSharedAssetsRequest) + returns (QueryTotalTokenizeSharedAssetsResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/total_tokenize_shared_assets"; + } + + // Query for total liquid staked (including tokenized shares or owned by an + // liquid staking provider) + rpc TotalLiquidStaked(QueryTotalLiquidStaked) + returns (QueryTotalLiquidStakedResponse) { + option (google.api.http).get = "/gaia/lsm/v1beta1/total_liquid_staked"; + } + + // Query tokenize share locks + rpc TokenizeShareLockInfo(QueryTokenizeShareLockInfo) + returns (QueryTokenizeShareLockInfoResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/tokenize_share_lock_info/{address}"; + } + + // Parameters queries the lsm parameters. 
+ rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (cosmos.query.v1.module_query_safe) = true; + option (google.api.http).get = "/gaia/lsm/v1beta1/params"; + } + + // TokenizeShareRecordReward queries the tokenize share record rewards + rpc TokenizeShareRecordReward(QueryTokenizeShareRecordRewardRequest) + returns (QueryTokenizeShareRecordRewardResponse) { + option (google.api.http).get = + "/gaia/lsm/v1beta1/{owner_address}/tokenize_share_record_rewards"; + } +} + +// QueryParamsRequest is request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is response type for the Query/Params RPC method. +message QueryParamsResponse { + // params holds all the parameters of this module. + Params params = 1 + [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ]; +} + +// QueryTokenizeShareRecordByIdRequest is request type for the +// Query/QueryTokenizeShareRecordById RPC method. +message QueryTokenizeShareRecordByIdRequest { uint64 id = 1; } + +// QueryTokenizeShareRecordByIdResponse is response type for the +// Query/QueryTokenizeShareRecordById RPC method. +message QueryTokenizeShareRecordByIdResponse { + TokenizeShareRecord record = 1 [ (gogoproto.nullable) = false ]; +} + +// QueryTokenizeShareRecordByDenomRequest is request type for the +// Query/QueryTokenizeShareRecordByDenom RPC method. +message QueryTokenizeShareRecordByDenomRequest { string denom = 1; } + +// QueryTokenizeShareRecordByDenomResponse is response type for the +// Query/QueryTokenizeShareRecordByDenom RPC method. +message QueryTokenizeShareRecordByDenomResponse { + TokenizeShareRecord record = 1 [ (gogoproto.nullable) = false ]; +} + +// QueryTokenizeShareRecordsOwnedRequest is request type for the +// Query/QueryTokenizeShareRecordsOwned RPC method. +message QueryTokenizeShareRecordsOwnedRequest { string owner = 1; } + +// QueryTokenizeShareRecordsOwnedResponse is response type for the +// Query/QueryTokenizeShareRecordsOwned RPC method. +message QueryTokenizeShareRecordsOwnedResponse { + repeated TokenizeShareRecord records = 1 [ (gogoproto.nullable) = false ]; +} + +// QueryAllTokenizeShareRecordsRequest is request type for the +// Query/QueryAllTokenizeShareRecords RPC method. +message QueryAllTokenizeShareRecordsRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryAllTokenizeShareRecordsResponse is response type for the +// Query/QueryAllTokenizeShareRecords RPC method. +message QueryAllTokenizeShareRecordsResponse { + repeated TokenizeShareRecord records = 1 [ (gogoproto.nullable) = false ]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryLastTokenizeShareRecordIdRequest is request type for the +// Query/QueryLastTokenizeShareRecordId RPC method. +message QueryLastTokenizeShareRecordIdRequest {} + +// QueryLastTokenizeShareRecordIdResponse is response type for the +// Query/QueryLastTokenizeShareRecordId RPC method. +message QueryLastTokenizeShareRecordIdResponse { uint64 id = 1; } + +// QueryTotalTokenizeSharedAssetsRequest is request type for the +// Query/QueryTotalTokenizeSharedAssets RPC method. +message QueryTotalTokenizeSharedAssetsRequest {} + +// QueryTotalTokenizeSharedAssetsResponse is response type for the +// Query/QueryTotalTokenizeSharedAssets RPC method. 
+message QueryTotalTokenizeSharedAssetsResponse { + cosmos.base.v1beta1.Coin value = 1 [ (gogoproto.nullable) = false ]; +} + +// QueryTotalLiquidStaked is request type for the +// Query/TotalLiquidStaked RPC method. +message QueryTotalLiquidStaked {} + +// QueryTotalLiquidStakedResponse is response type for the +// Query/TotalLiquidStaked RPC method. +message QueryTotalLiquidStakedResponse { string tokens = 1; } + +// QueryTokenizeShareLockInfo queries the tokenize share lock information +// associated with a given account +message QueryTokenizeShareLockInfo { string address = 1; } + +// QueryTokenizeShareLockInfoResponse is the response from the +// QueryTokenizeShareLockInfo query +message QueryTokenizeShareLockInfoResponse { + string status = 1; + string expiration_time = 2; +} + +// QueryTokenizeShareRecordRewardRequest is the request type for the +// Query/TokenizeShareRecordReward RPC method. +message QueryTokenizeShareRecordRewardRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string owner_address = 1 [ (gogoproto.moretags) = "yaml:\"owner_address\"" ]; +} + +// QueryTokenizeShareRecordRewardResponse is the response type for the +// Query/TokenizeShareRecordReward RPC method. +message QueryTokenizeShareRecordRewardResponse { + // rewards defines all the rewards accrued by a delegator. + repeated TokenizeShareRecordReward rewards = 1 + [ (gogoproto.nullable) = false ]; + // total defines the sum of all the rewards. + repeated cosmos.base.v1beta1.DecCoin total = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.DecCoins" + ]; +} \ No newline at end of file diff --git a/proto/gaia/lsm/v1beta1/tx.proto b/proto/gaia/lsm/v1beta1/tx.proto new file mode 100644 index 00000000000..83f9cde13ab --- /dev/null +++ b/proto/gaia/lsm/v1beta1/tx.proto @@ -0,0 +1,191 @@ +syntax = "proto3"; +package gaia.lsm.v1beta1; + +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; + +import "cosmos_proto/cosmos.proto"; +import "cosmos/base/v1beta1/coin.proto"; +import "gaia/lsm/v1beta1/lsm.proto"; +import "cosmos/msg/v1/msg.proto"; +import "amino/amino.proto"; + +option go_package = "github.com/cosmos/gaia/x/lsm/types"; + +// Msg defines the lsm Msg service. +service Msg { + option (cosmos.msg.v1.service) = true; + + // UpdateParams defines an operation for updating the x/lsm module + // parameters. + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); + + // TokenizeShares defines a method for tokenizing shares from a validator. + rpc TokenizeShares(MsgTokenizeShares) returns (MsgTokenizeSharesResponse); + + // RedeemTokensForShares defines a method for redeeming tokens from a + // validator for shares. 
+ rpc RedeemTokensForShares(MsgRedeemTokensForShares) + returns (MsgRedeemTokensForSharesResponse); + + // TransferTokenizeShareRecord defines a method to transfer ownership of + // TokenizeShareRecord + rpc TransferTokenizeShareRecord(MsgTransferTokenizeShareRecord) + returns (MsgTransferTokenizeShareRecordResponse); + + // DisableTokenizeShares defines a method to prevent the tokenization of an + // address's stake + rpc DisableTokenizeShares(MsgDisableTokenizeShares) + returns (MsgDisableTokenizeSharesResponse); + + // EnableTokenizeShares defines a method to re-enable the tokenization of an + // address's stake after it has been disabled + rpc EnableTokenizeShares(MsgEnableTokenizeShares) + returns (MsgEnableTokenizeSharesResponse); +} + +// MsgUpdateParams is the Msg/UpdateParams request type. +message MsgUpdateParams { + option (cosmos.msg.v1.signer) = "authority"; + option (amino.name) = "gaia/x/lsm/MsgUpdateParams"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (cosmos_proto.scalar) = "cosmos.AddressString" ]; + // params defines the x/lsm parameters to update. + // + // NOTE: All parameters must be supplied. + Params params = 2 + [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ]; +}; + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. +message MsgUpdateParamsResponse {}; + +// MsgTokenizeShares tokenizes a delegation +message MsgTokenizeShares { + option (cosmos.msg.v1.signer) = "delegator_address"; + option (amino.name) = "gaia/x/lsm/MsgTokenizeShares"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string delegator_address = 1 + [ (gogoproto.moretags) = "yaml:\"delegator_address\"" ]; + string validator_address = 2 + [ (gogoproto.moretags) = "yaml:\"validator_address\"" ]; + cosmos.base.v1beta1.Coin amount = 3 [ (gogoproto.nullable) = false ]; + string tokenized_share_owner = 4; +} + +// MsgTokenizeSharesResponse defines the Msg/MsgTokenizeShares response type. +message MsgTokenizeSharesResponse { + cosmos.base.v1beta1.Coin amount = 1 [ (gogoproto.nullable) = false ]; +} + +// MsgRedeemTokensForShares redeems a tokenized share back into a native +// delegation +message MsgRedeemTokensForShares { + option (cosmos.msg.v1.signer) = "delegator_address"; + option (amino.name) = "gaia/x/lsm/MsgRedeemTokensForShares"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string delegator_address = 1 + [ (gogoproto.moretags) = "yaml:\"delegator_address\"" ]; + cosmos.base.v1beta1.Coin amount = 2 [ (gogoproto.nullable) = false ]; +} + +// MsgRedeemTokensForSharesResponse defines the Msg/MsgRedeemTokensForShares +// response type. +message MsgRedeemTokensForSharesResponse { + cosmos.base.v1beta1.Coin amount = 1 [ (gogoproto.nullable) = false ]; +} + +// MsgTransferTokenizeShareRecord transfers ownership of a tokenize share record +message MsgTransferTokenizeShareRecord { + option (cosmos.msg.v1.signer) = "sender"; + option (amino.name) = "gaia/x/lsm/MsgTransferTokenizeShareRecord"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + uint64 tokenize_share_record_id = 1; + string sender = 2; + string new_owner = 3; +} + +// MsgTransferTokenizeShareRecordResponse defines the +// Msg/MsgTransferTokenizeShareRecord response type. 
+message MsgTransferTokenizeShareRecordResponse {} + +// MsgDisableTokenizeShares prevents the tokenization of shares for a given +// address +message MsgDisableTokenizeShares { + option (cosmos.msg.v1.signer) = "delegator_address"; + option (amino.name) = "gaia/x/lsm/MsgDisableTokenizeShares"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string delegator_address = 1 + [ (gogoproto.moretags) = "yaml:\"delegator_address\"" ]; +} + +// MsgDisableTokenizeSharesResponse defines the Msg/DisableTokenizeShares +// response type. +message MsgDisableTokenizeSharesResponse {} + +// MsgEnableTokenizeShares re-enables tokenization of shares for a given address +message MsgEnableTokenizeShares { + option (cosmos.msg.v1.signer) = "delegator_address"; + option (amino.name) = "gaia/x/lsm/MsgEnableTokenizeShares"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string delegator_address = 1 + [ (gogoproto.moretags) = "yaml:\"delegator_address\"" ]; +} + +// MsgEnableTokenizeSharesResponse defines the Msg/EnableTokenizeShares response +// type. +message MsgEnableTokenizeSharesResponse { + google.protobuf.Timestamp completion_time = 1 + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; +} + +// MsgWithdrawTokenizeShareRecordReward withdraws tokenize share rewards for a +// specific record +message MsgWithdrawTokenizeShareRecordReward { + option (cosmos.msg.v1.signer) = "owner_address"; + option (amino.name) = "gaia/x/lsm/MsgWithdrawTokenizeShareRecordReward"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string owner_address = 1 [ (gogoproto.moretags) = "yaml:\"owner_address\"" ]; + uint64 record_id = 2; +} + +// MsgWithdrawTokenizeShareRecordRewardResponse defines the +// Msg/WithdrawTokenizeShareRecordReward response type. +message MsgWithdrawTokenizeShareRecordRewardResponse {} + +// MsgWithdrawAllTokenizeShareRecordReward withdraws tokenize share rewards for +// all records owned by the designated owner +message MsgWithdrawAllTokenizeShareRecordReward { + option (cosmos.msg.v1.signer) = "owner_address"; + option (amino.name) = "gaia/x/lsm/MsgWithdrawAllTokenizeShareRecordReward"; + + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string owner_address = 1 [ (gogoproto.moretags) = "yaml:\"owner_address\"" ]; +} + +// MsgWithdrawAllTokenizeShareRecordRewardResponse defines the +// Msg/WithdrawAllTokenizeShareRecordReward response type. +message MsgWithdrawAllTokenizeShareRecordRewardResponse {} diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 00000000000..6c695633b00 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,14 @@ +//go:build tools
// +build tools + +// This is the canonical way to enforce dependency inclusion in go.mod for tools that are not directly involved in the build process. 
+// See +// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module + +package tools + +//nolint + +import ( + _ "github.com/vektra/mockery/v2" +) diff --git a/x/lsm/keeper/distribution.go b/x/lsm/keeper/distribution.go new file mode 100644 index 00000000000..36cb56f2c82 --- /dev/null +++ b/x/lsm/keeper/distribution.go @@ -0,0 +1,146 @@ +package keeper + +import ( + "context" + goerrors "errors" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +func (k Keeper) WithdrawSingleShareRecordReward(ctx context.Context, recordID uint64) error { + record, err := k.GetTokenizeShareRecord(ctx, recordID) + if err != nil { + return err + } + + ownerAddr, err := k.authKeeper.AddressCodec().StringToBytes(record.Owner) + if err != nil { + return err + } + owner := sdk.AccAddress(ownerAddr) + + // This check is necessary to prevent sending rewards to a blacklisted address + if k.bankKeeper.BlockedAddr(owner) { + return errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", owner.String()) + } + + valAddr, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(record.Validator) + if err != nil { + return err + } + + validatorFound := true + _, err = k.stakingKeeper.Validator(ctx, valAddr) + if err != nil { + if !goerrors.Is(err, stakingtypes.ErrNoValidatorFound) { + return err + } + + validatorFound = false + } + + delegationFound := true + _, err = k.stakingKeeper.Delegation(ctx, record.GetModuleAddress(), valAddr) + if err != nil { + if !goerrors.Is(err, stakingtypes.ErrNoDelegation) { + return err + } + + delegationFound = false + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + if validatorFound && delegationFound { + // withdraw rewards into reward module account and send it to reward owner + cacheCtx, write := sdkCtx.CacheContext() + _, err = k.distKeeper.WithdrawDelegationRewards(cacheCtx, record.GetModuleAddress(), valAddr) + if err != nil { + return err + } + write() + } + + // apply changes when the module account has positive balance + balances := k.bankKeeper.GetAllBalances(ctx, record.GetModuleAddress()) + if !balances.Empty() { + err = k.bankKeeper.SendCoins(ctx, record.GetModuleAddress(), owner, balances) + if err != nil { + return err + } + + sdkCtx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeWithdrawTokenizeShareReward, + sdk.NewAttribute(types.AttributeKeyWithdrawAddress, owner.String()), + sdk.NewAttribute(sdk.AttributeKeyAmount, balances.String()), + ), + ) + } + return nil +} + +// WithdrawTokenizeShareRecordReward withdraws rewards for owning a TokenizeShareRecord +func (k Keeper) WithdrawTokenizeShareRecordReward(ctx context.Context, ownerAddr sdk.AccAddress, + recordID uint64, +) (sdk.Coins, error) { + record, err := k.GetTokenizeShareRecord(ctx, recordID) + if err != nil { + return nil, err + } + + // This check is necessary to prevent sending rewards to a blacklisted address + if k.bankKeeper.BlockedAddr(ownerAddr) { + return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", ownerAddr) + } + + if record.Owner != ownerAddr.String() { + return nil, types.ErrNotTokenizeShareRecordOwner + } + + valAddr, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(record.Validator) + if err != nil { + return nil, err + } + + _, err = k.stakingKeeper.Validator(ctx, valAddr) + if err 
!= nil { + return nil, err + } + + _, err = k.stakingKeeper.Delegation(ctx, record.GetModuleAddress(), valAddr) + if err != nil { + return nil, err + } + + // withdraw rewards into reward module account and send it to reward owner + _, err = k.distKeeper.WithdrawDelegationRewards(ctx, record.GetModuleAddress(), valAddr) + if err != nil { + return nil, err + } + + // apply changes when the module account has positive balance + rewards := k.bankKeeper.GetAllBalances(ctx, record.GetModuleAddress()) + if !rewards.Empty() { + err = k.bankKeeper.SendCoins(ctx, record.GetModuleAddress(), ownerAddr, rewards) + if err != nil { + return nil, err + } + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeWithdrawTokenizeShareReward, + sdk.NewAttribute(types.AttributeKeyWithdrawAddress, ownerAddr.String()), + sdk.NewAttribute(sdk.AttributeKeyAmount, rewards.String()), + ), + ) + + return rewards, nil +} diff --git a/x/lsm/keeper/genesis.go b/x/lsm/keeper/genesis.go new file mode 100644 index 00000000000..7918b2c4e03 --- /dev/null +++ b/x/lsm/keeper/genesis.go @@ -0,0 +1,74 @@ +package keeper + +import ( + "context" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// InitGenesis sets lsm information for genesis +func (k Keeper) InitGenesis(ctx context.Context, data *types.GenesisState) { + // + // Set the total liquid staked tokens + k.SetTotalLiquidStakedTokens(ctx, data.TotalLiquidStakedTokens) + + // Set each tokenize share record, as well as the last tokenize share record ID + latestID := uint64(0) + for _, tokenizeShareRecord := range data.TokenizeShareRecords { + if err := k.AddTokenizeShareRecord(ctx, tokenizeShareRecord); err != nil { + panic(err) + } + if tokenizeShareRecord.Id > latestID { + latestID = tokenizeShareRecord.Id + } + } + if data.LastTokenizeShareRecordId < latestID { + panic("Tokenize share record specified with ID greater than the latest ID") + } + k.SetLastTokenizeShareRecordID(ctx, data.LastTokenizeShareRecordId) + + // Set the tokenize shares locks for accounts that have disabled tokenizing shares + // The lock can either be in status LOCKED or LOCK_EXPIRING + // If it is in status LOCK_EXPIRING, the unlocking must also be queued + for _, tokenizeShareLock := range data.TokenizeShareLocks { + address, err := k.authKeeper.AddressCodec().StringToBytes(tokenizeShareLock.Address) + if err != nil { + panic(err) + } + + switch tokenizeShareLock.Status { + case types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED.String(): + k.AddTokenizeSharesLock(ctx, address) + + case types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING.String(): + completionTime := tokenizeShareLock.CompletionTime + + authorizations := k.GetPendingTokenizeShareAuthorizations(ctx, completionTime) + authorizations.Addresses = append(authorizations.Addresses, sdk.AccAddress(address).String()) + + k.SetPendingTokenizeShareAuthorizations(ctx, completionTime, authorizations) + k.SetTokenizeSharesUnlockTime(ctx, address, completionTime) + + default: + panic(fmt.Sprintf("Unsupported tokenize share lock status %s", tokenizeShareLock.Status)) + } + } +} + +func (k Keeper) ExportGenesis(ctx context.Context) *types.GenesisState { + params, err := k.GetParams(ctx) + if err != nil { + panic(err) + } + + return &types.GenesisState{ + Params: params, + TokenizeShareRecords: k.GetAllTokenizeShareRecords(ctx), + LastTokenizeShareRecordId: k.GetLastTokenizeShareRecordID(ctx), + TotalLiquidStakedTokens: k.GetTotalLiquidStakedTokens(ctx), + 
TokenizeShareLocks: k.GetAllTokenizeSharesLocks(ctx), + } +} diff --git a/x/lsm/keeper/grpc_query.go b/x/lsm/keeper/grpc_query.go new file mode 100644 index 00000000000..aa1bbc6b4e2 --- /dev/null +++ b/x/lsm/keeper/grpc_query.go @@ -0,0 +1,288 @@ +package keeper + +import ( + "context" + goerrors "errors" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "cosmossdk.io/math" + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// Querier is used as Keeper will have duplicate methods if used directly, and gRPC names take precedence over keeper +type Querier struct { + *Keeper +} + +var _ types.QueryServer = Querier{} + +func NewQuerier(keeper *Keeper) Querier { + return Querier{Keeper: keeper} +} + +// Params queries the staking parameters +func (k Querier) Params(ctx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + params, err := k.GetParams(ctx) + if err != nil { + return nil, err + } + return &types.QueryParamsResponse{Params: params}, nil +} + +// TokenizeShareRecordById queries for individual tokenize share record information by share by id +func (k Querier) TokenizeShareRecordById(c context.Context, req *types.QueryTokenizeShareRecordByIdRequest) (*types.QueryTokenizeShareRecordByIdResponse, error) { //nolint:revive // fixing this would require changing the .proto files, so we might as well leave it alone + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + record, err := k.GetTokenizeShareRecord(ctx, req.Id) + if err != nil { + return nil, err + } + + return &types.QueryTokenizeShareRecordByIdResponse{ + Record: record, + }, nil +} + +// TokenizeShareRecordByDenom queries for individual tokenize share record information by share denom +func (k Querier) TokenizeShareRecordByDenom(c context.Context, req *types.QueryTokenizeShareRecordByDenomRequest) (*types.QueryTokenizeShareRecordByDenomResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + record, err := k.GetTokenizeShareRecordByDenom(ctx, req.Denom) + if err != nil { + return nil, err + } + + return &types.QueryTokenizeShareRecordByDenomResponse{ + Record: record, + }, nil +} + +// TokenizeShareRecordsOwned queries tokenize share records by address +func (k Querier) TokenizeShareRecordsOwned(c context.Context, req *types.QueryTokenizeShareRecordsOwnedRequest) (*types.QueryTokenizeShareRecordsOwnedResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + owner, err := k.authKeeper.AddressCodec().StringToBytes(req.Owner) + if err != nil { + return nil, err + } + records := k.GetTokenizeShareRecordsByOwner(ctx, owner) + + return &types.QueryTokenizeShareRecordsOwnedResponse{ + Records: records, + }, nil +} + +// AllTokenizeShareRecords queries for all tokenize share records +func (k Querier) AllTokenizeShareRecords(c context.Context, req *types.QueryAllTokenizeShareRecordsRequest) (*types.QueryAllTokenizeShareRecordsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + ctx := sdk.UnwrapSDKContext(c) + + var records []types.TokenizeShareRecord + + store := 
k.storeService.OpenKVStore(ctx) + valStore := prefix.NewStore(runtime.KVStoreAdapter(store), types.TokenizeShareRecordPrefix) + pageRes, err := query.FilteredPaginate(valStore, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) { + var tokenizeShareRecord types.TokenizeShareRecord + if err := k.cdc.Unmarshal(value, &tokenizeShareRecord); err != nil { + return false, err + } + + if accumulate { + records = append(records, tokenizeShareRecord) + } + return true, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryAllTokenizeShareRecordsResponse{ + Records: records, + Pagination: pageRes, + }, nil +} + +// LastTokenizeShareRecordId queries for last tokenize share record id +func (k Querier) LastTokenizeShareRecordId(c context.Context, req *types.QueryLastTokenizeShareRecordIdRequest) (*types.QueryLastTokenizeShareRecordIdResponse, error) { //nolint:revive // fixing this would require changing the .proto files, so we might as well leave it alone + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + return &types.QueryLastTokenizeShareRecordIdResponse{ + Id: k.GetLastTokenizeShareRecordID(ctx), + }, nil +} + +// TotalTokenizeSharedAssets queries for total tokenized staked assets +func (k Querier) TotalTokenizeSharedAssets(c context.Context, req *types.QueryTotalTokenizeSharedAssetsRequest) (*types.QueryTotalTokenizeSharedAssetsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + ctx := sdk.UnwrapSDKContext(c) + records := k.GetAllTokenizeShareRecords(ctx) + totalTokenizeShared := math.ZeroInt() + + for _, record := range records { + moduleAcc := record.GetModuleAddress() + valAddr, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(record.Validator) + if err != nil { + return nil, err + } + + validator, err := k.stakingKeeper.GetValidator(ctx, valAddr) + if err != nil { + return nil, err + } + + delegation, err := k.stakingKeeper.GetDelegation(ctx, moduleAcc, valAddr) + if err != nil { + return nil, err + } + + tokens := validator.TokensFromShares(delegation.Shares) + totalTokenizeShared = totalTokenizeShared.Add(tokens.RoundInt()) + } + + bondDenom, err := k.stakingKeeper.BondDenom(ctx) + if err != nil { + return nil, err + } + + return &types.QueryTotalTokenizeSharedAssetsResponse{ + Value: sdk.NewCoin(bondDenom, totalTokenizeShared), + }, nil +} + +// TotalLiquidStaked queries for total tokenized staked tokens +// Liquid staked tokens are either tokenized delegations or delegations +// owned by a module account +func (k Querier) TotalLiquidStaked(c context.Context, req *types.QueryTotalLiquidStaked) (*types.QueryTotalLiquidStakedResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + ctx := sdk.UnwrapSDKContext(c) + totalLiquidStaked := k.GetTotalLiquidStakedTokens(ctx).String() + return &types.QueryTotalLiquidStakedResponse{ + Tokens: totalLiquidStaked, + }, nil +} + +// TokenizeShareLockInfo queries status of an account's tokenize share lock +func (k Querier) TokenizeShareLockInfo(c context.Context, req *types.QueryTokenizeShareLockInfo) (*types.QueryTokenizeShareLockInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + ctx := sdk.UnwrapSDKContext(c) + + address, err := k.authKeeper.AddressCodec().StringToBytes(req.Address) + if err != nil { + panic(err) + } + + 
lockStatus, completionTime := k.GetTokenizeSharesLock(ctx, address) + + timeString := "" + if !completionTime.IsZero() { + timeString = completionTime.String() + } + + return &types.QueryTokenizeShareLockInfoResponse{ + Status: lockStatus.String(), + ExpirationTime: timeString, + }, nil +} + +// TokenizeShareRecordReward returns estimated amount of reward from tokenize share record ownership +func (k Keeper) TokenizeShareRecordReward(c context.Context, req *types.QueryTokenizeShareRecordRewardRequest) (*types.QueryTokenizeShareRecordRewardResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + totalRewards := sdk.DecCoins{} + rewards := []types.TokenizeShareRecordReward{} + + ownerAddr, err := k.authKeeper.AddressCodec().StringToBytes(req.OwnerAddress) + if err != nil { + return nil, err + } + records := k.GetTokenizeShareRecordsByOwner(ctx, ownerAddr) + for _, record := range records { + valAddr, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(record.Validator) + if err != nil { + return nil, err + } + + moduleAddr := record.GetModuleAddress() + moduleBalance := k.bankKeeper.GetAllBalances(ctx, moduleAddr) + moduleBalanceDecCoins := sdk.NewDecCoinsFromCoins(moduleBalance...) + + validatorFound := true + val, err := k.stakingKeeper.Validator(ctx, valAddr) + if err != nil { + if !goerrors.Is(err, stakingtypes.ErrNoValidatorFound) { + return nil, err + } + + validatorFound = false + } + + delegationFound := true + del, err := k.stakingKeeper.Delegation(ctx, moduleAddr, valAddr) + if err != nil { + if !goerrors.Is(err, stakingtypes.ErrNoDelegation) { + return nil, err + } + + delegationFound = false + } + + if validatorFound && delegationFound { + // withdraw rewards + endingPeriod, err := k.distKeeper.IncrementValidatorPeriod(ctx, val) + if err != nil { + return nil, err + } + + recordReward, err := k.distKeeper.CalculateDelegationRewards(ctx, val, del, endingPeriod) + if err != nil { + return nil, err + } + + rewards = append(rewards, types.TokenizeShareRecordReward{ + RecordId: record.Id, + Reward: recordReward.Add(moduleBalanceDecCoins...), + }) + totalRewards = totalRewards.Add(recordReward...).Add(moduleBalanceDecCoins...) + } else if !moduleBalance.IsZero() { + rewards = append(rewards, types.TokenizeShareRecordReward{ + RecordId: record.Id, + Reward: moduleBalanceDecCoins, + }) + totalRewards = totalRewards.Add(moduleBalanceDecCoins...) 
+ } + } + + return &types.QueryTokenizeShareRecordRewardResponse{ + Rewards: rewards, + Total: totalRewards, + }, nil +} diff --git a/x/lsm/keeper/keeper.go b/x/lsm/keeper/keeper.go new file mode 100644 index 00000000000..bf182431478 --- /dev/null +++ b/x/lsm/keeper/keeper.go @@ -0,0 +1,63 @@ +package keeper + +import ( + "context" + + storetypes "cosmossdk.io/core/store" + "cosmossdk.io/log" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// Keeper of the x/lsm store +type Keeper struct { + storeService storetypes.KVStoreService + cdc codec.BinaryCodec + authKeeper types.AccountKeeper + bankKeeper types.BankKeeper + stakingKeeper types.StakingKeeper + distKeeper types.DistributionKeeper + // TODO Replace distribution keeper's tokenizeshare record hook thingy + // hooks types.StakingHooks + authority string +} + +// NewKeeper creates a new lsm Keeper instance +func NewKeeper( + cdc codec.BinaryCodec, + storeService storetypes.KVStoreService, + ak types.AccountKeeper, + bk types.BankKeeper, + sk types.StakingKeeper, + dk types.DistributionKeeper, + authority string, +) *Keeper { + // ensure that authority is a valid AccAddress + if _, err := ak.AddressCodec().StringToBytes(authority); err != nil { + panic("authority is not a valid acc address") + } + + return &Keeper{ + storeService: storeService, + cdc: cdc, + authKeeper: ak, + bankKeeper: bk, + stakingKeeper: sk, + distKeeper: dk, + authority: authority, + } +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx context.Context) log.Logger { + sdkCtx := sdk.UnwrapSDKContext(ctx) + return sdkCtx.Logger().With("module", "x/"+types.ModuleName) +} + +// GetAuthority returns the x/lsm module's authority. 
+func (k Keeper) GetAuthority() string { + return k.authority +} diff --git a/x/lsm/keeper/keeper_test.go b/x/lsm/keeper/keeper_test.go new file mode 100644 index 00000000000..31f19ec33cf --- /dev/null +++ b/x/lsm/keeper/keeper_test.go @@ -0,0 +1,101 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + sdkmath "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttime "github.com/cometbft/cometbft/types/time" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec/address" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + lsmkeeper "github.com/cosmos/gaia/v22/x/lsm/keeper" + lsmtypes "github.com/cosmos/gaia/v22/x/lsm/types" + "github.com/cosmos/gaia/v22/x/lsm/types/mocks" +) + +type KeeperTestSuite struct { + suite.Suite + + ctx sdk.Context + lsmKeeper *lsmkeeper.Keeper + stakingKeeper *mocks.StakingKeeper + bankKeeper *mocks.BankKeeper + accountKeeper *mocks.AccountKeeper + queryClient lsmtypes.QueryClient + msgServer lsmtypes.MsgServer +} + +func (s *KeeperTestSuite) SetupTest() { + require := s.Require() + key := storetypes.NewKVStoreKey(stakingtypes.StoreKey) + storeService := runtime.NewKVStoreService(key) + testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypes.NewTransientStoreKey("transient_test")) + ctx := testCtx.Ctx.WithBlockHeader(cmtproto.Header{Time: cmttime.Now()}) + encCfg := moduletestutil.MakeTestEncodingConfig() + + accountKeeper := mocks.NewAccountKeeper(s.T()) + accountKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")) + + bankKeeper := mocks.NewBankKeeper(s.T()) + stakingKeeper := mocks.NewStakingKeeper(s.T()) + distributionKeeper := mocks.NewDistributionKeeper(s.T()) + + stakingKeeper.EXPECT().ValidatorAddressCodec().Return(address.NewBech32Codec("cosmosvaloper")).Maybe() + + lsmKeeper := lsmkeeper.NewKeeper( + encCfg.Codec, + storeService, + accountKeeper, + bankKeeper, + stakingKeeper, + distributionKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + require.NoError(lsmKeeper.SetParams(ctx, lsmtypes.DefaultParams())) + + s.ctx = ctx + s.stakingKeeper = stakingKeeper + s.bankKeeper = bankKeeper + s.accountKeeper = accountKeeper + s.lsmKeeper = lsmKeeper + + lsmtypes.RegisterInterfaces(encCfg.InterfaceRegistry) + queryHelper := baseapp.NewQueryServerTestHelper(ctx, encCfg.InterfaceRegistry) + lsmtypes.RegisterQueryServer(queryHelper, lsmkeeper.Querier{Keeper: lsmKeeper}) + s.queryClient = lsmtypes.NewQueryClient(queryHelper) + s.msgServer = lsmkeeper.NewMsgServerImpl(lsmKeeper) +} + +func (s *KeeperTestSuite) TestParams() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + expParams := lsmtypes.DefaultParams() + // check that the empty keeper loads the default + resParams, err := keeper.GetParams(ctx) + require.NoError(err) + require.Equal(expParams, resParams) + + expParams.ValidatorBondFactor = sdkmath.LegacyNewDec(-1) + expParams.GlobalLiquidStakingCap = sdkmath.LegacyNewDec(1) + expParams.ValidatorLiquidStakingCap = sdkmath.LegacyNewDec(1) + require.NoError(keeper.SetParams(ctx, expParams)) + resParams, err = keeper.GetParams(ctx) + 
require.NoError(err) + require.True(expParams.Equal(resParams)) +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} diff --git a/x/lsm/keeper/liquid_stake.go b/x/lsm/keeper/liquid_stake.go new file mode 100644 index 00000000000..5f611b1d1f7 --- /dev/null +++ b/x/lsm/keeper/liquid_stake.go @@ -0,0 +1,633 @@ +package keeper + +import ( + "context" + "time" + + "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/exported" + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// SetTotalLiquidStakedTokens stores the total outstanding tokens owned by a liquid staking provider +func (k Keeper) SetTotalLiquidStakedTokens(ctx context.Context, tokens math.Int) { + store := k.storeService.OpenKVStore(ctx) + + tokensBz, err := tokens.Marshal() + if err != nil { + panic(err) + } + + err = store.Set(types.TotalLiquidStakedTokensKey, tokensBz) + if err != nil { + panic(err) + } +} + +// GetTotalLiquidStakedTokens returns the total outstanding tokens owned by a liquid staking provider +// Returns zero if the total liquid stake amount has not been initialized +func (k Keeper) GetTotalLiquidStakedTokens(ctx context.Context) math.Int { + store := k.storeService.OpenKVStore(ctx) + tokensBz, err := store.Get(types.TotalLiquidStakedTokensKey) + if err != nil { + panic(err) + } + + if tokensBz == nil { + return math.ZeroInt() + } + + var tokens math.Int + if err := tokens.Unmarshal(tokensBz); err != nil { + panic(err) + } + + return tokens +} + +// Checks if an account associated with a given delegation is related to liquid staking +// +// This is determined by checking if the account has a 32-length address +// which will identify the following scenarios: +// - An account has tokenized their shares, and thus the delegation is +// owned by the tokenize share record module account +// - A liquid staking provider is delegating through an ICA account +// +// Both ICA accounts and tokenize share record module accounts have 32-length addresses +// NOTE: This will have to be refactored before adapting it to chains beyond gaia +// as other chains may have 32-length addresses that are not related to the above scenarios +func (k Keeper) DelegatorIsLiquidStaker(delegatorAddress sdk.AccAddress) bool { + return len(delegatorAddress) == 32 +} + +// CheckExceedsGlobalLiquidStakingCap checks if a liquid delegation would cause the +// global liquid staking cap to be exceeded +// A liquid delegation is defined as either tokenized shares, or a delegation from an ICA Account +// The total stake is determined by the balance of the bonded pool +// If the delegation's shares are already bonded (e.g. in the event of a tokenized share) +// the tokens are already included in the bonded pool +// If the delegation's shares are not bonded (e.g. 
normal delegation), +// we need to add the tokens to the current bonded pool balance to get the total staked +func (k Keeper) CheckExceedsGlobalLiquidStakingCap(ctx context.Context, tokens math.Int, sharesAlreadyBonded bool) (bool, error) { + liquidStakingCap, err := k.GlobalLiquidStakingCap(ctx) + if err != nil { + return false, err + } + + liquidStakedAmount := k.GetTotalLiquidStakedTokens(ctx) + + // Determine the total stake from the balance of the bonded pool + // If this is not a tokenized delegation, we need to add the tokens to the pool balance since + // they would not have been counted yet + // If this is for a tokenized delegation, the tokens are already included in the pool balance + totalStakedAmount, err := k.stakingKeeper.TotalBondedTokens(ctx) + if err != nil { + return false, err + } + + if !sharesAlreadyBonded { + totalStakedAmount = totalStakedAmount.Add(tokens) + } + + // Calculate the percentage of stake that is liquid + + updatedLiquidStaked := math.LegacyNewDecFromInt(liquidStakedAmount.Add(tokens)) + liquidStakePercent := updatedLiquidStaked.Quo(math.LegacyNewDecFromInt(totalStakedAmount)) + + return liquidStakePercent.GT(liquidStakingCap), nil +} + +// CheckExceedsValidatorBondCap checks if a liquid delegation to a validator would cause +// the liquid shares to exceed the validator bond factor +// A liquid delegation is defined as either tokenized shares, or a delegation from an ICA Account +// Returns true if the cap is exceeded +func (k Keeper) CheckExceedsValidatorBondCap(ctx context.Context, validator types.LiquidValidator, + shares math.LegacyDec, +) (bool, error) { + validatorBondFactor, err := k.ValidatorBondFactor(ctx) + if err != nil { + return false, err + } + + if validatorBondFactor.Equal(types.ValidatorBondCapDisabled) { + return false, nil + } + maxValLiquidShares := validator.ValidatorBondShares.Mul(validatorBondFactor) + + return validator.LiquidShares.Add(shares).GT(maxValLiquidShares), nil +} + +// CheckExceedsValidatorLiquidStakingCap checks if a liquid delegation could cause the +// total liquid shares to exceed the liquid staking cap +// A liquid delegation is defined as either tokenized shares, or a delegation from an ICA Account +// If the liquid delegation's shares are already bonded (e.g. in the event of a tokenized share) +// the tokens are already included in the validator's delegator shares +// If the liquid delegation's shares are not bonded (e.g. 
normal delegation), +// we need to add the shares to the current validator's delegator shares to get the total shares +// Returns true if the cap is exceeded +func (k Keeper) CheckExceedsValidatorLiquidStakingCap(ctx context.Context, validator types.LiquidValidator, + shares math.LegacyDec, sharesAlreadyBonded bool, +) (bool, error) { + updatedLiquidShares := validator.LiquidShares.Add(shares) + str, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(validator.OperatorAddress) + if err != nil { + return false, err + } + stVal, err := k.stakingKeeper.GetValidator(ctx, str) + if err != nil { + return false, err + } + updatedTotalShares := stVal.DelegatorShares + if !sharesAlreadyBonded { + updatedTotalShares = updatedTotalShares.Add(shares) + } + + liquidStakePercent := updatedLiquidShares.Quo(updatedTotalShares) + liquidStakingCap, err := k.ValidatorLiquidStakingCap(ctx) + if err != nil { + return false, err + } + + return liquidStakePercent.GT(liquidStakingCap), nil +} + +// SafelyIncreaseTotalLiquidStakedTokens increments the total liquid staked tokens +// if the global cap is not surpassed by this delegation +// +// The percentage of liquid staked tokens must be less than the GlobalLiquidStakingCap: +// (TotalLiquidStakedTokens / TotalStakedTokens) <= GlobalLiquidStakingCap +func (k Keeper) SafelyIncreaseTotalLiquidStakedTokens(ctx context.Context, amount math.Int, sharesAlreadyBonded bool) error { + exceedsCap, err := k.CheckExceedsGlobalLiquidStakingCap(ctx, amount, sharesAlreadyBonded) + if err != nil { + return err + } + + if exceedsCap { + return types.ErrGlobalLiquidStakingCapExceeded + } + + k.SetTotalLiquidStakedTokens(ctx, k.GetTotalLiquidStakedTokens(ctx).Add(amount)) + return nil +} + +// DecreaseTotalLiquidStakedTokens decrements the total liquid staked tokens +func (k Keeper) DecreaseTotalLiquidStakedTokens(ctx context.Context, amount math.Int) error { + totalLiquidStake := k.GetTotalLiquidStakedTokens(ctx) + if amount.GT(totalLiquidStake) { + return types.ErrTotalLiquidStakedUnderflow + } + k.SetTotalLiquidStakedTokens(ctx, totalLiquidStake.Sub(amount)) + return nil +} + +// SafelyIncreaseValidatorLiquidShares increments the liquid shares on a validator, if: +// the validator bond factor and validator liquid staking cap will not be exceeded by this delegation +// +// The percentage of validator liquid shares must be less than the ValidatorLiquidStakingCap, +// and the total liquid staked shares cannot exceed the validator bond cap +// 1. (TotalLiquidStakedTokens / TotalStakedTokens) <= ValidatorLiquidStakingCap +// 2. 
LiquidShares <= (ValidatorBondShares * ValidatorBondFactor) +func (k Keeper) SafelyIncreaseValidatorLiquidShares(ctx context.Context, valAddress sdk.ValAddress, + shares math.LegacyDec, sharesAlreadyBonded bool, +) (types.LiquidValidator, error) { + validator, err := k.GetLiquidValidator(ctx, valAddress) + if err != nil { + return validator, err + } + + // Confirm the validator bond factor and validator liquid staking cap will not be exceeded + exceedsValidatorBondCap, err := k.CheckExceedsValidatorBondCap(ctx, validator, shares) + if err != nil { + return validator, err + } + + if exceedsValidatorBondCap { + return validator, types.ErrInsufficientValidatorBondShares + } + + exceedsValidatorLiquidStakingCap, err := k.CheckExceedsValidatorLiquidStakingCap(ctx, validator, shares, sharesAlreadyBonded) + if err != nil { + return validator, err + } + + if exceedsValidatorLiquidStakingCap { + return validator, types.ErrValidatorLiquidStakingCapExceeded + } + + // Increment the validator's liquid shares + validator.LiquidShares = validator.LiquidShares.Add(shares) + err = k.SetLiquidValidator(ctx, validator) + if err != nil { + return types.LiquidValidator{}, err + } + + return validator, nil +} + +// DecreaseValidatorLiquidShares decrements the liquid shares on a validator +func (k Keeper) DecreaseValidatorLiquidShares(ctx context.Context, valAddress sdk.ValAddress, + shares math.LegacyDec, +) (types.LiquidValidator, error) { + validator, err := k.GetLiquidValidator(ctx, valAddress) + if err != nil { + return validator, err + } + + if shares.GT(validator.LiquidShares) { + return validator, types.ErrValidatorLiquidSharesUnderflow + } + + validator.LiquidShares = validator.LiquidShares.Sub(shares) + err = k.SetLiquidValidator(ctx, validator) + if err != nil { + return types.LiquidValidator{}, err + } + + return validator, nil +} + +// Increase validator bond shares increments the validator's self bond +// in the event that the delegation amount on a validator bond delegation is increased +func (k Keeper) IncreaseValidatorBondShares(ctx context.Context, valAddress sdk.ValAddress, shares math.LegacyDec) error { + validator, err := k.GetLiquidValidator(ctx, valAddress) + if err != nil { + return err + } + + validator.ValidatorBondShares = validator.ValidatorBondShares.Add(shares) + err = k.SetLiquidValidator(ctx, validator) + if err != nil { + return err + } + + return nil +} + +// SafelyDecreaseValidatorBond decrements the validator's self bond +// so long as it will not cause the current delegations to exceed the threshold +// set by validator bond factor +func (k Keeper) SafelyDecreaseValidatorBond(ctx context.Context, valAddress sdk.ValAddress, shares math.LegacyDec) error { + validator, err := k.GetLiquidValidator(ctx, valAddress) + if err != nil { + return err + } + + // Check if the decreased self bond will cause the validator bond threshold to be exceeded + validatorBondFactor, err := k.ValidatorBondFactor(ctx) + if err != nil { + return err + } + + validatorBondEnabled := !validatorBondFactor.Equal(types.ValidatorBondCapDisabled) + maxValTotalShare := validator.ValidatorBondShares.Sub(shares).Mul(validatorBondFactor) + + if validatorBondEnabled && validator.LiquidShares.GT(maxValTotalShare) { + return types.ErrInsufficientValidatorBondShares + } + + // Decrement the validator's self bond + validator.ValidatorBondShares = validator.ValidatorBondShares.Sub(shares) + err = k.SetLiquidValidator(ctx, validator) + if err != nil { + return err + } + + return nil +} + +// Adds a lock that prevents 
tokenizing shares for an account +// The tokenize share lock store is implemented by keying on the account address +// and storing a timestamp as the value. The timestamp is empty when the lock is +// set and gets populated with the unlock completion time once the unlock has started +func (k Keeper) AddTokenizeSharesLock(ctx context.Context, address sdk.AccAddress) { + store := k.storeService.OpenKVStore(ctx) + key := types.GetTokenizeSharesLockKey(address) + err := store.Set(key, sdk.FormatTimeBytes(time.Time{})) + if err != nil { + panic(err) + } +} + +// Removes the tokenize share lock for an account to enable tokenizing shares +func (k Keeper) RemoveTokenizeSharesLock(ctx context.Context, address sdk.AccAddress) { + store := k.storeService.OpenKVStore(ctx) + key := types.GetTokenizeSharesLockKey(address) + err := store.Delete(key) + if err != nil { + panic(err) + } +} + +// Updates the timestamp associated with a lock to the time at which the lock expires +func (k Keeper) SetTokenizeSharesUnlockTime(ctx context.Context, address sdk.AccAddress, completionTime time.Time) { + store := k.storeService.OpenKVStore(ctx) + key := types.GetTokenizeSharesLockKey(address) + err := store.Set(key, sdk.FormatTimeBytes(completionTime)) + if err != nil { + panic(err) + } +} + +// Checks if there is currently a tokenize share lock for a given account +// Returns the status indicating whether the account is locked, unlocked, +// or as a lock expiring. If the lock is expiring, the expiration time is returned +func (k Keeper) GetTokenizeSharesLock(ctx context.Context, address sdk.AccAddress) (status types.TokenizeShareLockStatus, unlockTime time.Time) { + store := k.storeService.OpenKVStore(ctx) + key := types.GetTokenizeSharesLockKey(address) + bz, err := store.Get(key) + if err != nil { + panic(err) + } + + if len(bz) == 0 { + return types.TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED, time.Time{} + } + unlockTime, err = sdk.ParseTimeBytes(bz) + if err != nil { + panic(err) + } + if unlockTime.IsZero() { + return types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED, time.Time{} + } + return types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING, unlockTime +} + +// Returns all tokenize share locks +func (k Keeper) GetAllTokenizeSharesLocks(ctx context.Context) (tokenizeShareLocks []types.TokenizeShareLock) { + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + + iterator := storetypes.KVStorePrefixIterator(store, types.TokenizeSharesLockPrefix) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + addressBz := iterator.Key()[2:] // remove prefix bytes and address length + unlockTime, err := sdk.ParseTimeBytes(iterator.Value()) + if err != nil { + panic(err) + } + + var status types.TokenizeShareLockStatus + if unlockTime.IsZero() { + status = types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED + } else { + status = types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING + } + + bechPrefix := sdk.GetConfig().GetBech32AccountAddrPrefix() + lock := types.TokenizeShareLock{ + Address: sdk.MustBech32ifyAddressBytes(bechPrefix, addressBz), + Status: status.String(), + CompletionTime: unlockTime, + } + + tokenizeShareLocks = append(tokenizeShareLocks, lock) + } + + return tokenizeShareLocks +} + +// Stores a list of addresses pending tokenize share unlocking at the same time +func (k Keeper) SetPendingTokenizeShareAuthorizations(ctx context.Context, completionTime time.Time, authorizations types.PendingTokenizeShareAuthorizations) { + store := k.storeService.OpenKVStore(ctx) + timeKey := 
types.GetTokenizeShareAuthorizationTimeKey(completionTime) + bz := k.cdc.MustMarshal(&authorizations) + err := store.Set(timeKey, bz) + if err != nil { + panic(err) + } +} + +// Returns a list of addresses pending tokenize share unlocking at the same time +func (k Keeper) GetPendingTokenizeShareAuthorizations(ctx context.Context, completionTime time.Time) types.PendingTokenizeShareAuthorizations { + store := k.storeService.OpenKVStore(ctx) + + timeKey := types.GetTokenizeShareAuthorizationTimeKey(completionTime) + bz, err := store.Get(timeKey) + if err != nil { + panic(err) + } + + authorizations := types.PendingTokenizeShareAuthorizations{Addresses: []string{}} + if len(bz) == 0 { + return authorizations + } + k.cdc.MustUnmarshal(bz, &authorizations) + + return authorizations +} + +// Inserts the address into a queue where it will sit for 1 unbonding period +// before the tokenize share lock is removed +// Returns the completion time +func (k Keeper) QueueTokenizeSharesAuthorization(ctx context.Context, address sdk.AccAddress) (time.Time, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + blockTime := sdkCtx.BlockTime() + + params, err := k.stakingKeeper.GetParams(ctx) + if err != nil { + return blockTime, err + } + + completionTime := blockTime.Add(params.UnbondingTime) + + // Append the address to the list of addresses that also unlock at this time + authorizations := k.GetPendingTokenizeShareAuthorizations(ctx, completionTime) + authorizations.Addresses = append(authorizations.Addresses, address.String()) + + k.SetPendingTokenizeShareAuthorizations(ctx, completionTime, authorizations) + k.SetTokenizeSharesUnlockTime(ctx, address, completionTime) + + return completionTime, nil +} + +// Cancels a pending tokenize share authorization by removing the lock from the queue +func (k Keeper) CancelTokenizeShareLockExpiration(ctx context.Context, address sdk.AccAddress, completionTime time.Time) { + authorizations := k.GetPendingTokenizeShareAuthorizations(ctx, completionTime) + + updatedAddresses := []string{} + for _, expiringAddress := range authorizations.Addresses { + if address.String() != expiringAddress { + updatedAddresses = append(updatedAddresses, expiringAddress) + } + } + + authorizations.Addresses = updatedAddresses + k.SetPendingTokenizeShareAuthorizations(ctx, completionTime, authorizations) +} + +// Unlocks all queued tokenize share authorizations that have matured +// (i.e. have waited the full unbonding period) +func (k Keeper) RemoveExpiredTokenizeShareLocks(ctx context.Context, blockTime time.Time) ([]string, error) { + store := k.storeService.OpenKVStore(ctx) + + // iterators all time slices from time 0 until the current block time + prefixEnd := storetypes.InclusiveEndBytes(types.GetTokenizeShareAuthorizationTimeKey(blockTime)) + iterator, err := store.Iterator(types.TokenizeSharesUnlockQueuePrefix, prefixEnd) + if err != nil { + return []string{}, err + } + + defer iterator.Close() + + // collect all unlocked addresses + unlockedAddresses := []string{} + keys := [][]byte{} + for ; iterator.Valid(); iterator.Next() { + authorizations := types.PendingTokenizeShareAuthorizations{} + k.cdc.MustUnmarshal(iterator.Value(), &authorizations) + unlockedAddresses = append(unlockedAddresses, authorizations.Addresses...) 
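+		// remember the consumed queue key as well, so the entry can be deleted once iteration completes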
+		keys = append(keys, iterator.Key())
+	}
+
+	// delete the unlocked addresses' queue keys
+	for _, key := range keys {
+		err := store.Delete(key)
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	// remove the lock from each unlocked address
+	for _, unlockedAddress := range unlockedAddresses {
+		unlockedAddr, err := k.authKeeper.AddressCodec().StringToBytes(unlockedAddress)
+		if err != nil {
+			return unlockedAddresses, err
+		}
+		k.RemoveTokenizeSharesLock(ctx, unlockedAddr)
+	}
+
+	return unlockedAddresses, nil
+}
+
+// Calculates and sets the global liquid staked tokens and liquid shares by validator
+// The totals are determined by looping over each delegation record and summing the stake
+// if the delegator has a 32-length address. Checking for a 32-length address will capture
+// ICA accounts, as well as tokenized delegations which are owned by module accounts
+// under the hood
+// This function must be called in the upgrade handler which onboards LSM
+func (k Keeper) RefreshTotalLiquidStaked(ctx context.Context) error {
+	validators, err := k.stakingKeeper.GetAllValidators(ctx)
+	if err != nil {
+		return err
+	}
+
+	// First reset each validator's liquid shares to 0
+	for _, validator := range validators {
+		str, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(validator.OperatorAddress)
+		if err != nil {
+			return err
+		}
+		liquidVal, err := k.GetLiquidValidator(ctx, str)
+		if err != nil {
+			return err
+		}
+		liquidVal.LiquidShares = math.LegacyZeroDec()
+		err = k.SetLiquidValidator(ctx, liquidVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	delegations, err := k.stakingKeeper.GetAllDelegations(ctx)
+	if err != nil {
+		return err
+	}
+
+	// Sum up the total liquid tokens and increment each validator's liquid shares
+	totalLiquidStakedTokens := math.ZeroInt()
+	for _, delegation := range delegations {
+		delegatorAddress, err := k.authKeeper.AddressCodec().StringToBytes(delegation.DelegatorAddress)
+		if err != nil {
+			return err
+		}
+
+		// If the delegator is either an ICA account or a tokenize share module account,
+		// the delegation should be considered to be associated with liquid staking
+		// Consequently, the global number of liquid staked tokens, and the total
+		// liquid shares on the validator should be incremented
+		if k.DelegatorIsLiquidStaker(delegatorAddress) {
+			validatorAddress, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(delegation.ValidatorAddress)
+			if err != nil {
+				return err
+			}
+			validator, err := k.stakingKeeper.GetValidator(ctx, validatorAddress)
+			if err != nil {
+				return err
+			}
+			liquidVal, err := k.GetLiquidValidator(ctx, validatorAddress)
+			if err != nil {
+				return err
+			}
+
+			liquidShares := delegation.Shares
+			liquidTokens := validator.TokensFromShares(liquidShares).TruncateInt()
+
+			liquidVal.LiquidShares = liquidVal.LiquidShares.Add(liquidShares)
+			err = k.SetLiquidValidator(ctx, liquidVal)
+			if err != nil {
+				return err
+			}
+
+			totalLiquidStakedTokens = totalLiquidStakedTokens.Add(liquidTokens)
+		}
+	}
+
+	k.SetTotalLiquidStakedTokens(ctx, totalLiquidStakedTokens)
+
+	return nil
+}
+
+// CheckVestedDelegationInVestingAccount verifies whether the provided vesting account
+// holds a vested delegation for an equal or greater amount of the specified coin
+// at the given block time.
+//
+// Note that this function facilitates a specific use-case in the LSM module for tokenizing vested delegations.
+// For more details, see https://github.com/cosmos/gaia/issues/2877.
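+//
+// As a worked example of the arithmetic below (illustrative numbers only): with 40_000 of the
+// bond denom still vesting at blockTime, DelegatedVesting of 70_000 and DelegatedFree of 10_000,
+//
+//	x := math.MinInt(vestingAmount.Sub(delVestingAmount), math.ZeroInt()) // 40_000 - 70_000 => -30_000
+//	delVested = delVested.Add(sdk.NewCoin(coin.Denom, x.Abs()))           // 10_000 + 30_000 = 40_000
+//
+// so a tokenization of up to 40_000 of that denom passes the check, and anything above fails.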
+func CheckVestedDelegationInVestingAccount(account vesting.VestingAccount, blockTime time.Time, coin sdk.Coin) bool { + // Get the vesting coins at the current block time + vestingAmount := account.GetVestingCoins(blockTime).AmountOf(coin.Denom) + + // Note that the "DelegatedVesting" and "DelegatedFree" values + // were computed during the last delegation or undelegation operation + delVestingAmount := account.GetDelegatedVesting().AmountOf(coin.Denom) + delVested := account.GetDelegatedFree() + + // Calculate the new vested delegated coins + x := math.MinInt(vestingAmount.Sub(delVestingAmount), math.ZeroInt()) + + // Add the newly vested delegated coins to the existing delegated vested amount + if !x.IsZero() { + delVested = delVested.Add(sdk.NewCoin(coin.Denom, x.Abs())) + } + + // Check if the total delegated vested amount is greater than or equal to the specified coin amount + return delVested.AmountOf(coin.Denom).GTE(coin.Amount) +} + +// SetLiquidValidator sets the main record holding liquid validator details +func (k Keeper) SetLiquidValidator(ctx context.Context, validator types.LiquidValidator) error { + store := k.storeService.OpenKVStore(ctx) + bz := types.MustMarshalValidator(k.cdc, &validator) + str, err := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(validator.OperatorAddress) + if err != nil { + return err + } + return store.Set(types.GetLiquidValidatorKey(str), bz) +} + +// GetLiquidValidator gets a liquid validator record +func (k Keeper) GetLiquidValidator(ctx context.Context, addr sdk.ValAddress) (validator types.LiquidValidator, + err error) { + store := k.storeService.OpenKVStore(ctx) + value, err := store.Get(types.GetLiquidValidatorKey(addr)) + if err != nil { + return validator, err + } + + if value == nil { + return validator, types.ErrNoValidatorFound + } + + return types.UnmarshalValidator(k.cdc, value) +} diff --git a/x/lsm/keeper/liquid_stake_test.go b/x/lsm/keeper/liquid_stake_test.go new file mode 100644 index 00000000000..df1e3a57ea7 --- /dev/null +++ b/x/lsm/keeper/liquid_stake_test.go @@ -0,0 +1,939 @@ +package keeper_test + +import ( + "time" + + "github.com/stretchr/testify/mock" + + "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + lsmkeeper "github.com/cosmos/gaia/v22/x/lsm/keeper" + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// Tests Set/Get TotalLiquidStakedTokens +func (s *KeeperTestSuite) TestTotalLiquidStakedTokens() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + // Update the total liquid staked + total := math.NewInt(100) + keeper.SetTotalLiquidStakedTokens(ctx, total) + + // Confirm it was updated + require.Equal(total, keeper.GetTotalLiquidStakedTokens(ctx), "initial") +} + +// Tests Increase/Decrease TotalValidatorLiquidShares +func (s *KeeperTestSuite) TestValidatorLiquidShares() { + ctx, keeper := s.ctx, s.lsmKeeper + + // Create a validator address + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + valAddress := sdk.ValAddress(pubKey.Address()) + + // Set an initial total + initial := math.LegacyNewDec(100) + validator := types.LiquidValidator{ + OperatorAddress: valAddress.String(), + LiquidShares: initial, + } + 
s.Require().NoError(keeper.SetLiquidValidator(ctx, validator)) +} + +// Tests DecreaseTotalLiquidStakedTokens +func (s *KeeperTestSuite) TestDecreaseTotalLiquidStakedTokens() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + intitialTotalLiquidStaked := math.NewInt(100) + decreaseAmount := math.NewInt(10) + + // Set the total liquid staked to an arbitrary value + keeper.SetTotalLiquidStakedTokens(ctx, intitialTotalLiquidStaked) + + // Decrease the total liquid stake and confirm the total was updated + err := keeper.DecreaseTotalLiquidStakedTokens(ctx, decreaseAmount) + require.NoError(err, "no error expected when decreasing total liquid staked tokens") + require.Equal(intitialTotalLiquidStaked.Sub(decreaseAmount), keeper.GetTotalLiquidStakedTokens(ctx)) + + // Attempt to decrease by an excessive amount, it should error + err = keeper.DecreaseTotalLiquidStakedTokens(ctx, intitialTotalLiquidStaked) + require.ErrorIs(err, types.ErrTotalLiquidStakedUnderflow) +} + +// Tests CheckExceedsValidatorBondCap +func (s *KeeperTestSuite) TestCheckExceedsValidatorBondCap() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + testCases := []struct { + name string + validatorShares math.LegacyDec + validatorBondFactor math.LegacyDec + currentLiquidShares math.LegacyDec + newShares math.LegacyDec + expectedExceeds bool + }{ + { + // Validator Shares: 100, Factor: 1, Current Shares: 90 => 100 Max Shares, Capacity: 10 + // New Shares: 5 - below cap + name: "factor 1 - below cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(1), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(5), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 1, Current Shares: 90 => 100 Max Shares, Capacity: 10 + // New Shares: 10 - at cap + name: "factor 1 - at cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(1), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(10), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 1, Current Shares: 90 => 100 Max Shares, Capacity: 10 + // New Shares: 15 - above cap + name: "factor 1 - above cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(1), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(15), + expectedExceeds: true, + }, + { + // Validator Shares: 100, Factor: 2, Current Shares: 90 => 200 Max Shares, Capacity: 110 + // New Shares: 5 - below cap + name: "factor 2 - well below cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(2), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(5), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 2, Current Shares: 90 => 200 Max Shares, Capacity: 110 + // New Shares: 100 - below cap + name: "factor 2 - below cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(2), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(100), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 2, Current Shares: 90 => 200 Max Shares, Capacity: 110 + // New Shares: 110 - below cap + name: "factor 2 - at cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(2), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(110), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 2, 
Current Shares: 90 => 200 Max Shares, Capacity: 110 + // New Shares: 111 - above cap + name: "factor 2 - above cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(2), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(111), + expectedExceeds: true, + }, + { + // Validator Shares: 100, Factor: 100, Current Shares: 90 => 10000 Max Shares, Capacity: 9910 + // New Shares: 100 - below cap + name: "factor 100 - below cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(100), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(100), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 100, Current Shares: 90 => 10000 Max Shares, Capacity: 9910 + // New Shares: 9910 - at cap + name: "factor 100 - at cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(100), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(9910), + expectedExceeds: false, + }, + { + // Validator Shares: 100, Factor: 100, Current Shares: 90 => 10000 Max Shares, Capacity: 9910 + // New Shares: 9911 - above cap + name: "factor 100 - above cap", + validatorShares: math.LegacyNewDec(100), + validatorBondFactor: math.LegacyNewDec(100), + currentLiquidShares: math.LegacyNewDec(90), + newShares: math.LegacyNewDec(9911), + expectedExceeds: true, + }, + { + // Factor of -1 (disabled): Should always return false + name: "factor disabled", + validatorShares: math.LegacyNewDec(1), + validatorBondFactor: math.LegacyNewDec(-1), + currentLiquidShares: math.LegacyNewDec(1), + newShares: math.LegacyNewDec(1_000_000), + expectedExceeds: false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Update the validator bond factor + params, err := keeper.GetParams(ctx) + require.NoError(err) + params.ValidatorBondFactor = tc.validatorBondFactor + keeper.SetParams(ctx, params) + + // Create a validator with designated self-bond shares + validator := types.LiquidValidator{ + LiquidShares: tc.currentLiquidShares, + ValidatorBondShares: tc.validatorShares, + } + + // Check whether the cap is exceeded + actualExceeds, err := keeper.CheckExceedsValidatorBondCap(ctx, validator, tc.newShares) + require.NoError(err) + require.Equal(tc.expectedExceeds, actualExceeds, tc.name) + }) + } +} + +// Tests TestCheckExceedsValidatorLiquidStakingCap +func (s *KeeperTestSuite) TestCheckExceedsValidatorLiquidStakingCap() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + testCases := []struct { + name string + validatorLiquidCap math.LegacyDec + validatorLiquidShares math.LegacyDec + validatorTotalShares math.LegacyDec + newLiquidShares math.LegacyDec + tokenizingShares bool + expectedExceeds bool + }{ + { + // Cap: 10% - Delegation Below Threshold + // Liquid Shares: 5, Total Shares: 95, New Liquid Shares: 1 + // => Liquid Shares: 5+1=6, Total Shares: 95+1=96 => 6/96 = 6% < 10% cap + name: "10 percent cap _ native delegation _ below cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.1"), + validatorLiquidShares: math.LegacyNewDec(5), + validatorTotalShares: math.LegacyNewDec(95), + newLiquidShares: math.LegacyNewDec(1), + tokenizingShares: false, + expectedExceeds: false, + }, + { + // Cap: 10% - Delegation At Threshold + // Liquid Shares: 5, Total Shares: 95, New Liquid Shares: 5 + // => Liquid Shares: 5+5=10, Total Shares: 95+5=100 => 10/100 = 10% == 10% cap + name: "10 percent cap _ native delegation _ equals cap", + 
validatorLiquidCap: math.LegacyMustNewDecFromStr("0.1"), + validatorLiquidShares: math.LegacyNewDec(5), + validatorTotalShares: math.LegacyNewDec(95), + newLiquidShares: math.LegacyNewDec(4), + tokenizingShares: false, + expectedExceeds: false, + }, + { + // Cap: 10% - Delegation Exceeds Threshold + // Liquid Shares: 5, Total Shares: 95, New Liquid Shares: 6 + // => Liquid Shares: 5+6=11, Total Shares: 95+6=101 => 11/101 = 11% > 10% cap + name: "10 percent cap _ native delegation _ exceeds cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.1"), + validatorLiquidShares: math.LegacyNewDec(5), + validatorTotalShares: math.LegacyNewDec(95), + newLiquidShares: math.LegacyNewDec(6), + tokenizingShares: false, + expectedExceeds: true, + }, + { + // Cap: 20% - Delegation Below Threshold + // Liquid Shares: 20, Total Shares: 220, New Liquid Shares: 29 + // => Liquid Shares: 20+29=49, Total Shares: 220+29=249 => 49/249 = 19% < 20% cap + name: "20 percent cap _ native delegation _ below cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.2"), + validatorLiquidShares: math.LegacyNewDec(20), + validatorTotalShares: math.LegacyNewDec(220), + newLiquidShares: math.LegacyNewDec(29), + tokenizingShares: false, + expectedExceeds: false, + }, + { + // Cap: 20% - Delegation At Threshold + // Liquid Shares: 20, Total Shares: 220, New Liquid Shares: 30 + // => Liquid Shares: 20+30=50, Total Shares: 220+30=250 => 50/250 = 20% == 20% cap + name: "20 percent cap _ native delegation _ equals cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.2"), + validatorLiquidShares: math.LegacyNewDec(20), + validatorTotalShares: math.LegacyNewDec(220), + newLiquidShares: math.LegacyNewDec(30), + tokenizingShares: false, + expectedExceeds: false, + }, + { + // Cap: 20% - Delegation Exceeds Threshold + // Liquid Shares: 20, Total Shares: 220, New Liquid Shares: 31 + // => Liquid Shares: 20+31=51, Total Shares: 220+31=251 => 51/251 = 21% > 20% cap + name: "20 percent cap _ native delegation _ exceeds cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.2"), + validatorLiquidShares: math.LegacyNewDec(20), + validatorTotalShares: math.LegacyNewDec(220), + newLiquidShares: math.LegacyNewDec(31), + tokenizingShares: false, + expectedExceeds: true, + }, + { + // Cap: 50% - Native Delegation - Delegation At Threshold + // Liquid shares: 0, Total Shares: 100, New Liquid Shares: 50 + // Total Liquid Shares: 0+50=50, Total Shares: 100+50=150 + // => 50/150 = 33% < 50% cap + name: "50 percent cap _ native delegation _ delegation equals cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.5"), + validatorLiquidShares: math.LegacyNewDec(0), + validatorTotalShares: math.LegacyNewDec(100), + newLiquidShares: math.LegacyNewDec(50), + tokenizingShares: false, + expectedExceeds: false, + }, + { + // Cap: 50% - Tokenized Delegation - Delegation At Threshold + // Liquid shares: 0, Total Shares: 100, New Liquid Shares: 50 + // Total Liquid Shares => 0+50=50, Total Shares: 100, New Liquid Shares: 50 + // => 50 / 100 = 50% == 50% cap + name: "50 percent cap _ tokenized delegation _ delegation equals cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.5"), + validatorLiquidShares: math.LegacyNewDec(0), + validatorTotalShares: math.LegacyNewDec(100), + newLiquidShares: math.LegacyNewDec(50), + tokenizingShares: true, + expectedExceeds: false, + }, + { + // Cap: 50% - Native Delegation - Delegation At Threshold + // Liquid shares: 0, Total Shares: 100, New Liquid Shares: 51 + // Total Liquid Shares: 
0+51=51, Total Shares: 100+51=151 + // => 51/150 = 33% < 50% cap + name: "50 percent cap _ native delegation _ delegation equals cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.5"), + validatorLiquidShares: math.LegacyNewDec(0), + validatorTotalShares: math.LegacyNewDec(100), + newLiquidShares: math.LegacyNewDec(51), + tokenizingShares: false, + expectedExceeds: false, + }, + { + // Cap: 50% - Tokenized Delegation - Delegation At Threshold + // Liquid shares: 0, Total Shares: 100, New Liquid Shares: 50 + // Total Liquid Shares => 0+51=51, Total Shares: 100, New Liquid Shares: 51 + // => 51 / 100 = 51% > 50% cap + name: "50 percent cap _ tokenized delegation _ delegation equals cap", + validatorLiquidCap: math.LegacyMustNewDecFromStr("0.5"), + validatorLiquidShares: math.LegacyNewDec(0), + validatorTotalShares: math.LegacyNewDec(100), + newLiquidShares: math.LegacyNewDec(51), + tokenizingShares: true, + expectedExceeds: true, + }, + { + // Cap of 0% - everything should exceed + name: "0 percent cap", + validatorLiquidCap: math.LegacyZeroDec(), + validatorLiquidShares: math.LegacyNewDec(0), + validatorTotalShares: math.LegacyNewDec(1_000_000), + newLiquidShares: math.LegacyNewDec(1), + tokenizingShares: false, + expectedExceeds: true, + }, + { + // Cap of 100% - nothing should exceed + name: "100 percent cap", + validatorLiquidCap: math.LegacyOneDec(), + validatorLiquidShares: math.LegacyNewDec(1), + validatorTotalShares: math.LegacyNewDec(1_000_000), + newLiquidShares: math.LegacyNewDec(1), + tokenizingShares: false, + expectedExceeds: false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Update the validator liquid staking cap + params, err := keeper.GetParams(ctx) + require.NoError(err) + params.ValidatorLiquidStakingCap = tc.validatorLiquidCap + keeper.SetParams(ctx, params) + + call := s.stakingKeeper.EXPECT().GetValidator(ctx, mock.Anything).Return( + stakingtypes.Validator{DelegatorShares: tc.validatorTotalShares}, + nil, + ) + // Generate a test validator address + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + valAddress := sdk.ValAddress(pubKey.Address()) + // Create a validator with designated self-bond shares + validator := types.LiquidValidator{ + OperatorAddress: valAddress.String(), + LiquidShares: tc.validatorLiquidShares, + } + + // Check whether the cap is exceeded + actualExceeds, err := keeper.CheckExceedsValidatorLiquidStakingCap(ctx, validator, tc.newLiquidShares, tc.tokenizingShares) + require.NoError(err) + require.Equal(tc.expectedExceeds, actualExceeds, tc.name) + call.Unset() + }) + } +} + +// Tests SafelyIncreaseValidatorLiquidShares +func (s *KeeperTestSuite) TestSafelyIncreaseValidatorLiquidShares() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + // Generate a test validator address + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + valAddress := sdk.ValAddress(pubKey.Address()) + + // Helper function to check the validator's liquid shares + checkValidatorLiquidShares := func(expected math.LegacyDec, description string) { + actualValidator, err := keeper.GetLiquidValidator(ctx, valAddress) + require.NoError(err) + require.Equal(expected.TruncateInt64(), actualValidator.LiquidShares.TruncateInt64(), description) + } + + // Start with the following: + // Initial Liquid Shares: 0 + // Validator Bond Shares: 10 + // Validator TotalShares: 75 + // + // Initial Caps: + // ValidatorBondFactor: 1 (Cap applied at 10 shares) + // ValidatorLiquidStakingCap: 25% (Cap applied at 25 
shares) + // + // Cap Increases: + // ValidatorBondFactor: 10 (Cap applied at 100 shares) + // ValidatorLiquidStakingCap: 40% (Cap applied at 50 shares) + initialLiquidShares := math.LegacyNewDec(0) + validatorBondShares := math.LegacyNewDec(10) + validatorTotalShares := math.LegacyNewDec(75) + + firstIncreaseAmount := math.LegacyNewDec(20) + secondIncreaseAmount := math.LegacyNewDec(10) // total increase of 30 + + initialBondFactor := math.LegacyNewDec(1) + finalBondFactor := math.LegacyNewDec(10) + initialLiquidStakingCap := math.LegacyMustNewDecFromStr("0.25") + finalLiquidStakingCap := math.LegacyMustNewDecFromStr("0.4") + + stVal := stakingtypes.Validator{ + OperatorAddress: valAddress.String(), + DelegatorShares: validatorTotalShares, + } + call := s.stakingKeeper.EXPECT().GetValidator(ctx, valAddress).Return(stVal, nil) + defer call.Unset() + // Create a validator with designated self-bond shares + initialValidator := types.LiquidValidator{ + OperatorAddress: valAddress.String(), + LiquidShares: initialLiquidShares, + ValidatorBondShares: validatorBondShares, + } + keeper.SetLiquidValidator(ctx, initialValidator) + + // Set validator bond factor to a small number such that any delegation would fail, + // and set the liquid staking cap such that the first stake would succeed, but the second + // would fail + params, err := keeper.GetParams(ctx) + require.NoError(err) + params.ValidatorBondFactor = initialBondFactor + params.ValidatorLiquidStakingCap = initialLiquidStakingCap + keeper.SetParams(ctx, params) + + // Attempt to increase the validator liquid shares, it should throw an + // error that the validator bond cap was exceeded + _, err = keeper.SafelyIncreaseValidatorLiquidShares(ctx, valAddress, firstIncreaseAmount, false) + require.ErrorIs(err, types.ErrInsufficientValidatorBondShares) + checkValidatorLiquidShares(initialLiquidShares, "shares after low bond factor") + + // Change validator bond factor to a more conservative number, so that the increase succeeds + params.ValidatorBondFactor = finalBondFactor + keeper.SetParams(ctx, params) + + // Try the increase again and check that it succeeded + expectedLiquidSharesAfterFirstStake := initialLiquidShares.Add(firstIncreaseAmount) + _, err = keeper.SafelyIncreaseValidatorLiquidShares(ctx, valAddress, firstIncreaseAmount, false) + require.NoError(err) + checkValidatorLiquidShares(expectedLiquidSharesAfterFirstStake, "shares with cap loose bond cap") + + // Attempt another increase, it should fail from the liquid staking cap + _, err = keeper.SafelyIncreaseValidatorLiquidShares(ctx, valAddress, secondIncreaseAmount, false) + require.ErrorIs(err, types.ErrValidatorLiquidStakingCapExceeded) + checkValidatorLiquidShares(expectedLiquidSharesAfterFirstStake, "shares after liquid staking cap hit") + + // Raise the liquid staking cap so the new increment succeeds + params.ValidatorLiquidStakingCap = finalLiquidStakingCap + keeper.SetParams(ctx, params) + + // Finally confirm that the increase succeeded this time + expectedLiquidSharesAfterSecondStake := expectedLiquidSharesAfterFirstStake.Add(secondIncreaseAmount) + _, err = keeper.SafelyIncreaseValidatorLiquidShares(ctx, valAddress, secondIncreaseAmount, false) + require.NoError(err, "no error expected after increasing liquid staking cap") + checkValidatorLiquidShares(expectedLiquidSharesAfterSecondStake, "shares after loose liquid stake cap") +} + +// Tests DecreaseValidatorLiquidShares +func (s *KeeperTestSuite) TestDecreaseValidatorLiquidShares() { + ctx, keeper := s.ctx, s.lsmKeeper 
+ require := s.Require() + + initialLiquidShares := math.LegacyNewDec(100) + decreaseAmount := math.LegacyNewDec(10) + + // Create a validator with designated self-bond shares + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + valAddress := sdk.ValAddress(pubKey.Address()) + + initialValidator := types.LiquidValidator{ + OperatorAddress: valAddress.String(), + LiquidShares: initialLiquidShares, + } + keeper.SetLiquidValidator(ctx, initialValidator) + + // Decrease the validator liquid shares, and confirm the new share amount has been updated + _, err := keeper.DecreaseValidatorLiquidShares(ctx, valAddress, decreaseAmount) + require.NoError(err, "no error expected when decreasing validator liquid shares") + + actualValidator, err := keeper.GetLiquidValidator(ctx, valAddress) + require.NoError(err) + require.Equal(initialLiquidShares.Sub(decreaseAmount), actualValidator.LiquidShares, "liquid shares") + + // Attempt to decrease by a larger amount than it has, it should fail + _, err = keeper.DecreaseValidatorLiquidShares(ctx, valAddress, initialLiquidShares) + require.ErrorIs(err, types.ErrValidatorLiquidSharesUnderflow) +} + +// Tests SafelyDecreaseValidatorBond +func (s *KeeperTestSuite) TestSafelyDecreaseValidatorBond() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + // Initial Bond Factor: 100, Initial Validator Bond: 10 + // => Max Liquid Shares 1000 (Initial Liquid Shares: 200) + initialBondFactor := math.LegacyNewDec(100) + initialValidatorBondShares := math.LegacyNewDec(10) + initialLiquidShares := math.LegacyNewDec(200) + + // Create a validator with designated self-bond shares + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + valAddress := sdk.ValAddress(pubKey.Address()) + + initialValidator := types.LiquidValidator{ + OperatorAddress: valAddress.String(), + ValidatorBondShares: initialValidatorBondShares, + LiquidShares: initialLiquidShares, + } + keeper.SetLiquidValidator(ctx, initialValidator) + + // Set the bond factor + params, err := keeper.GetParams(ctx) + require.NoError(err) + params.ValidatorBondFactor = initialBondFactor + keeper.SetParams(ctx, params) + + // Decrease the validator bond from 10 to 5 (minus 5) + // This will adjust the cap (factor * shares) + // from (100 * 10 = 1000) to (100 * 5 = 500) + // Since this is still above the initial liquid shares of 200, this will succeed + decreaseAmount, expectedBondShares := math.LegacyNewDec(5), math.LegacyNewDec(5) + err = keeper.SafelyDecreaseValidatorBond(ctx, valAddress, decreaseAmount) + require.NoError(err) + + actualValidator, err := keeper.GetLiquidValidator(ctx, valAddress) + require.NoError(err) + require.Equal(expectedBondShares, actualValidator.ValidatorBondShares, "validator bond shares shares") + + // Now attempt to decrease the validator bond again from 5 to 1 (minus 4) + // This time, the cap will be reduced to (factor * shares) = (100 * 1) = 100 + // However, the liquid shares are currently 200, so this should fail + decreaseAmount, expectedBondShares = math.LegacyNewDec(4), math.LegacyNewDec(1) + err = keeper.SafelyDecreaseValidatorBond(ctx, valAddress, decreaseAmount) + require.ErrorIs(err, types.ErrInsufficientValidatorBondShares) + + // Finally, disable the cap and attempt to decrease again + // This time it should succeed + params.ValidatorBondFactor = types.ValidatorBondCapDisabled + keeper.SetParams(ctx, params) + + err = keeper.SafelyDecreaseValidatorBond(ctx, valAddress, decreaseAmount) + require.NoError(err) + + actualValidator, err = 
keeper.GetLiquidValidator(ctx, valAddress) + require.NoError(err) + require.Equal(expectedBondShares, actualValidator.ValidatorBondShares, "validator bond shares shares") +} + +// Tests Add/Remove/Get/SetTokenizeSharesLock +func (s *KeeperTestSuite) TestTokenizeSharesLock() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + addresses := simtestutil.CreateIncrementalAccounts(2) + addressA, addressB := addresses[0], addresses[1] + + unlocked := types.TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED.String() + locked := types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED.String() + lockExpiring := types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING.String() + + // Confirm both accounts start unlocked + status, _ := keeper.GetTokenizeSharesLock(ctx, addressA) + require.Equal(unlocked, status.String(), "addressA unlocked at start") + + status, _ = keeper.GetTokenizeSharesLock(ctx, addressB) + require.Equal(unlocked, status.String(), "addressB unlocked at start") + + // Lock the first account + keeper.AddTokenizeSharesLock(ctx, addressA) + + // The first account should now have tokenize shares disabled + // and the unlock time should be the zero time + status, _ = keeper.GetTokenizeSharesLock(ctx, addressA) + require.Equal(locked, status.String(), "addressA locked") + + status, _ = keeper.GetTokenizeSharesLock(ctx, addressB) + require.Equal(unlocked, status.String(), "addressB still unlocked") + + // Update the lock time and confirm it was set + expectedUnlockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + keeper.SetTokenizeSharesUnlockTime(ctx, addressA, expectedUnlockTime) + + status, actualUnlockTime := keeper.GetTokenizeSharesLock(ctx, addressA) + require.Equal(lockExpiring, status.String(), "addressA lock expiring") + require.Equal(expectedUnlockTime, actualUnlockTime, "addressA unlock time") + + // Confirm B is still unlocked + status, _ = keeper.GetTokenizeSharesLock(ctx, addressB) + require.Equal(unlocked, status.String(), "addressB still unlocked") + + // Remove the lock + keeper.RemoveTokenizeSharesLock(ctx, addressA) + status, _ = keeper.GetTokenizeSharesLock(ctx, addressA) + require.Equal(unlocked, status.String(), "addressA unlocked at end") + + status, _ = keeper.GetTokenizeSharesLock(ctx, addressB) + require.Equal(unlocked, status.String(), "addressB unlocked at end") +} + +// Tests GetAllTokenizeSharesLocks +func (s *KeeperTestSuite) TestGetAllTokenizeSharesLocks() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + addresses := simtestutil.CreateIncrementalAccounts(4) + + // Set 2 locked accounts, and two accounts with a lock expiring + keeper.AddTokenizeSharesLock(ctx, addresses[0]) + keeper.AddTokenizeSharesLock(ctx, addresses[1]) + + unlockTime1 := time.Date(2023, 1, 1, 1, 0, 0, 0, time.UTC) + unlockTime2 := time.Date(2023, 1, 2, 1, 0, 0, 0, time.UTC) + keeper.SetTokenizeSharesUnlockTime(ctx, addresses[2], unlockTime1) + keeper.SetTokenizeSharesUnlockTime(ctx, addresses[3], unlockTime2) + + // Defined expected locks after GetAll + expectedLocks := map[string]types.TokenizeShareLock{ + addresses[0].String(): { + Status: types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED.String(), + }, + addresses[1].String(): { + Status: types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED.String(), + }, + addresses[2].String(): { + Status: types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING.String(), + CompletionTime: unlockTime1, + }, + addresses[3].String(): { + Status: types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING.String(), + CompletionTime: unlockTime2, + }, + } + + // Check output from GetAll + actualLocks 
:= keeper.GetAllTokenizeSharesLocks(ctx) + require.Len(actualLocks, len(expectedLocks), "number of locks") + + for i, actual := range actualLocks { + expected, ok := expectedLocks[actual.Address] + require.True(ok, "address %s not expected", actual.Address) + require.Equal(expected.Status, actual.Status, "tokenize share lock #%d status", i) + require.Equal(expected.CompletionTime, actual.CompletionTime, "tokenize share lock #%d completion time", i) + } +} + +// Test Get/SetPendingTokenizeShareAuthorizations +func (s *KeeperTestSuite) TestPendingTokenizeShareAuthorizations() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + // Create dummy accounts and completion times + + addresses := simtestutil.CreateIncrementalAccounts(4) + addressStrings := []string{} + for _, address := range addresses { + addressStrings = append(addressStrings, address.String()) + } + + timeA := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + timeB := timeA.Add(time.Hour) + + // There should be no addresses returned originally + authorizationsA := keeper.GetPendingTokenizeShareAuthorizations(ctx, timeA) + require.Empty(authorizationsA.Addresses, "no addresses at timeA expected") + + authorizationsB := keeper.GetPendingTokenizeShareAuthorizations(ctx, timeB) + require.Empty(authorizationsB.Addresses, "no addresses at timeB expected") + + // Store addresses for timeB + keeper.SetPendingTokenizeShareAuthorizations(ctx, timeB, types.PendingTokenizeShareAuthorizations{ + Addresses: addressStrings, + }) + + // Check addresses + authorizationsA = keeper.GetPendingTokenizeShareAuthorizations(ctx, timeA) + require.Empty(authorizationsA.Addresses, "no addresses at timeA expected at end") + + authorizationsB = keeper.GetPendingTokenizeShareAuthorizations(ctx, timeB) + require.Equal(addressStrings, authorizationsB.Addresses, "address length") +} + +// Test QueueTokenizeSharesAuthorization and RemoveExpiredTokenizeShareLocks +func (s *KeeperTestSuite) TestTokenizeShareAuthorizationQueue() { + ctx, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + // Create dummy accounts and completion times + + // We'll start by adding the following addresses to the queue + // Time 0: [address0] + // Time 1: [] + // Time 2: [address1, address2, address3] + // Time 3: [address4, address5] + // Time 4: [address6] + addresses := simtestutil.CreateIncrementalAccounts(7) + addressesByTime := map[int][]sdk.AccAddress{ + 0: {addresses[0]}, + 1: {}, + 2: {addresses[1], addresses[2], addresses[3]}, + 3: {addresses[4], addresses[5]}, + 4: {addresses[6]}, + } + + // Add each address to the queue and then increment the block time + // such that the times line up as follows + // Time 0: 2023-01-01 00:00:00 + // Time 1: 2023-01-01 00:01:00 + // Time 2: 2023-01-01 00:02:00 + // Time 3: 2023-01-01 00:03:00 + startTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + ctx = ctx.WithBlockTime(startTime) + blockTimeIncrement := time.Hour + + // Set the unbonding time to 1 day + unbondingPeriod := time.Hour * 24 + s.stakingKeeper.EXPECT().GetParams(mock.Anything).Return(stakingtypes.Params{UnbondingTime: unbondingPeriod}, nil) + + for timeIndex := 0; timeIndex <= 4; timeIndex++ { + for _, address := range addressesByTime[timeIndex] { + keeper.QueueTokenizeSharesAuthorization(ctx, address) + } + ctx = ctx.WithBlockTime(ctx.BlockTime().Add(blockTimeIncrement)) + } + + // We'll unlock the tokens using the following progression + // The "alias'"/keys for these times assume a starting point of the Time 0 + // from above, plus the Unbonding Time + 
// Time -1 (2023-01-01 23:59:99): [] + // Time 0 (2023-01-02 00:00:00): [address0] + // Time 1 (2023-01-02 00:01:00): [] + // Time 2.5 (2023-01-02 00:02:30): [address1, address2, address3] + // Time 10 (2023-01-02 00:10:00): [address4, address5, address6] + unlockBlockTimes := map[string]time.Time{ + "-1": startTime.Add(unbondingPeriod).Add(-time.Second), + "0": startTime.Add(unbondingPeriod), + "1": startTime.Add(unbondingPeriod).Add(blockTimeIncrement), + "2.5": startTime.Add(unbondingPeriod).Add(2 * blockTimeIncrement).Add(blockTimeIncrement / 2), + "10": startTime.Add(unbondingPeriod).Add(10 * blockTimeIncrement), + } + expectedUnlockedAddresses := map[string][]string{ + "-1": {}, + "0": {addresses[0].String()}, + "1": {}, + "2.5": {addresses[1].String(), addresses[2].String(), addresses[3].String()}, + "10": {addresses[4].String(), addresses[5].String(), addresses[6].String()}, + } + + // Now we'll remove items from the queue sequentially + // First check with a block time before the first expiration - it should remove no addresses + actualAddresses, err := keeper.RemoveExpiredTokenizeShareLocks(ctx, unlockBlockTimes["-1"]) + require.NoError(err) + require.Equal(expectedUnlockedAddresses["-1"], actualAddresses, "no addresses unlocked from time -1") + + // Then pass in (time 0 + unbonding time) - it should remove the first address + actualAddresses, err = keeper.RemoveExpiredTokenizeShareLocks(ctx, unlockBlockTimes["0"]) + require.NoError(err) + require.Equal(expectedUnlockedAddresses["0"], actualAddresses, "one address unlocked from time 0") + + // Now pass in (time 1 + unbonding time) - it should remove no addresses since + // the address at time 0 was already removed + actualAddresses, err = keeper.RemoveExpiredTokenizeShareLocks(ctx, unlockBlockTimes["1"]) + require.NoError(err) + require.Equal(expectedUnlockedAddresses["1"], actualAddresses, "no addresses unlocked from time 1") + + // Now pass in (time 2.5 + unbonding time) - it should remove the three addresses from time 2 + actualAddresses, err = keeper.RemoveExpiredTokenizeShareLocks(ctx, unlockBlockTimes["2.5"]) + require.NoError(err) + require.Equal(expectedUnlockedAddresses["2.5"], actualAddresses, "addresses unlocked from time 2.5") + + // Finally pass in a block time far in the future, which should remove all the remaining locks + actualAddresses, err = keeper.RemoveExpiredTokenizeShareLocks(ctx, unlockBlockTimes["10"]) + require.NoError(err) + require.Equal(expectedUnlockedAddresses["10"], actualAddresses, "addresses unlocked from time 10") +} + +// Tests DelegatorIsLiquidStaker +func (s *KeeperTestSuite) TestDelegatorIsLiquidStaker() { + _, keeper := s.ctx, s.lsmKeeper + require := s.Require() + + // Create base and ICA accounts + baseAccountAddress := sdk.AccAddress("base-account") + icaAccountAddress := sdk.AccAddress( + address.Derive(authtypes.NewModuleAddress("icahost"), []byte("connection-0"+"icahost")), + ) + + // Only the ICA module account should be considered a liquid staking provider + require.False(keeper.DelegatorIsLiquidStaker(baseAccountAddress), "base account") + require.True(keeper.DelegatorIsLiquidStaker(icaAccountAddress), "ICA module account") +} + +func (s *KeeperTestSuite) TestCheckVestedDelegationInVestingAccount() { + var ( + vestingAcct *vestingtypes.ContinuousVestingAccount + startTime = time.Now() + endTime = startTime.Add(24 * time.Hour) + originalVesting = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(100_000))) + ) + require := s.Require() + + testCases := []struct { + name 
string + setupAcct func() + blockTime time.Time + coinRequired sdk.Coin + expRes bool + }{ + { + name: "vesting account has zero delegations", + setupAcct: func() {}, + blockTime: endTime, + coinRequired: sdk.NewCoin(sdk.DefaultBondDenom, math.OneInt()), + expRes: false, + }, + { + name: "vested delegations exist but for a different coin", + setupAcct: func() { + vestingAcct.DelegatedFree = sdk.NewCoins(sdk.NewCoin("uatom", math.NewInt(100_000))) + }, + blockTime: endTime, + coinRequired: sdk.NewCoin(sdk.DefaultBondDenom, math.OneInt()), + expRes: false, + }, + { + name: "all delegations are vesting", + setupAcct: func() { + vestingAcct.DelegatedVesting = vestingAcct.OriginalVesting + }, + blockTime: startTime, + coinRequired: sdk.NewCoin(sdk.DefaultBondDenom, math.OneInt()), + expRes: false, + }, + { + name: "not enough vested coin", + setupAcct: func() { + vestingAcct.DelegatedFree = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(80_000))) + }, + blockTime: endTime, + coinRequired: sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(100_000)), + expRes: false, + }, + { + name: "account is vested and have vested delegations", + setupAcct: func() { + vestingAcct.DelegatedFree = vestingAcct.OriginalVesting + }, + blockTime: endTime, + coinRequired: sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(100_000)), + expRes: true, + }, + { + name: "vesting account partially vested and have vesting and vested delegations", + setupAcct: func() { + vestingAcct.DelegatedFree = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(50_000))) + vestingAcct.DelegatedVesting = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(50_000))) + }, + blockTime: startTime.Add(18 * time.Hour), // vest 3/4 vesting period + coinRequired: sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(75_000)), + + expRes: true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + pubKey := secp256k1.GenPrivKey().PubKey() + baseAcc := authtypes.NewBaseAccount(sdk.AccAddress(pubKey.Address()), pubKey, 0, 0) + + var err error + vestingAcct, err = vestingtypes.NewContinuousVestingAccount( + baseAcc, + originalVesting, + startTime.Unix(), + endTime.Unix(), + ) + require.NoError(err) + + tc.setupAcct() + + require.Equal( + tc.expRes, lsmkeeper.CheckVestedDelegationInVestingAccount( + vestingAcct, + tc.blockTime, + tc.coinRequired, + ), + ) + }) + } +} diff --git a/x/lsm/keeper/msg_server.go b/x/lsm/keeper/msg_server.go new file mode 100644 index 00000000000..56741cca731 --- /dev/null +++ b/x/lsm/keeper/msg_server.go @@ -0,0 +1,505 @@ +package keeper + +import ( + "context" + "errors" + "fmt" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/exported" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +type msgServer struct { + *Keeper +} + +// NewMsgServerImpl returns an implementation of the staking MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper *Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} + +// UpdateParams defines a method to perform updating of params for the x/lsm module. 
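+//
+// Illustrative call sketch (not generated code; govModuleAddr is a placeholder for the
+// bech32 address of the configured authority, normally the x/gov module account):
+//
+//	msg := &types.MsgUpdateParams{
+//		Authority: govModuleAddr,
+//		Params:    types.DefaultParams(),
+//	}
+//	_, err := msgServer.UpdateParams(ctx, msg)
+//
+// Any other signer is rejected with govtypes.ErrInvalidSigner below.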
+func (k msgServer) UpdateParams(ctx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { + if k.authority != msg.Authority { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + if err := msg.Params.Validate(); err != nil { + return nil, err + } + + // store params + if err := k.SetParams(ctx, msg.Params); err != nil { + return nil, err + } + + return &types.MsgUpdateParamsResponse{}, nil +} + +// Tokenizes shares associated with a delegation by creating a tokenize share record +// and returning tokens with a denom of the format {validatorAddress}/{recordId} +func (k msgServer) TokenizeShares(goCtx context.Context, msg *types.MsgTokenizeShares) (*types.MsgTokenizeSharesResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + valAddr, valErr := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(msg.ValidatorAddress) + if valErr != nil { + return nil, valErr + } + validator, err := k.stakingKeeper.GetValidator(ctx, valAddr) + if err != nil { + return nil, err + } + + delegatorAddress, err := k.authKeeper.AddressCodec().StringToBytes(msg.DelegatorAddress) + if err != nil { + return nil, err + } + + _, err = k.authKeeper.AddressCodec().StringToBytes(msg.TokenizedShareOwner) + if err != nil { + return nil, err + } + + if !msg.Amount.IsValid() || !msg.Amount.Amount.IsPositive() { + return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "invalid shares amount") + } + + // Check if the delegator has disabled tokenization + lockStatus, unlockTime := k.GetTokenizeSharesLock(ctx, delegatorAddress) + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED { + return nil, types.ErrTokenizeSharesDisabledForAccount + } + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING { + return nil, types.ErrTokenizeSharesDisabledForAccount.Wrapf("tokenization will be allowed at %s", unlockTime) + } + + delegation, err := k.stakingKeeper.GetDelegation(ctx, delegatorAddress, valAddr) + if err != nil { + return nil, err + } + + // ValidatorBond delegation is not allowed for tokenize share + if delegation.ValidatorBond { + return nil, types.ErrValidatorBondNotAllowedForTokenizeShare + } + + bondDenom, err := k.stakingKeeper.BondDenom(ctx) + if err != nil { + return nil, err + } + + if msg.Amount.Denom != bondDenom { + return nil, types.ErrOnlyBondDenomAllowdForTokenize + } + + acc := k.authKeeper.GetAccount(ctx, delegatorAddress) + if acc != nil { + acc, ok := acc.(vesting.VestingAccount) + if ok { + // if account is a vesting account, it checks if free delegation (non-vesting delegation) is not exceeding + // the tokenize share amount and execute further tokenize share process + // tokenize share is reducing unlocked tokens delegation from the vesting account and further process + // is not causing issues + if !CheckVestedDelegationInVestingAccount(acc, ctx.BlockTime(), msg.Amount) { + return nil, types.ErrExceedingFreeVestingDelegations + } + } + } + + shares, err := k.stakingKeeper.ValidateUnbondAmount( + ctx, delegatorAddress, valAddr, msg.Amount.Amount, + ) + if err != nil { + return nil, err + } + + // sanity check to avoid creating a tokenized share record with zero shares + if shares.IsZero() { + return nil, errorsmod.Wrap(types.ErrInsufficientShares, "cannot tokenize zero shares") + } + + // Check that the delegator has no ongoing redelegations to the validator + found, err := k.stakingKeeper.HasReceivingRedelegation(ctx, delegatorAddress, valAddr) + if err != nil { + return 
nil, err + } + if found { + return nil, types.ErrRedelegationInProgress + } + + // If this tokenization is NOT from a liquid staking provider, + // confirm it does not exceed the global and validator liquid staking cap + // If the tokenization is from a liquid staking provider, + // the shares are already considered liquid and there's no need to increment the totals + if !k.DelegatorIsLiquidStaker(delegatorAddress) { + if err := k.SafelyIncreaseTotalLiquidStakedTokens(ctx, msg.Amount.Amount, true); err != nil { + return nil, err + } + _, err = k.SafelyIncreaseValidatorLiquidShares(ctx, valAddr, shares, true) + if err != nil { + return nil, err + } + } + + recordID := k.GetLastTokenizeShareRecordID(ctx) + 1 + k.SetLastTokenizeShareRecordID(ctx, recordID) + + record := types.TokenizeShareRecord{ + Id: recordID, + Owner: msg.TokenizedShareOwner, + ModuleAccount: fmt.Sprintf("%s%d", types.TokenizeShareModuleAccountPrefix, recordID), + Validator: msg.ValidatorAddress, + } + + // note: this returnAmount can be slightly off from the original delegation amount if there + // is a decimal to int precision error + returnAmount, err := k.stakingKeeper.Unbond(ctx, delegatorAddress, valAddr, shares) + if err != nil { + return nil, err + } + + if validator.IsBonded() { + coins := sdk.NewCoins(sdk.NewCoin(bondDenom, returnAmount)) + err := k.bankKeeper.SendCoinsFromModuleToModule(ctx, stakingtypes.BondedPoolName, stakingtypes.NotBondedPoolName, coins) + if err != nil { + return nil, err + } + } + + // Note: UndelegateCoinsFromModuleToAccount is internally calling TrackUndelegation for vesting account + returnCoin := sdk.NewCoin(bondDenom, returnAmount) + err = k.bankKeeper.UndelegateCoinsFromModuleToAccount(ctx, stakingtypes.NotBondedPoolName, delegatorAddress, + sdk.Coins{returnCoin}) + if err != nil { + return nil, err + } + + // Re-calculate the shares in case there was rounding precision during the undelegation + newShares, err := validator.SharesFromTokens(returnAmount) + if err != nil { + return nil, err + } + + // The share tokens returned maps 1:1 with shares + shareToken := sdk.NewCoin(record.GetShareTokenDenom(), newShares.TruncateInt()) + + err = k.bankKeeper.MintCoins(ctx, minttypes.ModuleName, sdk.Coins{shareToken}) + if err != nil { + return nil, err + } + + err = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, delegatorAddress, sdk.Coins{shareToken}) + if err != nil { + return nil, err + } + + // create reward ownership record + err = k.AddTokenizeShareRecord(ctx, record) + if err != nil { + return nil, err + } + // send coins to module account + err = k.bankKeeper.SendCoins(ctx, delegatorAddress, record.GetModuleAddress(), sdk.Coins{returnCoin}) + if err != nil { + return nil, err + } + + // Note: it is needed to get latest validator object to get Keeper.Delegate function work properly + validator, err = k.stakingKeeper.GetValidator(ctx, valAddr) + if err != nil { + return nil, err + } + + // delegate from module account + _, err = k.stakingKeeper.Delegate(ctx, record.GetModuleAddress(), returnAmount, stakingtypes.Unbonded, validator, + true) + if err != nil { + return nil, err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeTokenizeShares, + sdk.NewAttribute(types.AttributeKeyDelegator, msg.DelegatorAddress), + sdk.NewAttribute(types.AttributeKeyValidator, msg.ValidatorAddress), + sdk.NewAttribute(types.AttributeKeyShareOwner, msg.TokenizedShareOwner), + sdk.NewAttribute(types.AttributeKeyShareRecordID, fmt.Sprintf("%d", record.Id)), + 
sdk.NewAttribute(types.AttributeKeyAmount, msg.Amount.String()), + sdk.NewAttribute(types.AttributeKeyTokenizedShares, shareToken.String()), + ), + ) + + return &types.MsgTokenizeSharesResponse{ + Amount: shareToken, + }, nil +} + +// Converts tokenized shares back into a native delegation +func (k msgServer) RedeemTokensForShares(goCtx context.Context, msg *types.MsgRedeemTokensForShares) (*types.MsgRedeemTokensForSharesResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + delegatorAddress, err := k.authKeeper.AddressCodec().StringToBytes(msg.DelegatorAddress) + if err != nil { + return nil, err + } + + if !msg.Amount.IsValid() || !msg.Amount.Amount.IsPositive() { + return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "invalid shares amount") + } + + shareToken := msg.Amount + balance := k.bankKeeper.GetBalance(ctx, delegatorAddress, shareToken.Denom) + if balance.Amount.LT(shareToken.Amount) { + return nil, types.ErrNotEnoughBalance + } + + record, err := k.GetTokenizeShareRecordByDenom(ctx, shareToken.Denom) + if err != nil { + return nil, err + } + + valAddr, valErr := k.stakingKeeper.ValidatorAddressCodec().StringToBytes(record.Validator) + if valErr != nil { + return nil, valErr + } + + validator, err := k.stakingKeeper.GetValidator(ctx, valAddr) + if err != nil { + return nil, err + } + + delegation, err := k.stakingKeeper.GetDelegation(ctx, record.GetModuleAddress(), valAddr) + if err != nil { + return nil, err + } + + // Similar to undelegations, if the account is attempting to tokenize the full delegation, + // but there's a precision error due to the decimal to int conversion, round up to the + // full decimal amount before modifying the delegation + shares := math.LegacyNewDecFromInt(shareToken.Amount) + if shareToken.Amount.Equal(delegation.Shares.TruncateInt()) { + shares = delegation.Shares + } + tokens := validator.TokensFromShares(shares).TruncateInt() + + // prevent redemption that returns a 0 amount + if tokens.IsZero() { + return nil, types.ErrTinyRedemptionAmount + } + + // If this redemption is NOT from a liquid staking provider, decrement the total liquid staked + // If the redemption was from a liquid staking provider, the shares are still considered + // liquid, even in their non-tokenized form (since they are owned by a liquid staking provider) + if !k.DelegatorIsLiquidStaker(delegatorAddress) { + if err := k.DecreaseTotalLiquidStakedTokens(ctx, tokens); err != nil { + return nil, err + } + _, err = k.DecreaseValidatorLiquidShares(ctx, valAddr, shares) + if err != nil { + return nil, err + } + } + + returnAmount, err := k.stakingKeeper.Unbond(ctx, record.GetModuleAddress(), valAddr, shares) + if err != nil { + return nil, err + } + + if validator.IsBonded() { + bondDenom, err := k.stakingKeeper.BondDenom(ctx) + if err != nil { + return nil, err + } + + coins := sdk.NewCoins(sdk.NewCoin(bondDenom, returnAmount)) + err = k.bankKeeper.SendCoinsFromModuleToModule(ctx, stakingtypes.BondedPoolName, stakingtypes.NotBondedPoolName, coins) + if err != nil { + return nil, err + } + } + + // Note: since delegation object has been changed from unbond call, it gets latest delegation + _, err = k.stakingKeeper.GetDelegation(ctx, record.GetModuleAddress(), valAddr) + if err != nil && !errors.Is(err, types.ErrNoDelegation) { + return nil, err + } + + // this err will be ErrNoDelegation + if err != nil { + if err := k.WithdrawSingleShareRecordReward(ctx, record.Id); err != nil { + return nil, err + } + err = k.DeleteTokenizeShareRecord(ctx, record.Id) + if err != nil { + 
return nil, err + } + } + + // send share tokens to NotBondedPool and burn + err = k.bankKeeper.SendCoinsFromAccountToModule(ctx, delegatorAddress, stakingtypes.NotBondedPoolName, + sdk.Coins{shareToken}) + if err != nil { + return nil, err + } + err = k.bankKeeper.BurnCoins(ctx, stakingtypes.NotBondedPoolName, sdk.Coins{shareToken}) + if err != nil { + return nil, err + } + + bondDenom, err := k.stakingKeeper.BondDenom(ctx) + if err != nil { + return nil, err + } + // send equivalent amount of tokens to the delegator + returnCoin := sdk.NewCoin(bondDenom, returnAmount) + err = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, stakingtypes.NotBondedPoolName, delegatorAddress, + sdk.Coins{returnCoin}) + if err != nil { + return nil, err + } + + // Note: it is needed to get latest validator object to get Keeper.Delegate function work properly + validator, err = k.stakingKeeper.GetValidator(ctx, valAddr) + if err != nil { + return nil, err + } + + // convert the share tokens to delegated status + // Note: Delegate(substractAccount => true) -> DelegateCoinsFromAccountToModule -> TrackDelegation for vesting account + _, err = k.stakingKeeper.Delegate(ctx, delegatorAddress, returnAmount, stakingtypes.Unbonded, validator, true) + if err != nil { + return nil, err + } + + // tokenized shares can be transferred from a validator that does not have validator bond to a delegator with validator bond + // in that case we need to increase the validator bond shares (same as during msgServer.Delegate) + newDelegation, err := k.stakingKeeper.GetDelegation(ctx, delegatorAddress, valAddr) + if err != nil { + return nil, err + } + + if newDelegation.ValidatorBond { + if err := k.IncreaseValidatorBondShares(ctx, valAddr, shares); err != nil { + return nil, err + } + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeRedeemShares, + sdk.NewAttribute(types.AttributeKeyDelegator, msg.DelegatorAddress), + sdk.NewAttribute(types.AttributeKeyValidator, validator.OperatorAddress), + sdk.NewAttribute(types.AttributeKeyAmount, shareToken.String()), + ), + ) + + return &types.MsgRedeemTokensForSharesResponse{ + Amount: returnCoin, + }, nil +} + +// Transfers the ownership of rewards associated with a tokenize share record +func (k msgServer) TransferTokenizeShareRecord(goCtx context.Context, msg *types.MsgTransferTokenizeShareRecord) (*types.MsgTransferTokenizeShareRecordResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + record, err := k.GetTokenizeShareRecord(ctx, msg.TokenizeShareRecordId) + if err != nil { + return nil, types.ErrTokenizeShareRecordNotExists + } + + if record.Owner != msg.Sender { + return nil, types.ErrNotTokenizeShareRecordOwner + } + + // Remove old account reference + oldOwner, err := k.authKeeper.AddressCodec().StringToBytes(record.Owner) + if err != nil { + return nil, sdkerrors.ErrInvalidAddress + } + k.deleteTokenizeShareRecordWithOwner(ctx, oldOwner, record.Id) + + record.Owner = msg.NewOwner + k.setTokenizeShareRecord(ctx, record) + + // Set new account reference + newOwner, err := k.authKeeper.AddressCodec().StringToBytes(record.Owner) + if err != nil { + return nil, sdkerrors.ErrInvalidAddress + } + k.setTokenizeShareRecordWithOwner(ctx, newOwner, record.Id) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeTransferTokenizeShareRecord, + sdk.NewAttribute(types.AttributeKeyShareRecordID, fmt.Sprintf("%d", msg.TokenizeShareRecordId)), + sdk.NewAttribute(sdk.AttributeKeySender, msg.Sender), + sdk.NewAttribute(types.AttributeKeyShareOwner, msg.NewOwner), 
+ ), + ) + + return &types.MsgTransferTokenizeShareRecordResponse{}, nil +} + +// DisableTokenizeShares prevents an address from tokenizing any of their delegations +func (k msgServer) DisableTokenizeShares(ctx context.Context, msg *types.MsgDisableTokenizeShares) (*types.MsgDisableTokenizeSharesResponse, error) { + delegator, err := k.authKeeper.AddressCodec().StringToBytes(msg.DelegatorAddress) + if err != nil { + panic(err) + } + + // If tokenized shares is already disabled, alert the user + lockStatus, completionTime := k.GetTokenizeSharesLock(ctx, delegator) + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED { + return nil, types.ErrTokenizeSharesAlreadyDisabledForAccount + } + + // If the tokenized shares lock is expiring, remove the pending unlock from the queue + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING { + k.CancelTokenizeShareLockExpiration(ctx, delegator, completionTime) + } + + // Create a new tokenization lock for the user + // Note: if there is a lock expiration in progress, this will override the expiration + k.AddTokenizeSharesLock(ctx, delegator) + + return &types.MsgDisableTokenizeSharesResponse{}, nil +} + +// EnableTokenizeShares begins the countdown after which tokenizing shares by the +// sender address is re-allowed, which will complete after the unbonding period +func (k msgServer) EnableTokenizeShares(ctx context.Context, msg *types.MsgEnableTokenizeShares) (*types.MsgEnableTokenizeSharesResponse, error) { + delegator, err := k.authKeeper.AddressCodec().StringToBytes(msg.DelegatorAddress) + if err != nil { + panic(err) + } + + // If tokenized shares aren't current disabled, alert the user + lockStatus, unlockTime := k.GetTokenizeSharesLock(ctx, delegator) + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED { + return nil, types.ErrTokenizeSharesAlreadyEnabledForAccount + } + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING { + return nil, types.ErrTokenizeSharesAlreadyEnabledForAccount.Wrapf( + "tokenize shares re-enablement already in progress, ending at %s", unlockTime) + } + + // Otherwise queue the unlock + completionTime, err := k.QueueTokenizeSharesAuthorization(ctx, delegator) + if err != nil { + panic(err) + } + + return &types.MsgEnableTokenizeSharesResponse{CompletionTime: completionTime}, nil +} diff --git a/x/lsm/keeper/params.go b/x/lsm/keeper/params.go new file mode 100644 index 00000000000..c9d3dd8e8f3 --- /dev/null +++ b/x/lsm/keeper/params.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/math" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// SetParams sets the x/lsm module parameters. +// CONTRACT: This method performs no validation of the parameters. +func (k Keeper) SetParams(ctx context.Context, params types.Params) error { + store := k.storeService.OpenKVStore(ctx) + bz, err := k.cdc.Marshal(¶ms) + if err != nil { + return err + } + return store.Set(types.ParamsKey, bz) +} + +// GetParams gets the x/lsm module parameters. 
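+// If no parameters have been stored yet, the zero-value Params is returned with a nil error.
+//
+// Illustrative read path (sketch; k is the module Keeper):
+//
+//	params, err := k.GetParams(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	globalCap := params.GlobalLiquidStakingCap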
+func (k Keeper) GetParams(ctx context.Context) (params types.Params, err error) { + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(types.ParamsKey) + if err != nil { + return params, err + } + + if bz == nil { + return params, nil + } + + err = k.cdc.Unmarshal(bz, ¶ms) + return params, err +} + +// Validator bond factor for all validators +func (k Keeper) ValidatorBondFactor(ctx context.Context) (math.LegacyDec, error) { + params, err := k.GetParams(ctx) + return params.ValidatorBondFactor, err +} + +// Global liquid staking cap across all liquid staking providers +func (k Keeper) GlobalLiquidStakingCap(ctx context.Context) (math.LegacyDec, error) { + params, err := k.GetParams(ctx) + return params.GlobalLiquidStakingCap, err +} + +// Liquid staking cap for each validator +func (k Keeper) ValidatorLiquidStakingCap(ctx context.Context) (math.LegacyDec, error) { + params, err := k.GetParams(ctx) + return params.ValidatorLiquidStakingCap, err +} diff --git a/x/lsm/keeper/tokenize_share_record.go b/x/lsm/keeper/tokenize_share_record.go new file mode 100644 index 00000000000..84cebfc7e98 --- /dev/null +++ b/x/lsm/keeper/tokenize_share_record.go @@ -0,0 +1,196 @@ +package keeper + +import ( + "context" + "fmt" + + gogotypes "github.com/cosmos/gogoproto/types" + + errorsmod "cosmossdk.io/errors" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +func (k Keeper) GetLastTokenizeShareRecordID(ctx context.Context) uint64 { + store := k.storeService.OpenKVStore(ctx) + bytes, err := store.Get(types.LastTokenizeShareRecordIDKey) + if err != nil { + panic(err) + } + + if bytes == nil { + return 0 + } + return sdk.BigEndianToUint64(bytes) +} + +func (k Keeper) SetLastTokenizeShareRecordID(ctx context.Context, id uint64) { + store := k.storeService.OpenKVStore(ctx) + err := store.Set(types.LastTokenizeShareRecordIDKey, sdk.Uint64ToBigEndian(id)) + if err != nil { + panic(err) + } +} + +func (k Keeper) GetTokenizeShareRecord(ctx context.Context, id uint64) (tokenizeShareRecord types.TokenizeShareRecord, err error) { + store := k.storeService.OpenKVStore(ctx) + + bz, err := store.Get(types.GetTokenizeShareRecordByIndexKey(id)) + if err != nil { + return tokenizeShareRecord, err + } + + if bz == nil { + return tokenizeShareRecord, errorsmod.Wrap(types.ErrTokenizeShareRecordNotExists, fmt.Sprintf("tokenizeShareRecord %d does not exist", id)) + } + + k.cdc.MustUnmarshal(bz, &tokenizeShareRecord) + return tokenizeShareRecord, nil +} + +func (k Keeper) GetTokenizeShareRecordsByOwner(ctx context.Context, owner sdk.AccAddress) (tokenizeShareRecords []types.TokenizeShareRecord) { + store := k.storeService.OpenKVStore(ctx) + + it := storetypes.KVStorePrefixIterator(runtime.KVStoreAdapter(store), types.GetTokenizeShareRecordIDsByOwnerPrefix(owner)) + defer it.Close() + + for ; it.Valid(); it.Next() { + var id gogotypes.UInt64Value + k.cdc.MustUnmarshal(it.Value(), &id) + + tokenizeShareRecord, err := k.GetTokenizeShareRecord(ctx, id.Value) + if err != nil { + continue + } + tokenizeShareRecords = append(tokenizeShareRecords, tokenizeShareRecord) + } + return +} + +func (k Keeper) GetTokenizeShareRecordByDenom(ctx context.Context, denom string) (types.TokenizeShareRecord, error) { + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(types.GetTokenizeShareRecordIDByDenomKey(denom)) + if err != nil { + return types.TokenizeShareRecord{}, err + } + + if bz == nil { + 
return types.TokenizeShareRecord{}, fmt.Errorf("tokenize share record not found from denom: %s", denom) + } + + var id gogotypes.UInt64Value + k.cdc.MustUnmarshal(bz, &id) + + return k.GetTokenizeShareRecord(ctx, id.Value) +} + +func (k Keeper) GetAllTokenizeShareRecords(ctx context.Context) (tokenizeShareRecords []types.TokenizeShareRecord) { + store := k.storeService.OpenKVStore(ctx) + + it := storetypes.KVStorePrefixIterator(runtime.KVStoreAdapter(store), types.TokenizeShareRecordPrefix) + defer it.Close() + + for ; it.Valid(); it.Next() { + var tokenizeShareRecord types.TokenizeShareRecord + k.cdc.MustUnmarshal(it.Value(), &tokenizeShareRecord) + + tokenizeShareRecords = append(tokenizeShareRecords, tokenizeShareRecord) + } + return +} + +func (k Keeper) AddTokenizeShareRecord(ctx context.Context, tokenizeShareRecord types.TokenizeShareRecord) error { + hasRecord, err := k.hasTokenizeShareRecord(ctx, tokenizeShareRecord.Id) + if err != nil { + return err + } + + if hasRecord { + return errorsmod.Wrapf(types.ErrTokenizeShareRecordAlreadyExists, "TokenizeShareRecord already exists: %d", tokenizeShareRecord.Id) + } + + k.setTokenizeShareRecord(ctx, tokenizeShareRecord) + + owner, err := k.authKeeper.AddressCodec().StringToBytes(tokenizeShareRecord.Owner) + if err != nil { + return err + } + + k.setTokenizeShareRecordWithOwner(ctx, owner, tokenizeShareRecord.Id) + k.setTokenizeShareRecordWithDenom(ctx, tokenizeShareRecord.GetShareTokenDenom(), tokenizeShareRecord.Id) + + return nil +} + +func (k Keeper) DeleteTokenizeShareRecord(ctx context.Context, recordID uint64) error { + record, err := k.GetTokenizeShareRecord(ctx, recordID) + if err != nil { + return err + } + owner, err := k.authKeeper.AddressCodec().StringToBytes(record.Owner) + if err != nil { + return err + } + + store := k.storeService.OpenKVStore(ctx) + err = store.Delete(types.GetTokenizeShareRecordByIndexKey(recordID)) + if err != nil { + return err + } + err = store.Delete(types.GetTokenizeShareRecordIDByOwnerAndIDKey(owner, recordID)) + if err != nil { + return err + } + err = store.Delete(types.GetTokenizeShareRecordIDByDenomKey(record.GetShareTokenDenom())) + if err != nil { + return err + } + return nil +} + +func (k Keeper) hasTokenizeShareRecord(ctx context.Context, id uint64) (bool, error) { + store := k.storeService.OpenKVStore(ctx) + return store.Has(types.GetTokenizeShareRecordByIndexKey(id)) +} + +func (k Keeper) setTokenizeShareRecord(ctx context.Context, tokenizeShareRecord types.TokenizeShareRecord) { + store := k.storeService.OpenKVStore(ctx) + bz := k.cdc.MustMarshal(&tokenizeShareRecord) + + err := store.Set(types.GetTokenizeShareRecordByIndexKey(tokenizeShareRecord.Id), bz) + if err != nil { + panic(err) + } +} + +func (k Keeper) setTokenizeShareRecordWithOwner(ctx context.Context, owner sdk.AccAddress, id uint64) { + store := k.storeService.OpenKVStore(ctx) + bz := k.cdc.MustMarshal(&gogotypes.UInt64Value{Value: id}) + + err := store.Set(types.GetTokenizeShareRecordIDByOwnerAndIDKey(owner, id), bz) + if err != nil { + panic(err) + } +} + +func (k Keeper) deleteTokenizeShareRecordWithOwner(ctx context.Context, owner sdk.AccAddress, id uint64) { + store := k.storeService.OpenKVStore(ctx) + err := store.Delete(types.GetTokenizeShareRecordIDByOwnerAndIDKey(owner, id)) + if err != nil { + panic(err) + } +} + +func (k Keeper) setTokenizeShareRecordWithDenom(ctx context.Context, denom string, id uint64) { + store := k.storeService.OpenKVStore(ctx) + bz := k.cdc.MustMarshal(&gogotypes.UInt64Value{Value: id}) + + 
err := store.Set(types.GetTokenizeShareRecordIDByDenomKey(denom), bz) + if err != nil { + panic(err) + } +} diff --git a/x/lsm/keeper/tokenize_share_record_test.go b/x/lsm/keeper/tokenize_share_record_test.go new file mode 100644 index 00000000000..be7ed1a063f --- /dev/null +++ b/x/lsm/keeper/tokenize_share_record_test.go @@ -0,0 +1,63 @@ +package keeper_test + +import ( + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +func (suite *KeeperTestSuite) TestGetLastTokenizeShareRecordId() { + ctx, keeper := suite.ctx, suite.lsmKeeper + lastTokenizeShareRecordID := keeper.GetLastTokenizeShareRecordID(ctx) + suite.Equal(lastTokenizeShareRecordID, uint64(0)) + keeper.SetLastTokenizeShareRecordID(ctx, 100) + lastTokenizeShareRecordID = keeper.GetLastTokenizeShareRecordID(ctx) + suite.Equal(lastTokenizeShareRecordID, uint64(100)) +} + +func (suite *KeeperTestSuite) TestGetTokenizeShareRecord() { + ctx, keeper := suite.ctx, suite.lsmKeeper + addrs := simtestutil.CreateIncrementalAccounts(2) + + owner1, owner2 := addrs[0], addrs[1] + tokenizeShareRecord1 := types.TokenizeShareRecord{ + Id: 0, + Owner: owner1.String(), + ModuleAccount: "test-module-account-1", + Validator: "test-validator", + } + tokenizeShareRecord2 := types.TokenizeShareRecord{ + Id: 1, + Owner: owner2.String(), + ModuleAccount: "test-module-account-2", + Validator: "test-validator", + } + tokenizeShareRecord3 := types.TokenizeShareRecord{ + Id: 2, + Owner: owner1.String(), + ModuleAccount: "test-module-account-3", + Validator: "test-validator", + } + err := keeper.AddTokenizeShareRecord(ctx, tokenizeShareRecord1) + suite.NoError(err) + err = keeper.AddTokenizeShareRecord(ctx, tokenizeShareRecord2) + suite.NoError(err) + err = keeper.AddTokenizeShareRecord(ctx, tokenizeShareRecord3) + suite.NoError(err) + + tokenizeShareRecord, err := keeper.GetTokenizeShareRecord(ctx, 2) + suite.NoError(err) + suite.Equal(tokenizeShareRecord, tokenizeShareRecord3) + + tokenizeShareRecord, err = keeper.GetTokenizeShareRecordByDenom(ctx, tokenizeShareRecord2.GetShareTokenDenom()) + suite.NoError(err) + suite.Equal(tokenizeShareRecord, tokenizeShareRecord2) + + tokenizeShareRecords := keeper.GetAllTokenizeShareRecords(ctx) + suite.Equal(len(tokenizeShareRecords), 3) + + tokenizeShareRecords = keeper.GetTokenizeShareRecordsByOwner(ctx, owner1) + suite.Equal(len(tokenizeShareRecords), 2) + + tokenizeShareRecords = keeper.GetTokenizeShareRecordsByOwner(ctx, owner2) + suite.Equal(len(tokenizeShareRecords), 1) +} diff --git a/x/lsm/module.go b/x/lsm/module.go new file mode 100644 index 00000000000..a25027f2478 --- /dev/null +++ b/x/lsm/module.go @@ -0,0 +1,169 @@ +package lsm + +import ( + "context" + "encoding/json" + "fmt" + + gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + "cosmossdk.io/core/appmodule" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/staking/client/cli" + + "github.com/cosmos/gaia/v22/x/lsm/keeper" + "github.com/cosmos/gaia/v22/x/lsm/simulation" + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +const ( + consensusVersion uint64 = 1 +) + +var ( + _ module.AppModuleBasic = AppModuleBasic{} + _ module.AppModuleSimulation = AppModule{} + _ module.HasServices = AppModule{} + _ 
module.HasGenesis = AppModule{} + + _ appmodule.AppModule = AppModule{} +) + +// AppModuleBasic defines the basic application module used by the lsm module. +type AppModuleBasic struct { + cdc codec.Codec + ak types.AccountKeeper +} + +// Name returns the lsm module's name. +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the lsm module's types on the given LegacyAmino codec. +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers the module's interface types +func (AppModuleBasic) RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(registry) +} + +// DefaultGenesis returns default genesis state as raw bytes for the lsm +// module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis performs genesis state validation for the staking module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + var data types.GenesisState + if err := cdc.UnmarshalJSON(bz, &data); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return types.ValidateGenesis(&data) +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the staking module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *gwruntime.ServeMux) { + if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil { + panic(err) + } +} + +// GetTxCmd returns the root tx command for the staking module. +func (amb AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.NewTxCmd(amb.cdc.InterfaceRegistry().SigningContext().ValidatorAddressCodec(), amb.cdc.InterfaceRegistry().SigningContext().AddressCodec()) +} + +// AppModule implements an application module for the staking module. +type AppModule struct { + AppModuleBasic + + keeper *keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper + stakingKeeper types.StakingKeeper +} + +// NewAppModule creates a new AppModule object +func NewAppModule( + cdc codec.Codec, + keeper *keeper.Keeper, + ak types.AccountKeeper, + bk types.BankKeeper, + sk types.StakingKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: AppModuleBasic{cdc: cdc, ak: ak}, + keeper: keeper, + accountKeeper: ak, + bankKeeper: bk, + stakingKeeper: sk, + } +} + +// IsOnePerModuleType implements the depinject.OnePerModuleType interface. +func (am AppModule) IsOnePerModuleType() {} + +// IsAppModule implements the appmodule.AppModule interface. +func (am AppModule) IsAppModule() {} + +// RegisterServices registers module services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) + querier := keeper.Querier{Keeper: am.keeper} + types.RegisterQueryServer(cfg.QueryServer(), querier) +} + +// InitGenesis performs genesis initialization for the lsm module. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) { + var genesisState types.GenesisState + + cdc.MustUnmarshalJSON(data, &genesisState) + + am.keeper.InitGenesis(ctx, &genesisState) +} + +// ExportGenesis returns the exported genesis state as raw bytes for the lsm +// module. 
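+// This is intended to round-trip with InitGenesis above; a minimal sketch using the
+// same JSON codec the module was wired with:
+//
+//	exported := am.ExportGenesis(ctx, cdc)
+//	am.InitGenesis(ctx, cdc, exported)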
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(am.keeper.ExportGenesis(ctx)) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return consensusVersion } + +// AppModuleSimulation functions + +// GenerateGenesisState creates a randomized GenState of the lsm module. +func (AppModule) GenerateGenesisState(simState *module.SimulationState) { + simulation.RandomizedGenState(simState) +} + +// ProposalMsgs returns msgs used for governance proposals for simulations. +func (AppModule) ProposalMsgs(_ module.SimulationState) []simtypes.WeightedProposalMsg { + return simulation.ProposalMsgs() +} + +// RegisterStoreDecoder registers a decoder for lsm module's types +func (am AppModule) RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) { + sdr[types.StoreKey] = simulation.NewDecodeStore(am.cdc) +} + +// WeightedOperations returns the all the lsm module operations with their respective weights. +func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { + return simulation.WeightedOperations( + simState.AppParams, simState.TxConfig, + am.accountKeeper, am.bankKeeper, am.stakingKeeper, am.keeper, + ) +} diff --git a/x/lsm/simulation/decoder.go b/x/lsm/simulation/decoder.go new file mode 100644 index 00000000000..128552d78fc --- /dev/null +++ b/x/lsm/simulation/decoder.go @@ -0,0 +1,60 @@ +package simulation + +import ( + "bytes" + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/kv" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding lsm type. 
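+//
+// Illustrative call, given two TokenizeShareRecord values recordA and recordB, and
+// assuming the record index key helpers in x/lsm/types prepend TokenizeShareRecordPrefix:
+//
+//	decode := NewDecodeStore(cdc)
+//	out := decode(
+//		kv.Pair{Key: types.GetTokenizeShareRecordByIndexKey(1), Value: cdc.MustMarshal(&recordA)},
+//		kv.Pair{Key: types.GetTokenizeShareRecordByIndexKey(2), Value: cdc.MustMarshal(&recordB)},
+//	)
+//
+// Keys with an unknown prefix panic, which surfaces missing decoder cases during simulation.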
+func NewDecodeStore(cdc codec.Codec) func(kvA, kvB kv.Pair) string { + return func(kvA, kvB kv.Pair) string { + switch { + case bytes.Equal(kvA.Key[:1], types.TokenizeShareRecordPrefix): + var recordA, recordB types.TokenizeShareRecord + + cdc.MustUnmarshal(kvA.Value, &recordA) + cdc.MustUnmarshal(kvB.Value, &recordB) + + return fmt.Sprintf("%v\n%v", recordA, recordB) + case bytes.Equal(kvA.Key[:1], types.TokenizeShareRecordIDByOwnerPrefix), + bytes.Equal(kvA.Key[:1], types.TokenizeShareRecordIDByDenomPrefix), + bytes.Equal(kvA.Key[:1], types.LastTokenizeShareRecordIDKey): + var idA, idB uint64 + + idA = sdk.BigEndianToUint64(kvA.Value) + idB = sdk.BigEndianToUint64(kvB.Value) + + return fmt.Sprintf("%v\n%v", idA, idB) + case bytes.Equal(kvA.Key[:1], types.TotalLiquidStakedTokensKey): + var tokensA, tokensB sdk.IntProto + + cdc.MustUnmarshal(kvA.Value, &tokensA) + cdc.MustUnmarshal(kvB.Value, &tokensB) + + return fmt.Sprintf("%v\n%v", tokensA, tokensB) + case bytes.Equal(kvA.Key[:1], types.TokenizeSharesLockPrefix): + var lockA, lockB types.TokenizeShareLock + + cdc.MustUnmarshal(kvA.Value, &lockA) + cdc.MustUnmarshal(kvB.Value, &lockB) + + return fmt.Sprintf("%v\n%v", lockA, lockB) + case bytes.Equal(kvA.Key[:1], types.TokenizeSharesUnlockQueuePrefix): + var authsA, authsB types.PendingTokenizeShareAuthorizations + + cdc.MustUnmarshal(kvA.Value, &authsA) + cdc.MustUnmarshal(kvB.Value, &authsB) + + return fmt.Sprintf("%v\n%v", authsA, authsB) + default: + panic(fmt.Sprintf("invalid lsm key prefix %X", kvA.Key[:1])) + } + } +} diff --git a/x/lsm/simulation/genesis.go b/x/lsm/simulation/genesis.go new file mode 100644 index 00000000000..6f3f5dbfc0d --- /dev/null +++ b/x/lsm/simulation/genesis.go @@ -0,0 +1,67 @@ +package simulation + +import ( + "encoding/json" + "fmt" + "math/rand" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/types/simulation" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// Simulation parameter constants +const ( + ValidatorBondFactor = "validator_bond_factor" + GlobalLiquidStakingCap = "global_liquid_staking_cap" + ValidatorLiquidStakingCap = "validator_liquid_staking_cap" +) + +// getGlobalLiquidStakingCap returns randomized GlobalLiquidStakingCap between 0-1. +func getGlobalLiquidStakingCap(r *rand.Rand) sdkmath.LegacyDec { + return simulation.RandomDecAmount(r, sdkmath.LegacyOneDec()) +} + +// getValidatorLiquidStakingCap returns randomized ValidatorLiquidStakingCap between 0-1. +func getValidatorLiquidStakingCap(r *rand.Rand) sdkmath.LegacyDec { + return simulation.RandomDecAmount(r, sdkmath.LegacyOneDec()) +} + +// getValidatorBondFactor returns randomized ValidatorBondCap between -1 and 300. 
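+// The -1 lower bound is deliberate: in LSM implementations a negative validator bond
+// factor is conventionally a "validator bond cap disabled" sentinel, so the randomized
+// genesis can also exercise that branch (assumption carried over from the upstream LSM
+// staking fork; not enforced here).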
+func getValidatorBondFactor(r *rand.Rand) sdkmath.LegacyDec { + return sdkmath.LegacyNewDec(int64(simulation.RandIntBetween(r, -1, 300))) +} + +// RandomizedGenState generates a random GenesisState for lsm +func RandomizedGenState(simState *module.SimulationState) { + // params + var ( + validatorBondFactor sdkmath.LegacyDec + globalLiquidStakingCap sdkmath.LegacyDec + validatorLiquidStakingCap sdkmath.LegacyDec + ) + + simState.AppParams.GetOrGenerate(ValidatorBondFactor, &validatorBondFactor, simState.Rand, func(r *rand.Rand) { validatorBondFactor = getValidatorBondFactor(r) }) + + simState.AppParams.GetOrGenerate(GlobalLiquidStakingCap, &globalLiquidStakingCap, simState.Rand, func(r *rand.Rand) { globalLiquidStakingCap = getGlobalLiquidStakingCap(r) }) + + simState.AppParams.GetOrGenerate(ValidatorLiquidStakingCap, &validatorLiquidStakingCap, simState.Rand, func(r *rand.Rand) { validatorLiquidStakingCap = getValidatorLiquidStakingCap(r) }) + + params := types.NewParams( + validatorBondFactor, + globalLiquidStakingCap, + validatorLiquidStakingCap, + ) + + lsmGenesis := types.NewGenesisState(params, nil, 0, sdkmath.ZeroInt(), nil) + + bz, err := json.MarshalIndent(&lsmGenesis.Params, "", " ") + if err != nil { + panic(err) + } + fmt.Printf("Selected randomly generated lsm parameters:\n%s\n", bz) + simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(lsmGenesis) +} diff --git a/x/lsm/simulation/operations.go b/x/lsm/simulation/operations.go new file mode 100644 index 00000000000..da7dc03da14 --- /dev/null +++ b/x/lsm/simulation/operations.go @@ -0,0 +1,594 @@ +package simulation + +import ( + "fmt" + "math/rand" + + "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/exported" + "github.com/cosmos/cosmos-sdk/x/simulation" + + "github.com/cosmos/gaia/v22/x/lsm/keeper" + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// Simulation operation weight constants +const ( + DefaultWeightMsgTokenizeShares int = 25 + DefaultWeightMsgRedeemTokensforShares int = 25 + DefaultWeightMsgTransferTokenizeShareRecord int = 5 + DefaultWeightMsgEnableTokenizeShares int = 1 + DefaultWeightMsgDisableTokenizeShares int = 1 + DefaultWeightMsgWithdrawAllTokenizeShareRecordReward int = 50 + + OpWeightMsgTokenizeShares = "op_weight_msg_tokenize_shares" //nolint:gosec + OpWeightMsgRedeemTokensforShares = "op_weight_msg_redeem_tokens_for_shares" //nolint:gosec + OpWeightMsgTransferTokenizeShareRecord = "op_weight_msg_transfer_tokenize_share_record" //nolint:gosec + OpWeightMsgDisableTokenizeShares = "op_weight_msg_disable_tokenize_shares" //nolint:gosec + OpWeightMsgEnableTokenizeShares = "op_weight_msg_enable_tokenize_shares" //nolint:gosec + OpWeightMsgWithdrawAllTokenizeShareRecordReward = "op_weight_msg_withdraw_all_tokenize_share_record_reward" //nolint:gosec +) + +// WeightedOperations returns all the operations from the module with their respective weights +func WeightedOperations( + appParams simtypes.AppParams, + txGen client.TxConfig, + ak types.AccountKeeper, + bk types.BankKeeper, + sk types.StakingKeeper, + k *keeper.Keeper, +) simulation.WeightedOperations { + var ( + weightMsgTokenizeShares int + weightMsgRedeemTokensforShares int + weightMsgTransferTokenizeShareRecord int + weightMsgDisableTokenizeShares int + weightMsgEnableTokenizeShares int 
+ weightMsgWithdrawAllTokenizeShareRecordReward int + ) + + appParams.GetOrGenerate(OpWeightMsgTokenizeShares, &weightMsgTokenizeShares, nil, + func(_ *rand.Rand) { + weightMsgTokenizeShares = DefaultWeightMsgTokenizeShares + }, + ) + + appParams.GetOrGenerate(OpWeightMsgRedeemTokensforShares, &weightMsgRedeemTokensforShares, nil, + func(_ *rand.Rand) { + weightMsgRedeemTokensforShares = DefaultWeightMsgRedeemTokensforShares + }, + ) + + appParams.GetOrGenerate(OpWeightMsgTransferTokenizeShareRecord, &weightMsgTransferTokenizeShareRecord, nil, + func(_ *rand.Rand) { + weightMsgTransferTokenizeShareRecord = DefaultWeightMsgTransferTokenizeShareRecord + }, + ) + + appParams.GetOrGenerate(OpWeightMsgDisableTokenizeShares, &weightMsgDisableTokenizeShares, nil, + func(_ *rand.Rand) { + weightMsgDisableTokenizeShares = DefaultWeightMsgDisableTokenizeShares + }, + ) + + appParams.GetOrGenerate(OpWeightMsgEnableTokenizeShares, &weightMsgEnableTokenizeShares, nil, + func(_ *rand.Rand) { + weightMsgEnableTokenizeShares = DefaultWeightMsgEnableTokenizeShares + }, + ) + + appParams.GetOrGenerate(OpWeightMsgWithdrawAllTokenizeShareRecordReward, + &weightMsgWithdrawAllTokenizeShareRecordReward, nil, func(r *rand.Rand) { + weightMsgWithdrawAllTokenizeShareRecordReward = DefaultWeightMsgWithdrawAllTokenizeShareRecordReward + }, + ) + + return simulation.WeightedOperations{ + simulation.NewWeightedOperation( + weightMsgTokenizeShares, + SimulateMsgTokenizeShares(txGen, ak, bk, sk, k), + ), + simulation.NewWeightedOperation( + weightMsgRedeemTokensforShares, + SimulateMsgRedeemTokensforShares(txGen, ak, bk, sk, k), + ), + simulation.NewWeightedOperation( + weightMsgTransferTokenizeShareRecord, + SimulateMsgTransferTokenizeShareRecord(txGen, ak, bk, k), + ), + simulation.NewWeightedOperation( + weightMsgDisableTokenizeShares, + SimulateMsgDisableTokenizeShares(txGen, ak, bk, sk, k), + ), + simulation.NewWeightedOperation( + weightMsgEnableTokenizeShares, + SimulateMsgEnableTokenizeShares(txGen, ak, bk, sk, k), + ), + simulation.NewWeightedOperation( + weightMsgWithdrawAllTokenizeShareRecordReward, + SimulateMsgWithdrawAllTokenizeShareRecordReward(txGen, ak, bk, k), + ), + } +} + +// SimulateMsgTokenizeShares generates a MsgTokenizeShares with random values +func SimulateMsgTokenizeShares(txGen client.TxConfig, ak types.AccountKeeper, bk types.BankKeeper, + sk types.StakingKeeper, k *keeper.Keeper, +) simtypes.Operation { + return func( + r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + msgType := sdk.MsgTypeURL(&types.MsgTokenizeShares{}) + + vals, err := sk.GetAllValidators(ctx) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "unable to get validators"), nil, err + } + + // get random validator + validator, ok := testutil.RandSliceElem(r, vals) + if !ok { + return simtypes.NoOpMsg(types.ModuleName, msgType, "unable to pick validator"), nil, nil + } + + valAddr, err := sk.ValidatorAddressCodec().StringToBytes(validator.GetOperator()) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "error getting validator address bytes"), nil, err + } + + delegations, err := sk.GetValidatorDelegations(ctx, valAddr) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "error getting validator delegations"), nil, nil + } + + if delegations == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "keeper does have any delegation entries"), nil, nil + 
} + + // get random delegator from src validator + delegation := delegations[r.Intn(len(delegations))] + delAddr := delegation.GetDelegatorAddr() + delAddrBz, err := ak.AddressCodec().StringToBytes(delAddr) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "error getting delegator address bytes"), nil, err + } + + // make sure delegation is not a validator bond + if delegation.ValidatorBond { + return simtypes.NoOpMsg(types.ModuleName, msgType, "can't tokenize a validator bond"), nil, nil + } + + // make sure tokenizations are not disabled + lockStatus, _ := k.GetTokenizeSharesLock(ctx, sdk.AccAddress(delAddrBz)) + if lockStatus != types.TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED { + return simtypes.NoOpMsg(types.ModuleName, msgType, "tokenize shares disabled"), nil, nil + } + + // Make sure that the delegator has no ongoing redelegations to the validator + found, err := sk.HasReceivingRedelegation(ctx, delAddrBz, valAddr) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "error checking receiving redelegation"), nil, err + } + if found { + return simtypes.NoOpMsg(types.ModuleName, msgType, "delegator has redelegations in progress"), nil, nil + } + + // get random destination validator + totalBond := validator.TokensFromShares(delegation.GetShares()).TruncateInt() + if !totalBond.IsPositive() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "total bond is negative"), nil, nil + } + + tokenizeShareAmt, err := simtypes.RandPositiveInt(r, totalBond) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "unable to generate positive amount"), nil, err + } + + if tokenizeShareAmt.IsZero() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "amount is zero"), nil, nil + } + + bondDenom, err := sk.BondDenom(ctx) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "failed to find bond denom"), nil, err + } + + account := ak.GetAccount(ctx, sdk.AccAddress(delAddrBz)) + if account, ok := account.(vesting.VestingAccount); ok { + if tokenizeShareAmt.GT(account.GetDelegatedFree().AmountOf(bondDenom)) { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account vests and amount exceeds free portion"), nil, nil + } + } + + // check if the shares truncate to zero + shares, err := validator.SharesFromTokens(tokenizeShareAmt) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "invalid shares"), nil, err + } + + if validator.TokensFromShares(shares).TruncateInt().IsZero() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "shares truncate to zero"), nil, nil // skip + } + + // check that tokenization would not exceed global cap + params, err := k.GetParams(ctx) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "failed to get params"), nil, err + } + + totalBondedTokens, err := sk.TotalBondedTokens(ctx) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "failed to get total bonded errors"), nil, err + } + + totalStaked := math.LegacyNewDecFromInt(totalBondedTokens) + if totalStaked.IsZero() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "cannot happened - no validators bonded if stake is 0.0"), nil, nil // skip + } + totalLiquidStaked := math.LegacyNewDecFromInt(k.GetTotalLiquidStakedTokens(ctx).Add(tokenizeShareAmt)) + liquidStakedPercent := totalLiquidStaked.Quo(totalStaked) + if liquidStakedPercent.GT(params.GlobalLiquidStakingCap) { + return simtypes.NoOpMsg(types.ModuleName, msgType, "global liquid staking cap exceeded"), nil, nil + } + + // check that 
tokenization would not exceed validator liquid staking cap + validatorTotalShares := validator.DelegatorShares + validatorLiquidShares := validator.LiquidShares.Add(shares) + validatorLiquidSharesPercent := validatorLiquidShares.Quo(validatorTotalShares) + if validatorLiquidSharesPercent.GT(params.ValidatorLiquidStakingCap) { + return simtypes.NoOpMsg(types.ModuleName, msgType, "validator liquid staking cap exceeded"), nil, nil + } + + // check that tokenization would not exceed validator bond cap + maxValidatorLiquidShares := validator.ValidatorBondShares.Mul(params.ValidatorBondFactor) + if validator.LiquidShares.Add(shares).GT(maxValidatorLiquidShares) { + return simtypes.NoOpMsg(types.ModuleName, msgType, "validator bond cap exceeded"), nil, nil + } + + // need to retrieve the simulation account associated with delegation to retrieve PrivKey + var simAccount simtypes.Account + + for _, simAcc := range accs { + if simAcc.Address.Equals(sdk.AccAddress(delAddrBz)) { + simAccount = simAcc + break + } + } + + // if simaccount.PrivKey == nil, delegation address does not exist in accs. Return error + if simAccount.PrivKey == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account private key is nil"), nil, nil + } + + msg := &types.MsgTokenizeShares{ + DelegatorAddress: delAddr, + ValidatorAddress: validator.GetOperator(), + Amount: sdk.NewCoin(bondDenom, tokenizeShareAmt), + TokenizedShareOwner: delAddr, + } + + spendable := bk.SpendableCoins(ctx, account.GetAddress()) + + txCtx := simulation.OperationInput{ + R: r, + App: app, + TxGen: txGen, + Cdc: nil, + Msg: msg, + Context: ctx, + SimAccount: simAccount, + AccountKeeper: ak, + Bankkeeper: bk, + ModuleName: types.ModuleName, + CoinsSpentInMsg: spendable, + } + + return simulation.GenAndDeliverTxWithRandFees(txCtx) + } +} + +// SimulateMsgRedeemTokensforShares generates a MsgRedeemTokensforShares with random values +func SimulateMsgRedeemTokensforShares(txGen client.TxConfig, ak types.AccountKeeper, bk types.BankKeeper, + sk types.StakingKeeper, k *keeper.Keeper, +) simtypes.Operation { + return func( + r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + msgType := sdk.MsgTypeURL(&types.MsgRedeemTokensForShares{}) + + redeemUser := simtypes.Account{} + redeemCoin := sdk.Coin{} + tokenizeShareRecord := types.TokenizeShareRecord{} + + records := k.GetAllTokenizeShareRecords(ctx) + if len(records) > 0 { + record := records[r.Intn(len(records))] + for _, acc := range accs { + balance := bk.GetBalance(ctx, acc.Address, record.GetShareTokenDenom()) + if balance.Amount.IsPositive() { + redeemUser = acc + redeemAmount, err := simtypes.RandPositiveInt(r, balance.Amount) + if err == nil { + redeemCoin = sdk.NewCoin(record.GetShareTokenDenom(), redeemAmount) + tokenizeShareRecord = record + } + break + } + } + } + + // if redeemUser.PrivKey == nil, redeem user does not exist in accs + if redeemUser.PrivKey == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account private key is nil"), nil, nil + } + + if redeemCoin.Amount.IsZero() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "empty balance in tokens"), nil, nil + } + + valAddress, err := sk.ValidatorAddressCodec().StringToBytes(tokenizeShareRecord.Validator) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "invalid validator address"), nil, fmt.Errorf("invalid validator address") + } + validator, err := sk.GetValidator(ctx, valAddress) + 
if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "validator not found"), nil, fmt.Errorf("validator not found") + } + delegation, err := sk.GetDelegation(ctx, tokenizeShareRecord.GetModuleAddress(), valAddress) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "delegation not found"), nil, fmt.Errorf("delegation not found") + } + + // prevent redemption that returns a 0 amount + shares := math.LegacyNewDecFromInt(redeemCoin.Amount) + if redeemCoin.Amount.Equal(delegation.Shares.TruncateInt()) { + shares = delegation.Shares + } + + if validator.TokensFromShares(shares).TruncateInt().IsZero() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "zero tokens returned"), nil, nil + } + + account := ak.GetAccount(ctx, redeemUser.Address) + spendable := bk.SpendableCoins(ctx, account.GetAddress()) + + msg := &types.MsgRedeemTokensForShares{ + DelegatorAddress: redeemUser.Address.String(), + Amount: redeemCoin, + } + + txCtx := simulation.OperationInput{ + R: r, + App: app, + TxGen: txGen, + Cdc: nil, + Msg: msg, + Context: ctx, + SimAccount: redeemUser, + AccountKeeper: ak, + Bankkeeper: bk, + ModuleName: types.ModuleName, + CoinsSpentInMsg: spendable, + } + + return simulation.GenAndDeliverTxWithRandFees(txCtx) + } +} + +// SimulateMsgTransferTokenizeShareRecord generates a MsgTransferTokenizeShareRecord with random values +func SimulateMsgTransferTokenizeShareRecord(txGen client.TxConfig, ak types.AccountKeeper, bk types.BankKeeper, k *keeper.Keeper) simtypes.Operation { + return func( + r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + msgType := sdk.MsgTypeURL(&types.MsgTransferTokenizeShareRecord{}) + + simAccount, _ := simtypes.RandomAcc(r, accs) + destAccount, _ := simtypes.RandomAcc(r, accs) + transferRecord := types.TokenizeShareRecord{} + + records := k.GetAllTokenizeShareRecords(ctx) + if len(records) > 0 { + record := records[r.Intn(len(records))] + for _, acc := range accs { + if record.Owner == acc.Address.String() { + simAccount = acc + transferRecord = record + break + } + } + } + + // if simAccount.PrivKey == nil, record owner does not exist in accs + if simAccount.PrivKey == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account private key is nil"), nil, nil + } + + if transferRecord.Id == 0 { + return simtypes.NoOpMsg(types.ModuleName, msgType, "share record not found"), nil, nil + } + + account := ak.GetAccount(ctx, simAccount.Address) + spendable := bk.SpendableCoins(ctx, account.GetAddress()) + + msg := &types.MsgTransferTokenizeShareRecord{ + TokenizeShareRecordId: transferRecord.Id, + Sender: simAccount.Address.String(), + NewOwner: destAccount.Address.String(), + } + + txCtx := simulation.OperationInput{ + R: r, + App: app, + TxGen: txGen, + Cdc: nil, + Msg: msg, + Context: ctx, + SimAccount: simAccount, + AccountKeeper: ak, + Bankkeeper: bk, + ModuleName: types.ModuleName, + CoinsSpentInMsg: spendable, + } + + return simulation.GenAndDeliverTxWithRandFees(txCtx) + } +} + +func SimulateMsgDisableTokenizeShares(txGen client.TxConfig, ak types.AccountKeeper, bk types.BankKeeper, + sk types.StakingKeeper, k *keeper.Keeper, +) simtypes.Operation { + return func( + r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + msgType := sdk.MsgTypeURL(&types.MsgDisableTokenizeShares{}) + simAccount, _ := 
simtypes.RandomAcc(r, accs) + + if simAccount.PrivKey == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account private key is nil"), nil, nil + } + + denom, err := sk.BondDenom(ctx) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "bond denom not found"), nil, err + } + + balance := bk.GetBalance(ctx, simAccount.Address, denom).Amount + if !balance.IsPositive() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "balance is not positive"), nil, nil + } + + lockStatus, _ := k.GetTokenizeSharesLock(ctx, simAccount.Address) + if lockStatus == types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account already locked"), nil, nil + } + + msg := &types.MsgDisableTokenizeShares{ + DelegatorAddress: simAccount.Address.String(), + } + + txCtx := simulation.OperationInput{ + R: r, + App: app, + TxGen: txGen, + Cdc: nil, + Msg: msg, + Context: ctx, + SimAccount: simAccount, + AccountKeeper: ak, + Bankkeeper: bk, + ModuleName: types.ModuleName, + } + return simulation.GenAndDeliverTxWithRandFees(txCtx) + } +} + +func SimulateMsgEnableTokenizeShares(txGen client.TxConfig, ak types.AccountKeeper, bk types.BankKeeper, + sk types.StakingKeeper, k *keeper.Keeper, +) simtypes.Operation { + return func( + r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + msgType := sdk.MsgTypeURL(&types.MsgEnableTokenizeShares{}) + simAccount, _ := simtypes.RandomAcc(r, accs) + + if simAccount.PrivKey == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account private key is nil"), nil, nil + } + + denom, err := sk.BondDenom(ctx) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "bond denom not found"), nil, err + } + + balance := bk.GetBalance(ctx, simAccount.Address, denom).Amount + if !balance.IsPositive() { + return simtypes.NoOpMsg(types.ModuleName, msgType, "balance is not positive"), nil, nil + } + + lockStatus, _ := k.GetTokenizeSharesLock(ctx, simAccount.Address) + if lockStatus != types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account is not locked"), nil, nil + } + + msg := &types.MsgEnableTokenizeShares{ + DelegatorAddress: simAccount.Address.String(), + } + + txCtx := simulation.OperationInput{ + R: r, + App: app, + TxGen: txGen, + Cdc: nil, + Msg: msg, + Context: ctx, + SimAccount: simAccount, + AccountKeeper: ak, + Bankkeeper: bk, + ModuleName: types.ModuleName, + } + return simulation.GenAndDeliverTxWithRandFees(txCtx) + } +} + +// SimulateMsgWithdrawAllTokenizeShareRecordReward simulates MsgWithdrawAllTokenizeShareRecordReward execution where +// a random account claims tokenize share record rewards.
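For context on the lock check used by the disable and enable operations above, here is a minimal sketch of branching on the lock states this module defines; it assumes the second return value of GetTokenizeSharesLock (ignored in the operations above) is the pending unlock completion time, and the helper name is illustrative only.

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/cosmos/gaia/v22/x/lsm/keeper"
	"github.com/cosmos/gaia/v22/x/lsm/types"
)

// describeTokenizeShareLock is an illustrative helper; it assumes the second
// return value of GetTokenizeSharesLock is the pending unlock completion time.
func describeTokenizeShareLock(ctx sdk.Context, k *keeper.Keeper, delegator sdk.AccAddress) string {
	status, unlockTime := k.GetTokenizeSharesLock(ctx, delegator)
	switch status {
	case types.TOKENIZE_SHARE_LOCK_STATUS_LOCKED:
		return "tokenize shares disabled"
	case types.TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING:
		return fmt.Sprintf("tokenize shares re-enabled at %s", unlockTime)
	default: // TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED or unspecified
		return "tokenize shares enabled"
	}
}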
+func SimulateMsgWithdrawAllTokenizeShareRecordReward(txConfig client.TxConfig, ak types.AccountKeeper, + bk types.BankKeeper, k *keeper.Keeper, +) simtypes.Operation { + return func( + r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + msgType := sdk.MsgTypeURL(&types.MsgWithdrawTokenizeShareRecordReward{}) + rewardOwner, _ := simtypes.RandomAcc(r, accs) + + records := k.GetAllTokenizeShareRecords(ctx) + if len(records) > 0 { + record := records[r.Intn(len(records))] + for _, acc := range accs { + if acc.Address.String() == record.Owner { + rewardOwner = acc + break + } + } + } + + // if simaccount.PrivKey == nil, delegation address does not exist in accs. Return error + if rewardOwner.PrivKey == nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "account private key is nil"), nil, nil + } + + rewardOwnerAddr, err := ak.AddressCodec().BytesToString(rewardOwner.Address) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msgType, "error converting reward owner address"), nil, err + } + + msg := &types.MsgWithdrawAllTokenizeShareRecordReward{ + OwnerAddress: rewardOwnerAddr, + } + + account := ak.GetAccount(ctx, rewardOwner.Address) + spendable := bk.SpendableCoins(ctx, account.GetAddress()) + + txCtx := simulation.OperationInput{ + R: r, + App: app, + TxGen: txConfig, + Cdc: nil, + Msg: msg, + Context: ctx, + SimAccount: rewardOwner, + AccountKeeper: ak, + Bankkeeper: bk, + ModuleName: types.ModuleName, + CoinsSpentInMsg: spendable, + } + + return simulation.GenAndDeliverTxWithRandFees(txCtx) + } +} diff --git a/x/lsm/simulation/proposals.go b/x/lsm/simulation/proposals.go new file mode 100644 index 00000000000..02a39f0e642 --- /dev/null +++ b/x/lsm/simulation/proposals.go @@ -0,0 +1,51 @@ +package simulation + +import ( + "math/rand" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/simulation" + + "github.com/cosmos/gaia/v22/x/lsm/types" +) + +// Simulation operation weights constants +const ( + DefaultWeightMsgUpdateParams int = 100 + + OpWeightMsgUpdateParams = "op_weight_msg_update_params" //nolint:gosec +) + +// ProposalMsgs defines the module weighted proposals' contents +func ProposalMsgs() []simtypes.WeightedProposalMsg { + return []simtypes.WeightedProposalMsg{ + simulation.NewWeightedProposalMsg( + OpWeightMsgUpdateParams, + DefaultWeightMsgUpdateParams, + SimulateMsgUpdateParams, + ), + } +} + +// SimulateMsgUpdateParams returns a random MsgUpdateParams +func SimulateMsgUpdateParams(r *rand.Rand, _ sdk.Context, _ []simtypes.Account) sdk.Msg { + // use the default gov module account address as authority + var authority sdk.AccAddress = address.Module("gov") + + params := types.DefaultParams() + params.GlobalLiquidStakingCap = simtypes.RandomDecAmount(r, math.LegacyOneDec()) + params.ValidatorLiquidStakingCap = simtypes.RandomDecAmount(r, math.LegacyOneDec()) + randSeed := simtypes.RandIntBetween(r, -1, 10000) + if randSeed != 0 { + params.ValidatorBondFactor = math.LegacyNewDecFromInt(math.NewInt(int64(randSeed))) + } + + return &types.MsgUpdateParams{ + Authority: authority.String(), + Params: params, + } +} diff --git a/x/lsm/types/codec.go b/x/lsm/types/codec.go new file mode 100644 index 00000000000..733e0793528 --- /dev/null +++ b/x/lsm/types/codec.go @@ -0,0 +1,44 @@ +package types + +import 
( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/legacy" + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +// RegisterLegacyAminoCodec registers the necessary x/lsm interfaces +// and concrete types on the provided LegacyAmino codec. These types are used +// for Amino JSON serialization. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + legacy.RegisterAminoMsg(cdc, &MsgUpdateParams{}, "gaia/x/lsm/MsgUpdateParams") + legacy.RegisterAminoMsg(cdc, &MsgTokenizeShares{}, "gaia/x/lsm/MsgTokenizeShares") + legacy.RegisterAminoMsg(cdc, &MsgRedeemTokensForShares{}, "gaia/x/lsm/MsgRedeemTokensForShares") + legacy.RegisterAminoMsg(cdc, &MsgTransferTokenizeShareRecord{}, "gaia/x/lsm/MsgTransferTokenizeShareRecord") + legacy.RegisterAminoMsg(cdc, &MsgDisableTokenizeShares{}, "gaia/x/lsm/MsgDisableTokenizeShares") + legacy.RegisterAminoMsg(cdc, &MsgEnableTokenizeShares{}, "gaia/x/lsm/MsgEnableTokenizeShares") + // TODO eric I haven't included UnbondValidator -- do I need? + // legacy.RegisterAminoMsg(cdc, &MsgUnbondValidator{}, "cosmos-sdk/MsgUnbondValidator") + legacy.RegisterAminoMsg(cdc, &MsgWithdrawTokenizeShareRecordReward{}, "gaia/x/lsm/MsgWithdrawTokenizeShareRecordReward") + legacy.RegisterAminoMsg(cdc, &MsgWithdrawAllTokenizeShareRecordReward{}, "gaia/x/lsm/MsgWithdrawAllTokenizeShareRecordReward") + + cdc.RegisterConcrete(Params{}, "gaia/x/lsm/Params", nil) +} + +// RegisterInterfaces registers the x/lsm interfaces with the interface registry +func RegisterInterfaces(registry types.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgUpdateParams{}, + &MsgTokenizeShares{}, + &MsgRedeemTokensForShares{}, + &MsgTransferTokenizeShareRecord{}, + &MsgDisableTokenizeShares{}, + &MsgEnableTokenizeShares{}, + &MsgWithdrawTokenizeShareRecordReward{}, + &MsgWithdrawAllTokenizeShareRecordReward{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/x/lsm/types/errors.go b/x/lsm/types/errors.go new file mode 100644 index 00000000000..9f65cfc9885 --- /dev/null +++ b/x/lsm/types/errors.go @@ -0,0 +1,27 @@ +package types + +import "cosmossdk.io/errors" + +// x/lsm module sentinel errors +var ( + ErrRedelegationInProgress = errors.Register(ModuleName, 120, "delegator is not allowed to tokenize shares from validator with a redelegation in progress") + ErrInsufficientShares = errors.Register(ModuleName, 22, "insufficient delegation shares") + ErrTokenizeShareRecordNotExists = errors.Register(ModuleName, 102, "tokenize share record not exists") + ErrTokenizeShareRecordAlreadyExists = errors.Register(ModuleName, 103, "tokenize share record already exists") + ErrNotTokenizeShareRecordOwner = errors.Register(ModuleName, 104, "not tokenize share record owner") + ErrExceedingFreeVestingDelegations = errors.Register(ModuleName, 105, "trying to exceed vested free delegation for vesting account") + ErrOnlyBondDenomAllowdForTokenize = errors.Register(ModuleName, 106, "only bond denom is allowed for tokenize") + ErrInsufficientValidatorBondShares = errors.Register(ModuleName, 107, "insufficient validator bond shares") + ErrValidatorBondNotAllowedForTokenizeShare = errors.Register(ModuleName, 109, "validator bond delegation is not allowed to tokenize share") + ErrGlobalLiquidStakingCapExceeded = errors.Register(ModuleName, 111, "delegation or tokenization exceeds the global cap") + ErrValidatorLiquidStakingCapExceeded = 
errors.Register(ModuleName, 112, "delegation or tokenization exceeds the validator cap") + ErrTokenizeSharesDisabledForAccount = errors.Register(ModuleName, 113, "tokenize shares currently disabled for account") + ErrTokenizeSharesAlreadyEnabledForAccount = errors.Register(ModuleName, 115, "tokenize shares is already enabled for this account") + ErrTokenizeSharesAlreadyDisabledForAccount = errors.Register(ModuleName, 116, "tokenize shares is already disabled for this account") + ErrValidatorLiquidSharesUnderflow = errors.Register(ModuleName, 117, "validator liquid shares underflow") + ErrTotalLiquidStakedUnderflow = errors.Register(ModuleName, 118, "total liquid staked underflow") + ErrNotEnoughBalance = errors.Register(ModuleName, 101, "not enough balance") + ErrTinyRedemptionAmount = errors.Register(ModuleName, 119, "too few tokens to redeem (truncates to zero tokens)") + ErrNoDelegation = errors.Register(ModuleName, 19, "no delegation for (address, validator) tuple") + ErrNoValidatorFound = errors.Register(ModuleName, 3, "validator does not exist") +) diff --git a/x/lsm/types/events.go b/x/lsm/types/events.go new file mode 100644 index 00000000000..9040ae3dd83 --- /dev/null +++ b/x/lsm/types/events.go @@ -0,0 +1,17 @@ +package types + +// lsm module event types +const ( + EventTypeTokenizeShares = "tokenize_shares" + EventTypeRedeemShares = "redeem_shares" + EventTypeTransferTokenizeShareRecord = "transfer_tokenize_share_record" + EventTypeWithdrawTokenizeShareReward = "withdraw_tokenize_share_reward" + + AttributeKeyValidator = "validator" + AttributeKeyDelegator = "delegator" + AttributeKeyShareOwner = "share_owner" + AttributeKeyShareRecordID = "share_record_id" + AttributeKeyAmount = "amount" + AttributeKeyTokenizedShares = "tokenized_shares" + AttributeKeyWithdrawAddress = "withdraw_address" +) diff --git a/x/lsm/types/expected_keepers.go b/x/lsm/types/expected_keepers.go new file mode 100644 index 00000000000..1166d6c4a6e --- /dev/null +++ b/x/lsm/types/expected_keepers.go @@ -0,0 +1,64 @@ +package types + +import ( + "context" + + "cosmossdk.io/core/address" + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// AccountKeeper defines the expected account keeper (noalias) +type AccountKeeper interface { + AddressCodec() address.Codec + GetAccount(ctx context.Context, addr sdk.AccAddress) sdk.AccountI // only used for simulation + GetModuleAddress(name string) sdk.AccAddress + GetModuleAccount(ctx context.Context, moduleName string) sdk.ModuleAccountI +} + +// BankKeeper defines the expected interface needed to retrieve account balances. 
+type BankKeeper interface { + GetAllBalances(ctx context.Context, addr sdk.AccAddress) sdk.Coins + GetBalance(ctx context.Context, addr sdk.AccAddress, denom string) sdk.Coin + SendCoins(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToModule(ctx context.Context, senderPool, recipientPool string, amt sdk.Coins) error + UndelegateCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + MintCoins(ctx context.Context, name string, amt sdk.Coins) error + BurnCoins(ctx context.Context, name string, amt sdk.Coins) error + BlockedAddr(addr sdk.AccAddress) bool + SpendableCoins(ctx context.Context, addr sdk.AccAddress) sdk.Coins +} + +// StakingKeeper defines the expected interface needed to interact with the x/staking keeper. +type StakingKeeper interface { + GetParams(ctx context.Context) (params stakingtypes.Params, err error) + TotalBondedTokens(ctx context.Context) (math.Int, error) + GetValidator(ctx context.Context, addr sdk.ValAddress) (validator stakingtypes.Validator, err error) + SetValidator(ctx context.Context, validator stakingtypes.Validator) error + GetAllValidators(ctx context.Context) (validators []stakingtypes.Validator, err error) + GetDelegation(ctx context.Context, delAddr sdk.AccAddress, valAddr sdk.ValAddress) (stakingtypes.Delegation, error) + GetAllDelegations(ctx context.Context) (delegations []stakingtypes.Delegation, err error) + ValidatorAddressCodec() address.Codec + BondDenom(ctx context.Context) (string, error) + ValidateUnbondAmount(ctx context.Context, delAddr sdk.AccAddress, valAddr sdk.ValAddress, amt math.Int) (shares math.LegacyDec, err error) + HasReceivingRedelegation(ctx context.Context, delAddr sdk.AccAddress, valDstAddr sdk.ValAddress) (bool, error) + Unbond(ctx context.Context, delAddr sdk.AccAddress, valAddr sdk.ValAddress, shares math.LegacyDec) (amount math.Int, err error) + Delegate( + ctx context.Context, delAddr sdk.AccAddress, bondAmt math.Int, tokenSrc stakingtypes.BondStatus, + validator stakingtypes.Validator, subtractAccount bool, + ) (newShares math.LegacyDec, err error) + Validator(ctx context.Context, address sdk.ValAddress) (stakingtypes.ValidatorI, error) + Delegation(ctx context.Context, addrDel sdk.AccAddress, addrVal sdk.ValAddress) (stakingtypes.DelegationI, error) + GetValidatorDelegations(ctx context.Context, valAddr sdk.ValAddress) (delegations []stakingtypes.Delegation, err error) +} + +// DistributionKeeper defines the expected interface needed to interact with the x/distribution keeper.
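These expected-keeper interfaces are deliberately narrow, so unit tests can stub them rather than wiring up a full app. A minimal sketch, assuming mocks with testify-style expecters are generated for these interfaces under x/lsm/types/mocks (the package path, constructor names, and expecter API are assumptions, not shown in this hunk):

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/cosmos/gaia/v22/x/lsm/types/mocks" // assumed location of generated mocks
)

func TestStakingKeeperStub(t *testing.T) {
	// Stub only the method under test; the mock fails the test on unexpected calls.
	sk := mocks.NewStakingKeeper(t)
	sk.EXPECT().BondDenom(mock.Anything).Return("uatom", nil)

	denom, err := sk.BondDenom(context.Background())
	require.NoError(t, err)
	require.Equal(t, "uatom", denom)
}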
+type DistributionKeeper interface { + WithdrawDelegationRewards(ctx context.Context, delAddr sdk.AccAddress, valAddr sdk.ValAddress) (sdk.Coins, error) + IncrementValidatorPeriod(ctx context.Context, val stakingtypes.ValidatorI) (uint64, error) + CalculateDelegationRewards(ctx context.Context, val stakingtypes.ValidatorI, del stakingtypes.DelegationI, endingPeriod uint64) (rewards sdk.DecCoins, err error) +} diff --git a/x/lsm/types/genesis.go b/x/lsm/types/genesis.go new file mode 100644 index 00000000000..852dabbcdf0 --- /dev/null +++ b/x/lsm/types/genesis.go @@ -0,0 +1,29 @@ +package types + +import "cosmossdk.io/math" + +func NewGenesisState( + params Params, + tsr []TokenizeShareRecord, + recordID uint64, + liquidStakeTokens math.Int, + locks []TokenizeShareLock, +) *GenesisState { + return &GenesisState{ + Params: params, + TokenizeShareRecords: tsr, + LastTokenizeShareRecordId: recordID, + TotalLiquidStakedTokens: liquidStakeTokens, + TokenizeShareLocks: locks, + } +} + +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +func ValidateGenesis(gs *GenesisState) error { + return gs.Params.Validate() +} diff --git a/x/lsm/types/genesis.pb.go b/x/lsm/types/genesis.pb.go new file mode 100644 index 00000000000..4a7de852bd0 --- /dev/null +++ b/x/lsm/types/genesis.pb.go @@ -0,0 +1,832 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gaia/lsm/v1beta1/genesis.proto + +package types + +import ( + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the lsm module's genesis state. +type GenesisState struct { + // params defines all the parameters of related to deposit. 
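A minimal sketch of how the genesis constructors above compose, building and validating a state from illustrative empty values:

import (
	"cosmossdk.io/math"

	"github.com/cosmos/gaia/v22/x/lsm/types"
)

func exampleGenesis() error {
	// Default params, no tokenize share records, record counter at zero,
	// no liquid staked tokens, and no account locks.
	gs := types.NewGenesisState(types.DefaultParams(), nil, 0, math.ZeroInt(), nil)
	return types.ValidateGenesis(gs)
}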
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + // store tokenize share records to provide reward to record owners + TokenizeShareRecords []TokenizeShareRecord `protobuf:"bytes,9,rep,name=tokenize_share_records,json=tokenizeShareRecords,proto3" json:"tokenize_share_records"` + // last tokenize share record id, used for next share record id calculation + LastTokenizeShareRecordId uint64 `protobuf:"varint,10,opt,name=last_tokenize_share_record_id,json=lastTokenizeShareRecordId,proto3" json:"last_tokenize_share_record_id,omitempty"` + // total number of liquid staked tokens at genesis + TotalLiquidStakedTokens cosmossdk_io_math.Int `protobuf:"bytes,11,opt,name=total_liquid_staked_tokens,json=totalLiquidStakedTokens,proto3,customtype=cosmossdk.io/math.Int" json:"total_liquid_staked_tokens" yaml:"total_liquid_staked_tokens"` + // tokenize shares locks at genesis + TokenizeShareLocks []TokenizeShareLock `protobuf:"bytes,12,rep,name=tokenize_share_locks,json=tokenizeShareLocks,proto3" json:"tokenize_share_locks"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_1d00893579549cfc, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetTokenizeShareRecords() []TokenizeShareRecord { + if m != nil { + return m.TokenizeShareRecords + } + return nil +} + +func (m *GenesisState) GetLastTokenizeShareRecordId() uint64 { + if m != nil { + return m.LastTokenizeShareRecordId + } + return 0 +} + +func (m *GenesisState) GetTokenizeShareLocks() []TokenizeShareLock { + if m != nil { + return m.TokenizeShareLocks + } + return nil +} + +// TokenizeSharesLock required for specifying account locks at genesis +type TokenizeShareLock struct { + // Address of the account that is locked + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Status of the lock (LOCKED or LOCK_EXPIRING) + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + // Completion time if the lock is expiring + CompletionTime time.Time `protobuf:"bytes,3,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time" yaml:"completion_time"` +} + +func (m *TokenizeShareLock) Reset() { *m = TokenizeShareLock{} } +func (m *TokenizeShareLock) String() string { return proto.CompactTextString(m) } +func (*TokenizeShareLock) ProtoMessage() {} +func (*TokenizeShareLock) Descriptor() ([]byte, []int) { + return fileDescriptor_1d00893579549cfc, []int{1} +} +func (m *TokenizeShareLock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*TokenizeShareLock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TokenizeShareLock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TokenizeShareLock) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenizeShareLock.Merge(m, src) +} +func (m *TokenizeShareLock) XXX_Size() int { + return m.Size() +} +func (m *TokenizeShareLock) XXX_DiscardUnknown() { + xxx_messageInfo_TokenizeShareLock.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenizeShareLock proto.InternalMessageInfo + +func (m *TokenizeShareLock) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *TokenizeShareLock) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *TokenizeShareLock) GetCompletionTime() time.Time { + if m != nil { + return m.CompletionTime + } + return time.Time{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "gaia.lsm.v1beta1.GenesisState") + proto.RegisterType((*TokenizeShareLock)(nil), "gaia.lsm.v1beta1.TokenizeShareLock") +} + +func init() { proto.RegisterFile("gaia/lsm/v1beta1/genesis.proto", fileDescriptor_1d00893579549cfc) } + +var fileDescriptor_1d00893579549cfc = []byte{ + // 505 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x18, 0x86, 0x1b, 0xbb, 0x54, 0x3a, 0x2d, 0xea, 0x86, 0x5a, 0x63, 0xc1, 0xa4, 0x46, 0x84, 0xe2, + 0x61, 0xc2, 0xae, 0x37, 0xf5, 0x20, 0xb9, 0xc8, 0xc2, 0x1e, 0x24, 0xdd, 0x93, 0x1e, 0xc2, 0x34, + 0x19, 0xd3, 0xa1, 0x99, 0x4e, 0xcc, 0xf7, 0x55, 0x5c, 0x2f, 0xfe, 0x85, 0xfd, 0x19, 0x9e, 0xc4, + 0x9f, 0xb1, 0x27, 0xd9, 0xa3, 0x78, 0xa8, 0xd2, 0x1e, 0xbc, 0xef, 0x2f, 0x90, 0x99, 0xa4, 0x08, + 0xed, 0xca, 0x5e, 0x4a, 0x67, 0x9e, 0xf7, 0x7b, 0xf3, 0xf2, 0xcd, 0x4b, 0xdc, 0x8c, 0x09, 0x16, + 0xe4, 0x20, 0x83, 0x0f, 0x07, 0x13, 0x8e, 0xec, 0x20, 0xc8, 0xf8, 0x9c, 0x83, 0x00, 0x5a, 0x94, + 0x0a, 0x95, 0x7d, 0x47, 0x73, 0x9a, 0x83, 0xa4, 0x35, 0x1f, 0xf4, 0x32, 0x95, 0x29, 0x03, 0x03, + 0xfd, 0xaf, 0xd2, 0x0d, 0x06, 0x3b, 0x3e, 0x7a, 0xa6, 0x62, 0xfb, 0x4c, 0x8a, 0xb9, 0x0a, 0xcc, + 0x6f, 0x7d, 0xe5, 0x65, 0x4a, 0x65, 0x39, 0x0f, 0xcc, 0x69, 0xb2, 0x78, 0x17, 0xa0, 0x90, 0x1c, + 0x90, 0xc9, 0xa2, 0x12, 0xf8, 0xdf, 0x9b, 0xa4, 0xfb, 0xaa, 0x4a, 0x32, 0x46, 0x86, 0xdc, 0x7e, + 0x4e, 0x5a, 0x05, 0x2b, 0x99, 0x04, 0xc7, 0x1a, 0x5a, 0xa3, 0xce, 0xa1, 0x43, 0xb7, 0x93, 0xd1, + 0xd7, 0x86, 0x87, 0xed, 0xf3, 0xa5, 0xd7, 0xf8, 0xf2, 0xe7, 0xdb, 0x13, 0x2b, 0xaa, 0x47, 0x6c, + 0x46, 0xfa, 0xa8, 0x66, 0x7c, 0x2e, 0x3e, 0xf1, 0x18, 0xa6, 0xac, 0xe4, 0x71, 0xc9, 0x13, 0x55, + 0xa6, 0xe0, 0xb4, 0x87, 0xcd, 0x51, 0xe7, 0xf0, 0xf1, 0xae, 0xd9, 0x49, 0xad, 0x1f, 0x6b, 0x79, + 0x64, 0xd4, 0xe1, 0x9e, 0x76, 0x8e, 0x7a, 0xb8, 0x8b, 0xc0, 0x7e, 0x49, 0x1e, 0xe4, 0x0c, 0x30, + 0xbe, 0xf2, 0x3b, 0xb1, 0x48, 0x1d, 0x32, 0xb4, 0x46, 0x7b, 0xd1, 0x7d, 0x2d, 0xba, 0xc2, 0xfb, + 0x28, 0xb5, 0x3f, 0x93, 0x01, 0x2a, 0x64, 0x79, 0x9c, 0x8b, 0xf7, 0x0b, 0x91, 0xc6, 0x80, 0x6c, + 0xc6, 0xd3, 0xca, 0x10, 0x9c, 0xce, 0xd0, 0x1a, 0x75, 0xc3, 0x50, 0x27, 0xf8, 0xb9, 0xf4, 0xee, + 0x26, 0x0a, 0xa4, 0x02, 0x48, 0x67, 0x54, 0xa8, 0x40, 0x32, 0x9c, 0xd2, 0xa3, 0x39, 0x5e, 0x2e, + 0xbd, 0x87, 0xa7, 0x4c, 0xe6, 0xcf, 0xfc, 0xff, 0x1b, 0xf9, 0xd1, 0x3d, 0x03, 0x8f, 0x0d, 0x1b, + 0x1b, 0x64, 0xf2, 0x80, 0xfd, 0x96, 0xf4, 0xb6, 0xd2, 0xe7, 0x2a, 0x99, 0x81, 0xd3, 0x35, 0x3b, + 0x7a, 
0x74, 0xcd, 0x8e, 0x8e, 0x55, 0x32, 0xab, 0x37, 0x64, 0xe3, 0x36, 0x00, 0xff, 0xab, 0x45, + 0xf6, 0x77, 0xf4, 0xb6, 0x43, 0x6e, 0xb2, 0x34, 0x2d, 0x39, 0x54, 0xcf, 0xda, 0x8e, 0x36, 0x47, + 0xbb, 0x4f, 0x5a, 0x80, 0x0c, 0x17, 0xe0, 0xdc, 0x30, 0xa0, 0x3e, 0xd9, 0x19, 0xb9, 0x9d, 0x28, + 0x59, 0xe4, 0x1c, 0x85, 0x9a, 0xc7, 0xba, 0x36, 0x4e, 0xd3, 0x14, 0x62, 0x40, 0xab, 0x4e, 0xd1, + 0x4d, 0xa7, 0xe8, 0xc9, 0xa6, 0x53, 0xa1, 0xaf, 0x63, 0x5d, 0x2e, 0xbd, 0x7e, 0xb5, 0x9d, 0x2d, + 0x03, 0xff, 0xec, 0x97, 0x67, 0x45, 0xb7, 0xfe, 0xdd, 0xea, 0xc1, 0xf0, 0xc5, 0xf9, 0xca, 0xb5, + 0x2e, 0x56, 0xae, 0xf5, 0x7b, 0xe5, 0x5a, 0x67, 0x6b, 0xb7, 0x71, 0xb1, 0x76, 0x1b, 0x3f, 0xd6, + 0x6e, 0xe3, 0x8d, 0x9f, 0x09, 0x9c, 0x2e, 0x26, 0x34, 0x51, 0x32, 0xa8, 0xde, 0x21, 0x30, 0xed, + 0xff, 0x68, 0xfa, 0x8f, 0xa7, 0x05, 0x87, 0x49, 0xcb, 0xa4, 0x78, 0xfa, 0x37, 0x00, 0x00, 0xff, + 0xff, 0x6e, 0xd3, 0xdf, 0x79, 0x60, 0x03, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TokenizeShareLocks) > 0 { + for iNdEx := len(m.TokenizeShareLocks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenizeShareLocks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + { + size := m.TotalLiquidStakedTokens.Size() + i -= size + if _, err := m.TotalLiquidStakedTokens.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + if m.LastTokenizeShareRecordId != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.LastTokenizeShareRecordId)) + i-- + dAtA[i] = 0x50 + } + if len(m.TokenizeShareRecords) > 0 { + for iNdEx := len(m.TokenizeShareRecords) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenizeShareRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenizeShareLock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenizeShareLock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenizeShareLock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n2, err2 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.CompletionTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CompletionTime):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintGenesis(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 
+ } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.TokenizeShareRecords) > 0 { + for _, e := range m.TokenizeShareRecords { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if m.LastTokenizeShareRecordId != 0 { + n += 1 + sovGenesis(uint64(m.LastTokenizeShareRecordId)) + } + l = m.TotalLiquidStakedTokens.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.TokenizeShareLocks) > 0 { + for _, e := range m.TokenizeShareLocks { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func (m *TokenizeShareLock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CompletionTime) + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenizeShareRecords", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.TokenizeShareRecords = append(m.TokenizeShareRecords, TokenizeShareRecord{}) + if err := m.TokenizeShareRecords[len(m.TokenizeShareRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTokenizeShareRecordId", wireType) + } + m.LastTokenizeShareRecordId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastTokenizeShareRecordId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalLiquidStakedTokens", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TotalLiquidStakedTokens.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenizeShareLocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenizeShareLocks = append(m.TokenizeShareLocks, TokenizeShareLock{}) + if err := m.TokenizeShareLocks[len(m.TokenizeShareLocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenizeShareLock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenizeShareLock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenizeShareLock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.CompletionTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/lsm/types/keys.go b/x/lsm/types/keys.go new file mode 100644 index 00000000000..69587f388c1 --- /dev/null +++ b/x/lsm/types/keys.go @@ -0,0 +1,73 @@ +package types + +import ( + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" +) + +const ( + // ModuleName is the name of the lsm module + ModuleName = "lsm" + + // StoreKey is the string store representation + StoreKey = ModuleName + + // RouterKey is the msg router key for the lsm module + RouterKey = ModuleName + + // Prefix for module accounts that custodian tokenized shares + TokenizeShareModuleAccountPrefix = "tokenizeshare_" +) + +var ( + // Keys for store prefixes + // Last* values are constant during a block. + ParamsKey = []byte{0x51} // prefix for parameters for module x/lsm + + TokenizeShareRecordPrefix = []byte{0x1} // key for tokenizeshare record prefix + TokenizeShareRecordIDByOwnerPrefix = []byte{0x2} // key for tokenizeshare record id by owner prefix + TokenizeShareRecordIDByDenomPrefix = []byte{0x3} // key for tokenizeshare record id by denom prefix + LastTokenizeShareRecordIDKey = []byte{0x4} // key for last tokenize share record id + TotalLiquidStakedTokensKey = []byte{0x5} // key for total liquid staked tokens + TokenizeSharesLockPrefix = []byte{0x6} // key for locking tokenize shares + TokenizeSharesUnlockQueuePrefix = []byte{0x7} // key for the queue that unlocks tokenize shares + LiquidValidatorPrefix = []byte{0x8} // key for liquid validator prefix +) + +// GetLiquidValidatorKey returns the key of the liquid validator. +func GetLiquidValidatorKey(operatorAddress sdk.ValAddress) []byte { + return append(LiquidValidatorPrefix, address.MustLengthPrefix(operatorAddress)...) +} + +// GetTokenizeShareRecordByIndexKey returns the key of the specified id. Intended for querying the tokenizeShareRecord by the id. +func GetTokenizeShareRecordByIndexKey(id uint64) []byte { + return append(TokenizeShareRecordPrefix, sdk.Uint64ToBigEndian(id)...) +} + +// GetTokenizeShareRecordIDsByOwnerPrefix returns the key of the specified owner. Intended for querying all tokenizeShareRecords of an owner +func GetTokenizeShareRecordIDsByOwnerPrefix(owner sdk.AccAddress) []byte { + return append(TokenizeShareRecordIDByOwnerPrefix, address.MustLengthPrefix(owner)...) +} + +// GetTokenizeShareRecordIdByOwnerAndIdKey returns the key of the specified owner and id. Intended for setting tokenizeShareRecord of an owner +func GetTokenizeShareRecordIDByOwnerAndIDKey(owner sdk.AccAddress, id uint64) []byte { + return append(append(TokenizeShareRecordIDByOwnerPrefix, address.MustLengthPrefix(owner)...), sdk.Uint64ToBigEndian(id)...) +} + +func GetTokenizeShareRecordIDByDenomKey(denom string) []byte { + return append(TokenizeShareRecordIDByDenomPrefix, []byte(denom)...) +} + +// GetTokenizeSharesLockKey returns the key for storing a tokenize share lock for a specified account +func GetTokenizeSharesLockKey(owner sdk.AccAddress) []byte { + return append(TokenizeSharesLockPrefix, address.MustLengthPrefix(owner)...) +} + +// GetTokenizeShareAuthorizationTimeKey returns the prefix key used for getting a set of pending +// tokenize share unlocks that complete at the given time +func GetTokenizeShareAuthorizationTimeKey(timestamp time.Time) []byte { + bz := sdk.FormatTimeBytes(timestamp) + return append(TokenizeSharesUnlockQueuePrefix, bz...) 
+} diff --git a/x/lsm/types/liquid_validator.go b/x/lsm/types/liquid_validator.go new file mode 100644 index 00000000000..b3329714a8c --- /dev/null +++ b/x/lsm/types/liquid_validator.go @@ -0,0 +1,13 @@ +package types + +import "github.com/cosmos/cosmos-sdk/codec" + +func MustMarshalValidator(cdc codec.BinaryCodec, validator *LiquidValidator) []byte { + return cdc.MustMarshal(validator) +} + +// unmarshal from a store value +func UnmarshalValidator(cdc codec.BinaryCodec, value []byte) (v LiquidValidator, err error) { + err = cdc.Unmarshal(value, &v) + return v, err +} diff --git a/x/lsm/types/lsm.pb.go b/x/lsm/types/lsm.pb.go new file mode 100644 index 00000000000..249e442693b --- /dev/null +++ b/x/lsm/types/lsm.pb.go @@ -0,0 +1,1510 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gaia/lsm/v1beta1/lsm.proto + +package types + +import ( + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// TokenizeShareLockStatus indicates whether the address is able to tokenize +// shares +type TokenizeShareLockStatus int32 + +const ( + // UNSPECIFIED defines an empty tokenize share lock status + TOKENIZE_SHARE_LOCK_STATUS_UNSPECIFIED TokenizeShareLockStatus = 0 + // LOCKED indicates the account is locked and cannot tokenize shares + TOKENIZE_SHARE_LOCK_STATUS_LOCKED TokenizeShareLockStatus = 1 + // UNLOCKED indicates the account is unlocked and can tokenize shares + TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED TokenizeShareLockStatus = 2 + // LOCK_EXPIRING indicates the account is unable to tokenize shares, but + // will be able to tokenize shortly (after 1 unbonding period) + TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING TokenizeShareLockStatus = 3 +) + +var TokenizeShareLockStatus_name = map[int32]string{ + 0: "TOKENIZE_SHARE_LOCK_STATUS_UNSPECIFIED", + 1: "TOKENIZE_SHARE_LOCK_STATUS_LOCKED", + 2: "TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED", + 3: "TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING", +} + +var TokenizeShareLockStatus_value = map[string]int32{ + "TOKENIZE_SHARE_LOCK_STATUS_UNSPECIFIED": 0, + "TOKENIZE_SHARE_LOCK_STATUS_LOCKED": 1, + "TOKENIZE_SHARE_LOCK_STATUS_UNLOCKED": 2, + "TOKENIZE_SHARE_LOCK_STATUS_LOCK_EXPIRING": 3, +} + +func (x TokenizeShareLockStatus) String() string { + return proto.EnumName(TokenizeShareLockStatus_name, int32(x)) +} + +func (TokenizeShareLockStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1bf8cc0b3b18958, []int{0} +} + +// Params defines the parameters for the x/lsm module. 
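As background for the parameters defined next, a small worked sketch of the validator-level checks they drive, mirroring the cap logic used by the tokenize-shares simulation earlier in this diff (the helper and its inputs are illustrative, not part of the module):

import (
	"cosmossdk.io/math"

	"github.com/cosmos/gaia/v22/x/lsm/types"
)

// capsExceeded reports whether adding newShares of liquid stake to a validator
// would break either validator-level limit.
func capsExceeded(p types.Params, totalShares, liquidShares, bondShares, newShares math.LegacyDec) (liquidCapHit, bondCapHit bool) {
	// Validator liquid staking cap: fraction of the validator's shares that is liquid after the operation.
	liquidCapHit = liquidShares.Add(newShares).Quo(totalShares).GT(p.ValidatorLiquidStakingCap)
	// Validator bond cap: liquid shares may not exceed validator bond shares times the bond factor.
	bondCapHit = liquidShares.Add(newShares).GT(bondShares.Mul(p.ValidatorBondFactor))
	return liquidCapHit, bondCapHit
}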
+type Params struct { + // validator_bond_factor is required as a safety check for tokenizing shares + // and delegations from liquid staking providers + ValidatorBondFactor cosmossdk_io_math.LegacyDec `protobuf:"bytes,7,opt,name=validator_bond_factor,json=validatorBondFactor,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"validator_bond_factor" yaml:"validator_bond_factor"` + // global_liquid_staking_cap represents a cap on the portion of stake that + // comes from liquid staking providers + GlobalLiquidStakingCap cosmossdk_io_math.LegacyDec `protobuf:"bytes,8,opt,name=global_liquid_staking_cap,json=globalLiquidStakingCap,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"global_liquid_staking_cap" yaml:"global_liquid_staking_cap"` + // validator_liquid_staking_cap represents a cap on the portion of stake that + // comes from liquid staking providers for a specific validator + ValidatorLiquidStakingCap cosmossdk_io_math.LegacyDec `protobuf:"bytes,9,opt,name=validator_liquid_staking_cap,json=validatorLiquidStakingCap,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"validator_liquid_staking_cap" yaml:"validator_liquid_staking_cap"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_d1bf8cc0b3b18958, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +// TokenizeShareRecord represents a tokenized delegation +type TokenizeShareRecord struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + ModuleAccount string `protobuf:"bytes,3,opt,name=module_account,json=moduleAccount,proto3" json:"module_account,omitempty"` + Validator string `protobuf:"bytes,4,opt,name=validator,proto3" json:"validator,omitempty"` +} + +func (m *TokenizeShareRecord) Reset() { *m = TokenizeShareRecord{} } +func (m *TokenizeShareRecord) String() string { return proto.CompactTextString(m) } +func (*TokenizeShareRecord) ProtoMessage() {} +func (*TokenizeShareRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_d1bf8cc0b3b18958, []int{1} +} +func (m *TokenizeShareRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenizeShareRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TokenizeShareRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TokenizeShareRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenizeShareRecord.Merge(m, src) +} +func (m *TokenizeShareRecord) XXX_Size() int { + return m.Size() +} +func (m *TokenizeShareRecord) XXX_DiscardUnknown() { + 
xxx_messageInfo_TokenizeShareRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenizeShareRecord proto.InternalMessageInfo + +func (m *TokenizeShareRecord) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *TokenizeShareRecord) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *TokenizeShareRecord) GetModuleAccount() string { + if m != nil { + return m.ModuleAccount + } + return "" +} + +func (m *TokenizeShareRecord) GetValidator() string { + if m != nil { + return m.Validator + } + return "" +} + +// PendingTokenizeShareAuthorizations stores a list of addresses that have their +// tokenize share enablement in progress +type PendingTokenizeShareAuthorizations struct { + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (m *PendingTokenizeShareAuthorizations) Reset() { *m = PendingTokenizeShareAuthorizations{} } +func (m *PendingTokenizeShareAuthorizations) String() string { return proto.CompactTextString(m) } +func (*PendingTokenizeShareAuthorizations) ProtoMessage() {} +func (*PendingTokenizeShareAuthorizations) Descriptor() ([]byte, []int) { + return fileDescriptor_d1bf8cc0b3b18958, []int{2} +} +func (m *PendingTokenizeShareAuthorizations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PendingTokenizeShareAuthorizations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PendingTokenizeShareAuthorizations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PendingTokenizeShareAuthorizations) XXX_Merge(src proto.Message) { + xxx_messageInfo_PendingTokenizeShareAuthorizations.Merge(m, src) +} +func (m *PendingTokenizeShareAuthorizations) XXX_Size() int { + return m.Size() +} +func (m *PendingTokenizeShareAuthorizations) XXX_DiscardUnknown() { + xxx_messageInfo_PendingTokenizeShareAuthorizations.DiscardUnknown(m) +} + +var xxx_messageInfo_PendingTokenizeShareAuthorizations proto.InternalMessageInfo + +func (m *PendingTokenizeShareAuthorizations) GetAddresses() []string { + if m != nil { + return m.Addresses + } + return nil +} + +// TokenizeShareRecordReward represents the properties of tokenize share +type TokenizeShareRecordReward struct { + RecordId uint64 `protobuf:"varint,1,opt,name=record_id,json=recordId,proto3" json:"record_id,omitempty"` + Reward github_com_cosmos_cosmos_sdk_types.DecCoins `protobuf:"bytes,2,rep,name=reward,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.DecCoins" json:"reward"` +} + +func (m *TokenizeShareRecordReward) Reset() { *m = TokenizeShareRecordReward{} } +func (m *TokenizeShareRecordReward) String() string { return proto.CompactTextString(m) } +func (*TokenizeShareRecordReward) ProtoMessage() {} +func (*TokenizeShareRecordReward) Descriptor() ([]byte, []int) { + return fileDescriptor_d1bf8cc0b3b18958, []int{3} +} +func (m *TokenizeShareRecordReward) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenizeShareRecordReward) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TokenizeShareRecordReward.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TokenizeShareRecordReward) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_TokenizeShareRecordReward.Merge(m, src) +} +func (m *TokenizeShareRecordReward) XXX_Size() int { + return m.Size() +} +func (m *TokenizeShareRecordReward) XXX_DiscardUnknown() { + xxx_messageInfo_TokenizeShareRecordReward.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenizeShareRecordReward proto.InternalMessageInfo + +// LiquidValidator is the storage layout for details about a validator's liquid +// stake. +type LiquidValidator struct { + // operator_address defines the address of the validator's operator; bech + // encoded in JSON. + OperatorAddress string `protobuf:"bytes,1,opt,name=operator_address,json=operatorAddress,proto3" json:"operator_address,omitempty"` + // Number of shares self bonded from the validator + ValidatorBondShares cosmossdk_io_math.LegacyDec `protobuf:"bytes,2,opt,name=validator_bond_shares,json=validatorBondShares,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"validator_bond_shares" yaml:"validator_bond_shares"` + // Number of shares either tokenized or owned by a liquid staking provider + LiquidShares cosmossdk_io_math.LegacyDec `protobuf:"bytes,3,opt,name=liquid_shares,json=liquidShares,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"liquid_shares" yaml:"liquid_shares"` +} + +func (m *LiquidValidator) Reset() { *m = LiquidValidator{} } +func (m *LiquidValidator) String() string { return proto.CompactTextString(m) } +func (*LiquidValidator) ProtoMessage() {} +func (*LiquidValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_d1bf8cc0b3b18958, []int{4} +} +func (m *LiquidValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LiquidValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LiquidValidator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LiquidValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_LiquidValidator.Merge(m, src) +} +func (m *LiquidValidator) XXX_Size() int { + return m.Size() +} +func (m *LiquidValidator) XXX_DiscardUnknown() { + xxx_messageInfo_LiquidValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_LiquidValidator proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("gaia.lsm.v1beta1.TokenizeShareLockStatus", TokenizeShareLockStatus_name, TokenizeShareLockStatus_value) + proto.RegisterType((*Params)(nil), "gaia.lsm.v1beta1.Params") + proto.RegisterType((*TokenizeShareRecord)(nil), "gaia.lsm.v1beta1.TokenizeShareRecord") + proto.RegisterType((*PendingTokenizeShareAuthorizations)(nil), "gaia.lsm.v1beta1.PendingTokenizeShareAuthorizations") + proto.RegisterType((*TokenizeShareRecordReward)(nil), "gaia.lsm.v1beta1.TokenizeShareRecordReward") + proto.RegisterType((*LiquidValidator)(nil), "gaia.lsm.v1beta1.LiquidValidator") +} + +func init() { proto.RegisterFile("gaia/lsm/v1beta1/lsm.proto", fileDescriptor_d1bf8cc0b3b18958) } + +var fileDescriptor_d1bf8cc0b3b18958 = []byte{ + // 809 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xd8, 0x26, 0xc4, 0x03, 0x6d, 0xdd, 0xa9, 0x81, 0xb5, 0x6b, 0xad, 0xcd, 0x56, 0x05, + 0xcb, 0x10, 0x5b, 0xa5, 0x37, 0xc3, 0xc5, 0x6b, 0xbb, 0x60, 0xd5, 0x4a, 0xad, 0x5d, 0xb7, 0x42, + 0x91, 0xd0, 0x32, 0xde, 0x1d, 0xd6, 0x23, 0x7b, 0x77, 0xcc, 0xce, 0xba, 0x21, 0xfd, 0x00, 0xa8, + 0xea, 0x89, 0x0b, 0x12, 0x27, 0x54, 0x89, 0x0b, 0x42, 0x42, 0xea, 0x21, 
0x37, 0xbe, 0x40, 0xb8, + 0x45, 0x39, 0x21, 0x0e, 0x01, 0x25, 0x87, 0x20, 0x8e, 0x7c, 0x02, 0xb4, 0x33, 0x6b, 0x87, 0x24, + 0x26, 0x58, 0xe2, 0x62, 0xcf, 0xbc, 0xf7, 0x9b, 0xdf, 0xfb, 0xbd, 0x7f, 0x0b, 0x0b, 0x2e, 0xa6, + 0xb8, 0x3e, 0xe1, 0x5e, 0xfd, 0xf1, 0x9d, 0x21, 0x09, 0xf1, 0x9d, 0xe8, 0x5c, 0x9b, 0x06, 0x2c, + 0x64, 0x28, 0x1b, 0xf9, 0x6a, 0xd1, 0x3d, 0xf6, 0x15, 0x72, 0x2e, 0x73, 0x99, 0x70, 0xd6, 0xa3, + 0x93, 0xc4, 0x15, 0xae, 0x63, 0x8f, 0xfa, 0xac, 0x2e, 0x7e, 0x63, 0x93, 0x6a, 0x33, 0xee, 0x31, + 0x5e, 0x1f, 0x62, 0x4e, 0x16, 0xcc, 0x36, 0xa3, 0x7e, 0xec, 0xcf, 0x4b, 0xbf, 0x25, 0xb9, 0xe4, + 0x45, 0xba, 0xb4, 0x3f, 0x53, 0x70, 0xad, 0x8f, 0x03, 0xec, 0x71, 0xf4, 0x0c, 0xc0, 0xd7, 0x1e, + 0xe3, 0x09, 0x75, 0x70, 0xc8, 0x02, 0x6b, 0xc8, 0x7c, 0xc7, 0xfa, 0x0c, 0xdb, 0x21, 0x0b, 0x94, + 0x97, 0xcb, 0xa0, 0x92, 0xd1, 0x1f, 0xed, 0x1d, 0x96, 0x12, 0xbf, 0x1e, 0x96, 0x6e, 0x4a, 0x02, + 0xee, 0x8c, 0x6b, 0x94, 0xd5, 0x3d, 0x1c, 0x8e, 0x6a, 0x3d, 0xe2, 0x62, 0x7b, 0xa7, 0x4d, 0xec, + 0xbf, 0x0e, 0x4b, 0xc5, 0x1d, 0xec, 0x4d, 0x1a, 0xda, 0x52, 0x26, 0xed, 0x60, 0x77, 0x03, 0xc6, + 0xf1, 0xdb, 0xc4, 0xfe, 0xfe, 0xe4, 0x45, 0x15, 0x18, 0x37, 0x16, 0x50, 0x9d, 0xf9, 0xce, 0x3d, + 0x01, 0x44, 0x5f, 0x03, 0x98, 0x77, 0x27, 0x6c, 0x88, 0x27, 0xd6, 0x84, 0x7e, 0x3e, 0xa3, 0x8e, + 0xc5, 0x43, 0x3c, 0xa6, 0xbe, 0x6b, 0xd9, 0x78, 0xaa, 0xac, 0x0b, 0x41, 0x5b, 0xab, 0x09, 0x2a, + 0x4b, 0x41, 0xff, 0xca, 0xb6, 0x54, 0xd4, 0xeb, 0x12, 0xde, 0x13, 0x68, 0x53, 0x82, 0x5b, 0x78, + 0x8a, 0xbe, 0x05, 0xb0, 0x78, 0x9a, 0xda, 0x12, 0x69, 0x19, 0x21, 0xed, 0x93, 0xd5, 0xa4, 0xdd, + 0x3a, 0x5f, 0xab, 0x15, 0xd5, 0xe5, 0x17, 0x2f, 0xce, 0x0b, 0x6c, 0x14, 0xfe, 0x78, 0x5e, 0x02, + 0xcf, 0x4e, 0x5e, 0x54, 0xaf, 0x8b, 0x59, 0xfb, 0x42, 0x4c, 0x9b, 0xec, 0xb0, 0xf6, 0x25, 0x80, + 0x37, 0x06, 0x6c, 0x4c, 0x7c, 0xfa, 0x84, 0x98, 0x23, 0x1c, 0x10, 0x83, 0xd8, 0x2c, 0x70, 0xd0, + 0x55, 0x98, 0xa4, 0x8e, 0x02, 0xca, 0xa0, 0x92, 0x36, 0x92, 0xd4, 0x41, 0x39, 0xf8, 0x12, 0xdb, + 0xf6, 0x49, 0xa0, 0x24, 0xa3, 0x64, 0x0c, 0x79, 0x41, 0xb7, 0xe1, 0x55, 0x8f, 0x39, 0xb3, 0x09, + 0xb1, 0xb0, 0x6d, 0xb3, 0x99, 0x1f, 0x2a, 0x29, 0xe1, 0xbe, 0x22, 0xad, 0x4d, 0x69, 0x44, 0x45, + 0x98, 0x59, 0xa8, 0x53, 0xd2, 0x02, 0x71, 0x6a, 0x68, 0xa4, 0x23, 0x79, 0x9a, 0x0e, 0xb5, 0x3e, + 0xf1, 0x1d, 0xea, 0xbb, 0x67, 0xe4, 0x34, 0x67, 0xe1, 0x88, 0x05, 0xf4, 0x09, 0x0e, 0x29, 0xf3, + 0x79, 0xc4, 0x84, 0x1d, 0x27, 0x20, 0x9c, 0x13, 0xae, 0x80, 0x72, 0x2a, 0x62, 0x5a, 0x18, 0xb4, + 0x1f, 0x01, 0xcc, 0x2f, 0x49, 0xc6, 0x20, 0xdb, 0x38, 0x70, 0xd0, 0x4d, 0x98, 0x09, 0xc4, 0xdd, + 0x5a, 0x64, 0xb6, 0x2e, 0x0d, 0x5d, 0x07, 0x51, 0xb8, 0x16, 0x08, 0x98, 0x92, 0x2c, 0xa7, 0x2a, + 0xaf, 0xbc, 0x57, 0xac, 0xc5, 0x05, 0x8e, 0x16, 0x68, 0xbe, 0x7e, 0x51, 0xb5, 0x5b, 0x8c, 0xfa, + 0xfa, 0xdd, 0xa8, 0x97, 0x3f, 0xfc, 0x56, 0x7a, 0xc7, 0xa5, 0xe1, 0x68, 0x36, 0xac, 0xd9, 0xcc, + 0x8b, 0x77, 0x28, 0xfe, 0xdb, 0xe0, 0xce, 0xb8, 0x1e, 0xee, 0x4c, 0x09, 0x9f, 0xbf, 0xe1, 0x46, + 0x1c, 0xa0, 0xb1, 0xfe, 0xf4, 0x79, 0x29, 0xf1, 0x4d, 0x94, 0xf3, 0x4f, 0x49, 0x78, 0x4d, 0x76, + 0xeb, 0xd1, 0xbc, 0x1a, 0xa8, 0x05, 0xb3, 0x6c, 0x4a, 0x02, 0xd1, 0xfa, 0x38, 0x33, 0x21, 0x36, + 0xa3, 0x2b, 0x07, 0xbb, 0x1b, 0xb9, 0x58, 0x55, 0x53, 0x7a, 0xcc, 0x30, 0xa0, 0xbe, 0x6b, 0x5c, + 0x9b, 0xbf, 0x88, 0xcd, 0x68, 0xfb, 0xc2, 0xda, 0xf2, 0xa8, 0x1c, 0x5c, 0x76, 0x4f, 0x6f, 0xfd, + 0x9f, 0xb5, 0x95, 0x4c, 0xda, 0xb9, 0x1d, 0x15, 0xe5, 0xe6, 0xe8, 0x53, 0x78, 0x65, 0x3e, 0xaf, + 0x32, 0xa0, 0x98, 0x07, 0xfd, 0xfd, 0xd5, 0x02, 0xe6, 0x64, 0xc0, 0x33, 0x0c, 0x9a, 0xf1, 0xaa, + 
0xbc, 0xcb, 0x08, 0xa7, 0xd5, 0xab, 0xfe, 0x0c, 0xe0, 0x1b, 0x67, 0xba, 0xdd, 0x63, 0xf6, 0xd8, + 0x0c, 0x71, 0x38, 0xe3, 0xa8, 0x0a, 0xdf, 0x1a, 0x3c, 0xb8, 0xdf, 0xd9, 0xec, 0x6e, 0x75, 0x2c, + 0xf3, 0xa3, 0xa6, 0xd1, 0xb1, 0x7a, 0x0f, 0x5a, 0xf7, 0x2d, 0x73, 0xd0, 0x1c, 0x3c, 0x34, 0xad, + 0x87, 0x9b, 0x66, 0xbf, 0xd3, 0xea, 0xde, 0xeb, 0x76, 0xda, 0xd9, 0x04, 0xba, 0x0d, 0xdf, 0xbc, + 0x04, 0x1b, 0x9d, 0x3b, 0xed, 0x2c, 0x40, 0x6f, 0xc3, 0x5b, 0x97, 0x52, 0xc6, 0xc0, 0x24, 0x7a, + 0x17, 0x56, 0xfe, 0x83, 0xcf, 0xea, 0x7c, 0xdc, 0xef, 0x1a, 0xdd, 0xcd, 0x0f, 0xb3, 0xa9, 0x42, + 0xfa, 0xe9, 0x77, 0x6a, 0x42, 0xff, 0x60, 0xef, 0x48, 0x05, 0xfb, 0x47, 0x2a, 0xf8, 0xfd, 0x48, + 0x05, 0x5f, 0x1d, 0xab, 0x89, 0xfd, 0x63, 0x35, 0xf1, 0xcb, 0xb1, 0x9a, 0xd8, 0xd2, 0x2e, 0x8e, + 0xd8, 0x3f, 0xb6, 0x58, 0x8c, 0xd8, 0x70, 0x4d, 0x7c, 0xb8, 0xef, 0xfe, 0x1d, 0x00, 0x00, 0xff, + 0xff, 0x24, 0xb8, 0x46, 0x83, 0x4c, 0x06, 0x00, 0x00, +} + +func (this *Params) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Params) + if !ok { + that2, ok := that.(Params) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ValidatorBondFactor.Equal(that1.ValidatorBondFactor) { + return false + } + if !this.GlobalLiquidStakingCap.Equal(that1.GlobalLiquidStakingCap) { + return false + } + if !this.ValidatorLiquidStakingCap.Equal(that1.ValidatorLiquidStakingCap) { + return false + } + return true +} +func (this *TokenizeShareRecord) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TokenizeShareRecord) + if !ok { + that2, ok := that.(TokenizeShareRecord) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + if this.Owner != that1.Owner { + return false + } + if this.ModuleAccount != that1.ModuleAccount { + return false + } + if this.Validator != that1.Validator { + return false + } + return true +} +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.ValidatorLiquidStakingCap.Size() + i -= size + if _, err := m.ValidatorLiquidStakingCap.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLsm(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size := m.GlobalLiquidStakingCap.Size() + i -= size + if _, err := m.GlobalLiquidStakingCap.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLsm(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + { + size := m.ValidatorBondFactor.Size() + i -= size + if _, err := m.ValidatorBondFactor.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLsm(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} + +func (m *TokenizeShareRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*TokenizeShareRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenizeShareRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Validator) > 0 { + i -= len(m.Validator) + copy(dAtA[i:], m.Validator) + i = encodeVarintLsm(dAtA, i, uint64(len(m.Validator))) + i-- + dAtA[i] = 0x22 + } + if len(m.ModuleAccount) > 0 { + i -= len(m.ModuleAccount) + copy(dAtA[i:], m.ModuleAccount) + i = encodeVarintLsm(dAtA, i, uint64(len(m.ModuleAccount))) + i-- + dAtA[i] = 0x1a + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintLsm(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintLsm(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PendingTokenizeShareAuthorizations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PendingTokenizeShareAuthorizations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PendingTokenizeShareAuthorizations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintLsm(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TokenizeShareRecordReward) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenizeShareRecordReward) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenizeShareRecordReward) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reward) > 0 { + for iNdEx := len(m.Reward) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Reward[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLsm(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.RecordId != 0 { + i = encodeVarintLsm(dAtA, i, uint64(m.RecordId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LiquidValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LiquidValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LiquidValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.LiquidShares.Size() + i -= size + if _, err := m.LiquidShares.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLsm(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size := m.ValidatorBondShares.Size() + i -= size + if _, err := m.ValidatorBondShares.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLsm(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.OperatorAddress) > 0 { 
+ i -= len(m.OperatorAddress) + copy(dAtA[i:], m.OperatorAddress) + i = encodeVarintLsm(dAtA, i, uint64(len(m.OperatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintLsm(dAtA []byte, offset int, v uint64) int { + offset -= sovLsm(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ValidatorBondFactor.Size() + n += 1 + l + sovLsm(uint64(l)) + l = m.GlobalLiquidStakingCap.Size() + n += 1 + l + sovLsm(uint64(l)) + l = m.ValidatorLiquidStakingCap.Size() + n += 1 + l + sovLsm(uint64(l)) + return n +} + +func (m *TokenizeShareRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovLsm(uint64(m.Id)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovLsm(uint64(l)) + } + l = len(m.ModuleAccount) + if l > 0 { + n += 1 + l + sovLsm(uint64(l)) + } + l = len(m.Validator) + if l > 0 { + n += 1 + l + sovLsm(uint64(l)) + } + return n +} + +func (m *PendingTokenizeShareAuthorizations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovLsm(uint64(l)) + } + } + return n +} + +func (m *TokenizeShareRecordReward) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RecordId != 0 { + n += 1 + sovLsm(uint64(m.RecordId)) + } + if len(m.Reward) > 0 { + for _, e := range m.Reward { + l = e.Size() + n += 1 + l + sovLsm(uint64(l)) + } + } + return n +} + +func (m *LiquidValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OperatorAddress) + if l > 0 { + n += 1 + l + sovLsm(uint64(l)) + } + l = m.ValidatorBondShares.Size() + n += 1 + l + sovLsm(uint64(l)) + l = m.LiquidShares.Size() + n += 1 + l + sovLsm(uint64(l)) + return n +} + +func sovLsm(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLsm(x uint64) (n int) { + return sovLsm(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorBondFactor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ValidatorBondFactor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 
8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalLiquidStakingCap", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GlobalLiquidStakingCap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorLiquidStakingCap", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ValidatorLiquidStakingCap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLsm(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLsm + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenizeShareRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenizeShareRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenizeShareRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModuleAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ModuleAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLsm(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLsm + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PendingTokenizeShareAuthorizations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PendingTokenizeShareAuthorizations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PendingTokenizeShareAuthorizations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLsm(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLsm + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenizeShareRecordReward) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenizeShareRecordReward: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenizeShareRecordReward: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecordId", wireType) + } + m.RecordId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RecordId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reward", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reward = append(m.Reward, types.DecCoin{}) + if err := m.Reward[len(m.Reward)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLsm(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLsm + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LiquidValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LiquidValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LiquidValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OperatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorBondShares", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ValidatorBondShares.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LiquidShares", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLsm + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLsm + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLsm + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LiquidShares.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLsm(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLsm + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLsm(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLsm + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLsm + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLsm + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLsm + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLsm + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLsm + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLsm = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLsm = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLsm = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/lsm/types/mocks/AccountKeeper.go b/x/lsm/types/mocks/AccountKeeper.go new file mode 100644 index 00000000000..c2345760031 --- /dev/null +++ b/x/lsm/types/mocks/AccountKeeper.go @@ -0,0 +1,233 @@ +// Code generated by mockery. DO NOT EDIT. 
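+//
+// Editorial note, not generated output: every keeper mock in this package
+// exposes mockery's expecter API. EXPECT() returns a typed *_Call helper whose
+// Return(...) fixes the values a call yields, Run(...) lets a test inspect the
+// arguments, and RunAndReturn(...) derives the return values from the arguments.
+// A minimal sketch, assuming the usual stretchr/testify mock and cosmos-sdk
+// authtypes import aliases:
+//
+//	ak := mocks.NewAccountKeeper(t)
+//	// Derive module addresses the same way the SDK's authtypes helper does.
+//	ak.EXPECT().GetModuleAddress(mock.Anything).RunAndReturn(authtypes.NewModuleAddress)
+//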
+ +package mocks + +import ( + context "context" + + address "cosmossdk.io/core/address" + + cosmos_sdktypes "github.com/cosmos/cosmos-sdk/types" + + mock "github.com/stretchr/testify/mock" +) + +// AccountKeeper is an autogenerated mock type for the AccountKeeper type +type AccountKeeper struct { + mock.Mock +} + +type AccountKeeper_Expecter struct { + mock *mock.Mock +} + +func (_m *AccountKeeper) EXPECT() *AccountKeeper_Expecter { + return &AccountKeeper_Expecter{mock: &_m.Mock} +} + +// AddressCodec provides a mock function with given fields: +func (_m *AccountKeeper) AddressCodec() address.Codec { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AddressCodec") + } + + var r0 address.Codec + if rf, ok := ret.Get(0).(func() address.Codec); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(address.Codec) + } + } + + return r0 +} + +// AccountKeeper_AddressCodec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddressCodec' +type AccountKeeper_AddressCodec_Call struct { + *mock.Call +} + +// AddressCodec is a helper method to define mock.On call +func (_e *AccountKeeper_Expecter) AddressCodec() *AccountKeeper_AddressCodec_Call { + return &AccountKeeper_AddressCodec_Call{Call: _e.mock.On("AddressCodec")} +} + +func (_c *AccountKeeper_AddressCodec_Call) Run(run func()) *AccountKeeper_AddressCodec_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AccountKeeper_AddressCodec_Call) Return(_a0 address.Codec) *AccountKeeper_AddressCodec_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AccountKeeper_AddressCodec_Call) RunAndReturn(run func() address.Codec) *AccountKeeper_AddressCodec_Call { + _c.Call.Return(run) + return _c +} + +// GetAccount provides a mock function with given fields: ctx, addr +func (_m *AccountKeeper) GetAccount(ctx context.Context, addr cosmos_sdktypes.AccAddress) cosmos_sdktypes.AccountI { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + + var r0 cosmos_sdktypes.AccountI + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress) cosmos_sdktypes.AccountI); ok { + r0 = rf(ctx, addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cosmos_sdktypes.AccountI) + } + } + + return r0 +} + +// AccountKeeper_GetAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAccount' +type AccountKeeper_GetAccount_Call struct { + *mock.Call +} + +// GetAccount is a helper method to define mock.On call +// - ctx context.Context +// - addr cosmos_sdktypes.AccAddress +func (_e *AccountKeeper_Expecter) GetAccount(ctx interface{}, addr interface{}) *AccountKeeper_GetAccount_Call { + return &AccountKeeper_GetAccount_Call{Call: _e.mock.On("GetAccount", ctx, addr)} +} + +func (_c *AccountKeeper_GetAccount_Call) Run(run func(ctx context.Context, addr cosmos_sdktypes.AccAddress)) *AccountKeeper_GetAccount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress)) + }) + return _c +} + +func (_c *AccountKeeper_GetAccount_Call) Return(_a0 cosmos_sdktypes.AccountI) *AccountKeeper_GetAccount_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AccountKeeper_GetAccount_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress) cosmos_sdktypes.AccountI) *AccountKeeper_GetAccount_Call { + _c.Call.Return(run) + return _c +} + +// GetModuleAccount provides a 
mock function with given fields: ctx, moduleName +func (_m *AccountKeeper) GetModuleAccount(ctx context.Context, moduleName string) cosmos_sdktypes.ModuleAccountI { + ret := _m.Called(ctx, moduleName) + + if len(ret) == 0 { + panic("no return value specified for GetModuleAccount") + } + + var r0 cosmos_sdktypes.ModuleAccountI + if rf, ok := ret.Get(0).(func(context.Context, string) cosmos_sdktypes.ModuleAccountI); ok { + r0 = rf(ctx, moduleName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cosmos_sdktypes.ModuleAccountI) + } + } + + return r0 +} + +// AccountKeeper_GetModuleAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetModuleAccount' +type AccountKeeper_GetModuleAccount_Call struct { + *mock.Call +} + +// GetModuleAccount is a helper method to define mock.On call +// - ctx context.Context +// - moduleName string +func (_e *AccountKeeper_Expecter) GetModuleAccount(ctx interface{}, moduleName interface{}) *AccountKeeper_GetModuleAccount_Call { + return &AccountKeeper_GetModuleAccount_Call{Call: _e.mock.On("GetModuleAccount", ctx, moduleName)} +} + +func (_c *AccountKeeper_GetModuleAccount_Call) Run(run func(ctx context.Context, moduleName string)) *AccountKeeper_GetModuleAccount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *AccountKeeper_GetModuleAccount_Call) Return(_a0 cosmos_sdktypes.ModuleAccountI) *AccountKeeper_GetModuleAccount_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AccountKeeper_GetModuleAccount_Call) RunAndReturn(run func(context.Context, string) cosmos_sdktypes.ModuleAccountI) *AccountKeeper_GetModuleAccount_Call { + _c.Call.Return(run) + return _c +} + +// GetModuleAddress provides a mock function with given fields: name +func (_m *AccountKeeper) GetModuleAddress(name string) cosmos_sdktypes.AccAddress { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for GetModuleAddress") + } + + var r0 cosmos_sdktypes.AccAddress + if rf, ok := ret.Get(0).(func(string) cosmos_sdktypes.AccAddress); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cosmos_sdktypes.AccAddress) + } + } + + return r0 +} + +// AccountKeeper_GetModuleAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetModuleAddress' +type AccountKeeper_GetModuleAddress_Call struct { + *mock.Call +} + +// GetModuleAddress is a helper method to define mock.On call +// - name string +func (_e *AccountKeeper_Expecter) GetModuleAddress(name interface{}) *AccountKeeper_GetModuleAddress_Call { + return &AccountKeeper_GetModuleAddress_Call{Call: _e.mock.On("GetModuleAddress", name)} +} + +func (_c *AccountKeeper_GetModuleAddress_Call) Run(run func(name string)) *AccountKeeper_GetModuleAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *AccountKeeper_GetModuleAddress_Call) Return(_a0 cosmos_sdktypes.AccAddress) *AccountKeeper_GetModuleAddress_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AccountKeeper_GetModuleAddress_Call) RunAndReturn(run func(string) cosmos_sdktypes.AccAddress) *AccountKeeper_GetModuleAddress_Call { + _c.Call.Return(run) + return _c +} + +// NewAccountKeeper creates a new instance of AccountKeeper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
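+//
+// Illustrative only (not part of the mockery output): a keeper unit test could
+// stub this mock roughly as below. The "lsm" module name and the sdk/mock import
+// aliases (cosmos-sdk types and stretchr/testify mock) are assumptions of the sketch.
+//
+//	ak := mocks.NewAccountKeeper(t)
+//	// Return a fixed module account address for the assumed "lsm" module.
+//	ak.EXPECT().GetModuleAddress("lsm").Return(sdk.AccAddress("lsm_module_address______"))
+//	// Tolerate GetAccount calls with any context and address.
+//	ak.EXPECT().GetAccount(mock.Anything, mock.Anything).Return(nil).Maybe()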
+func NewAccountKeeper(t interface { + mock.TestingT + Cleanup(func()) +}) *AccountKeeper { + mock := &AccountKeeper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/x/lsm/types/mocks/BankKeeper.go b/x/lsm/types/mocks/BankKeeper.go new file mode 100644 index 00000000000..3774f17eb3f --- /dev/null +++ b/x/lsm/types/mocks/BankKeeper.go @@ -0,0 +1,571 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/cosmos/cosmos-sdk/types" +) + +// BankKeeper is an autogenerated mock type for the BankKeeper type +type BankKeeper struct { + mock.Mock +} + +type BankKeeper_Expecter struct { + mock *mock.Mock +} + +func (_m *BankKeeper) EXPECT() *BankKeeper_Expecter { + return &BankKeeper_Expecter{mock: &_m.Mock} +} + +// BlockedAddr provides a mock function with given fields: addr +func (_m *BankKeeper) BlockedAddr(addr types.AccAddress) bool { + ret := _m.Called(addr) + + if len(ret) == 0 { + panic("no return value specified for BlockedAddr") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(types.AccAddress) bool); ok { + r0 = rf(addr) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// BankKeeper_BlockedAddr_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockedAddr' +type BankKeeper_BlockedAddr_Call struct { + *mock.Call +} + +// BlockedAddr is a helper method to define mock.On call +// - addr types.AccAddress +func (_e *BankKeeper_Expecter) BlockedAddr(addr interface{}) *BankKeeper_BlockedAddr_Call { + return &BankKeeper_BlockedAddr_Call{Call: _e.mock.On("BlockedAddr", addr)} +} + +func (_c *BankKeeper_BlockedAddr_Call) Run(run func(addr types.AccAddress)) *BankKeeper_BlockedAddr_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.AccAddress)) + }) + return _c +} + +func (_c *BankKeeper_BlockedAddr_Call) Return(_a0 bool) *BankKeeper_BlockedAddr_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_BlockedAddr_Call) RunAndReturn(run func(types.AccAddress) bool) *BankKeeper_BlockedAddr_Call { + _c.Call.Return(run) + return _c +} + +// BurnCoins provides a mock function with given fields: ctx, name, amt +func (_m *BankKeeper) BurnCoins(ctx context.Context, name string, amt types.Coins) error { + ret := _m.Called(ctx, name, amt) + + if len(ret) == 0 { + panic("no return value specified for BurnCoins") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, types.Coins) error); ok { + r0 = rf(ctx, name, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_BurnCoins_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BurnCoins' +type BankKeeper_BurnCoins_Call struct { + *mock.Call +} + +// BurnCoins is a helper method to define mock.On call +// - ctx context.Context +// - name string +// - amt types.Coins +func (_e *BankKeeper_Expecter) BurnCoins(ctx interface{}, name interface{}, amt interface{}) *BankKeeper_BurnCoins_Call { + return &BankKeeper_BurnCoins_Call{Call: _e.mock.On("BurnCoins", ctx, name, amt)} +} + +func (_c *BankKeeper_BurnCoins_Call) Run(run func(ctx context.Context, name string, amt types.Coins)) *BankKeeper_BurnCoins_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_BurnCoins_Call) Return(_a0 error) *BankKeeper_BurnCoins_Call { + 
_c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_BurnCoins_Call) RunAndReturn(run func(context.Context, string, types.Coins) error) *BankKeeper_BurnCoins_Call { + _c.Call.Return(run) + return _c +} + +// GetAllBalances provides a mock function with given fields: ctx, addr +func (_m *BankKeeper) GetAllBalances(ctx context.Context, addr types.AccAddress) types.Coins { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for GetAllBalances") + } + + var r0 types.Coins + if rf, ok := ret.Get(0).(func(context.Context, types.AccAddress) types.Coins); ok { + r0 = rf(ctx, addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Coins) + } + } + + return r0 +} + +// BankKeeper_GetAllBalances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllBalances' +type BankKeeper_GetAllBalances_Call struct { + *mock.Call +} + +// GetAllBalances is a helper method to define mock.On call +// - ctx context.Context +// - addr types.AccAddress +func (_e *BankKeeper_Expecter) GetAllBalances(ctx interface{}, addr interface{}) *BankKeeper_GetAllBalances_Call { + return &BankKeeper_GetAllBalances_Call{Call: _e.mock.On("GetAllBalances", ctx, addr)} +} + +func (_c *BankKeeper_GetAllBalances_Call) Run(run func(ctx context.Context, addr types.AccAddress)) *BankKeeper_GetAllBalances_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.AccAddress)) + }) + return _c +} + +func (_c *BankKeeper_GetAllBalances_Call) Return(_a0 types.Coins) *BankKeeper_GetAllBalances_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_GetAllBalances_Call) RunAndReturn(run func(context.Context, types.AccAddress) types.Coins) *BankKeeper_GetAllBalances_Call { + _c.Call.Return(run) + return _c +} + +// GetBalance provides a mock function with given fields: ctx, addr, denom +func (_m *BankKeeper) GetBalance(ctx context.Context, addr types.AccAddress, denom string) types.Coin { + ret := _m.Called(ctx, addr, denom) + + if len(ret) == 0 { + panic("no return value specified for GetBalance") + } + + var r0 types.Coin + if rf, ok := ret.Get(0).(func(context.Context, types.AccAddress, string) types.Coin); ok { + r0 = rf(ctx, addr, denom) + } else { + r0 = ret.Get(0).(types.Coin) + } + + return r0 +} + +// BankKeeper_GetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBalance' +type BankKeeper_GetBalance_Call struct { + *mock.Call +} + +// GetBalance is a helper method to define mock.On call +// - ctx context.Context +// - addr types.AccAddress +// - denom string +func (_e *BankKeeper_Expecter) GetBalance(ctx interface{}, addr interface{}, denom interface{}) *BankKeeper_GetBalance_Call { + return &BankKeeper_GetBalance_Call{Call: _e.mock.On("GetBalance", ctx, addr, denom)} +} + +func (_c *BankKeeper_GetBalance_Call) Run(run func(ctx context.Context, addr types.AccAddress, denom string)) *BankKeeper_GetBalance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.AccAddress), args[2].(string)) + }) + return _c +} + +func (_c *BankKeeper_GetBalance_Call) Return(_a0 types.Coin) *BankKeeper_GetBalance_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_GetBalance_Call) RunAndReturn(run func(context.Context, types.AccAddress, string) types.Coin) *BankKeeper_GetBalance_Call { + _c.Call.Return(run) + return _c +} + +// MintCoins provides a mock function with given fields: cts, name, amt +func 
(_m *BankKeeper) MintCoins(cts context.Context, name string, amt types.Coins) error { + ret := _m.Called(cts, name, amt) + + if len(ret) == 0 { + panic("no return value specified for MintCoins") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, types.Coins) error); ok { + r0 = rf(cts, name, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_MintCoins_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MintCoins' +type BankKeeper_MintCoins_Call struct { + *mock.Call +} + +// MintCoins is a helper method to define mock.On call +// - cts context.Context +// - name string +// - amt types.Coins +func (_e *BankKeeper_Expecter) MintCoins(cts interface{}, name interface{}, amt interface{}) *BankKeeper_MintCoins_Call { + return &BankKeeper_MintCoins_Call{Call: _e.mock.On("MintCoins", cts, name, amt)} +} + +func (_c *BankKeeper_MintCoins_Call) Run(run func(cts context.Context, name string, amt types.Coins)) *BankKeeper_MintCoins_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_MintCoins_Call) Return(_a0 error) *BankKeeper_MintCoins_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_MintCoins_Call) RunAndReturn(run func(context.Context, string, types.Coins) error) *BankKeeper_MintCoins_Call { + _c.Call.Return(run) + return _c +} + +// SendCoins provides a mock function with given fields: ctx, fromAddr, toAddr, amt +func (_m *BankKeeper) SendCoins(ctx context.Context, fromAddr types.AccAddress, toAddr types.AccAddress, amt types.Coins) error { + ret := _m.Called(ctx, fromAddr, toAddr, amt) + + if len(ret) == 0 { + panic("no return value specified for SendCoins") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.AccAddress, types.AccAddress, types.Coins) error); ok { + r0 = rf(ctx, fromAddr, toAddr, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_SendCoins_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCoins' +type BankKeeper_SendCoins_Call struct { + *mock.Call +} + +// SendCoins is a helper method to define mock.On call +// - ctx context.Context +// - fromAddr types.AccAddress +// - toAddr types.AccAddress +// - amt types.Coins +func (_e *BankKeeper_Expecter) SendCoins(ctx interface{}, fromAddr interface{}, toAddr interface{}, amt interface{}) *BankKeeper_SendCoins_Call { + return &BankKeeper_SendCoins_Call{Call: _e.mock.On("SendCoins", ctx, fromAddr, toAddr, amt)} +} + +func (_c *BankKeeper_SendCoins_Call) Run(run func(ctx context.Context, fromAddr types.AccAddress, toAddr types.AccAddress, amt types.Coins)) *BankKeeper_SendCoins_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.AccAddress), args[2].(types.AccAddress), args[3].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_SendCoins_Call) Return(_a0 error) *BankKeeper_SendCoins_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_SendCoins_Call) RunAndReturn(run func(context.Context, types.AccAddress, types.AccAddress, types.Coins) error) *BankKeeper_SendCoins_Call { + _c.Call.Return(run) + return _c +} + +// SendCoinsFromAccountToModule provides a mock function with given fields: ctx, senderAddr, recipientModule, amt +func (_m *BankKeeper) SendCoinsFromAccountToModule(ctx context.Context, senderAddr types.AccAddress, recipientModule string, amt 
types.Coins) error { + ret := _m.Called(ctx, senderAddr, recipientModule, amt) + + if len(ret) == 0 { + panic("no return value specified for SendCoinsFromAccountToModule") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.AccAddress, string, types.Coins) error); ok { + r0 = rf(ctx, senderAddr, recipientModule, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_SendCoinsFromAccountToModule_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCoinsFromAccountToModule' +type BankKeeper_SendCoinsFromAccountToModule_Call struct { + *mock.Call +} + +// SendCoinsFromAccountToModule is a helper method to define mock.On call +// - ctx context.Context +// - senderAddr types.AccAddress +// - recipientModule string +// - amt types.Coins +func (_e *BankKeeper_Expecter) SendCoinsFromAccountToModule(ctx interface{}, senderAddr interface{}, recipientModule interface{}, amt interface{}) *BankKeeper_SendCoinsFromAccountToModule_Call { + return &BankKeeper_SendCoinsFromAccountToModule_Call{Call: _e.mock.On("SendCoinsFromAccountToModule", ctx, senderAddr, recipientModule, amt)} +} + +func (_c *BankKeeper_SendCoinsFromAccountToModule_Call) Run(run func(ctx context.Context, senderAddr types.AccAddress, recipientModule string, amt types.Coins)) *BankKeeper_SendCoinsFromAccountToModule_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.AccAddress), args[2].(string), args[3].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_SendCoinsFromAccountToModule_Call) Return(_a0 error) *BankKeeper_SendCoinsFromAccountToModule_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_SendCoinsFromAccountToModule_Call) RunAndReturn(run func(context.Context, types.AccAddress, string, types.Coins) error) *BankKeeper_SendCoinsFromAccountToModule_Call { + _c.Call.Return(run) + return _c +} + +// SendCoinsFromModuleToAccount provides a mock function with given fields: ctx, senderModule, recipientAddr, amt +func (_m *BankKeeper) SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins) error { + ret := _m.Called(ctx, senderModule, recipientAddr, amt) + + if len(ret) == 0 { + panic("no return value specified for SendCoinsFromModuleToAccount") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, types.AccAddress, types.Coins) error); ok { + r0 = rf(ctx, senderModule, recipientAddr, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_SendCoinsFromModuleToAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCoinsFromModuleToAccount' +type BankKeeper_SendCoinsFromModuleToAccount_Call struct { + *mock.Call +} + +// SendCoinsFromModuleToAccount is a helper method to define mock.On call +// - ctx context.Context +// - senderModule string +// - recipientAddr types.AccAddress +// - amt types.Coins +func (_e *BankKeeper_Expecter) SendCoinsFromModuleToAccount(ctx interface{}, senderModule interface{}, recipientAddr interface{}, amt interface{}) *BankKeeper_SendCoinsFromModuleToAccount_Call { + return &BankKeeper_SendCoinsFromModuleToAccount_Call{Call: _e.mock.On("SendCoinsFromModuleToAccount", ctx, senderModule, recipientAddr, amt)} +} + +func (_c *BankKeeper_SendCoinsFromModuleToAccount_Call) Run(run func(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins)) 
*BankKeeper_SendCoinsFromModuleToAccount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(types.AccAddress), args[3].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_SendCoinsFromModuleToAccount_Call) Return(_a0 error) *BankKeeper_SendCoinsFromModuleToAccount_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_SendCoinsFromModuleToAccount_Call) RunAndReturn(run func(context.Context, string, types.AccAddress, types.Coins) error) *BankKeeper_SendCoinsFromModuleToAccount_Call { + _c.Call.Return(run) + return _c +} + +// SendCoinsFromModuleToModule provides a mock function with given fields: ctx, senderPool, recipientPool, amt +func (_m *BankKeeper) SendCoinsFromModuleToModule(ctx context.Context, senderPool string, recipientPool string, amt types.Coins) error { + ret := _m.Called(ctx, senderPool, recipientPool, amt) + + if len(ret) == 0 { + panic("no return value specified for SendCoinsFromModuleToModule") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, types.Coins) error); ok { + r0 = rf(ctx, senderPool, recipientPool, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_SendCoinsFromModuleToModule_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCoinsFromModuleToModule' +type BankKeeper_SendCoinsFromModuleToModule_Call struct { + *mock.Call +} + +// SendCoinsFromModuleToModule is a helper method to define mock.On call +// - ctx context.Context +// - senderPool string +// - recipientPool string +// - amt types.Coins +func (_e *BankKeeper_Expecter) SendCoinsFromModuleToModule(ctx interface{}, senderPool interface{}, recipientPool interface{}, amt interface{}) *BankKeeper_SendCoinsFromModuleToModule_Call { + return &BankKeeper_SendCoinsFromModuleToModule_Call{Call: _e.mock.On("SendCoinsFromModuleToModule", ctx, senderPool, recipientPool, amt)} +} + +func (_c *BankKeeper_SendCoinsFromModuleToModule_Call) Run(run func(ctx context.Context, senderPool string, recipientPool string, amt types.Coins)) *BankKeeper_SendCoinsFromModuleToModule_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_SendCoinsFromModuleToModule_Call) Return(_a0 error) *BankKeeper_SendCoinsFromModuleToModule_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_SendCoinsFromModuleToModule_Call) RunAndReturn(run func(context.Context, string, string, types.Coins) error) *BankKeeper_SendCoinsFromModuleToModule_Call { + _c.Call.Return(run) + return _c +} + +// SpendableCoins provides a mock function with given fields: ctx, addr +func (_m *BankKeeper) SpendableCoins(ctx context.Context, addr types.AccAddress) types.Coins { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for SpendableCoins") + } + + var r0 types.Coins + if rf, ok := ret.Get(0).(func(context.Context, types.AccAddress) types.Coins); ok { + r0 = rf(ctx, addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Coins) + } + } + + return r0 +} + +// BankKeeper_SpendableCoins_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SpendableCoins' +type BankKeeper_SpendableCoins_Call struct { + *mock.Call +} + +// SpendableCoins is a helper method to define mock.On call +// - ctx context.Context +// - addr types.AccAddress +func (_e *BankKeeper_Expecter) 
SpendableCoins(ctx interface{}, addr interface{}) *BankKeeper_SpendableCoins_Call { + return &BankKeeper_SpendableCoins_Call{Call: _e.mock.On("SpendableCoins", ctx, addr)} +} + +func (_c *BankKeeper_SpendableCoins_Call) Run(run func(ctx context.Context, addr types.AccAddress)) *BankKeeper_SpendableCoins_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.AccAddress)) + }) + return _c +} + +func (_c *BankKeeper_SpendableCoins_Call) Return(_a0 types.Coins) *BankKeeper_SpendableCoins_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_SpendableCoins_Call) RunAndReturn(run func(context.Context, types.AccAddress) types.Coins) *BankKeeper_SpendableCoins_Call { + _c.Call.Return(run) + return _c +} + +// UndelegateCoinsFromModuleToAccount provides a mock function with given fields: ctx, senderModule, recipientAddr, amt +func (_m *BankKeeper) UndelegateCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins) error { + ret := _m.Called(ctx, senderModule, recipientAddr, amt) + + if len(ret) == 0 { + panic("no return value specified for UndelegateCoinsFromModuleToAccount") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, types.AccAddress, types.Coins) error); ok { + r0 = rf(ctx, senderModule, recipientAddr, amt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BankKeeper_UndelegateCoinsFromModuleToAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UndelegateCoinsFromModuleToAccount' +type BankKeeper_UndelegateCoinsFromModuleToAccount_Call struct { + *mock.Call +} + +// UndelegateCoinsFromModuleToAccount is a helper method to define mock.On call +// - ctx context.Context +// - senderModule string +// - recipientAddr types.AccAddress +// - amt types.Coins +func (_e *BankKeeper_Expecter) UndelegateCoinsFromModuleToAccount(ctx interface{}, senderModule interface{}, recipientAddr interface{}, amt interface{}) *BankKeeper_UndelegateCoinsFromModuleToAccount_Call { + return &BankKeeper_UndelegateCoinsFromModuleToAccount_Call{Call: _e.mock.On("UndelegateCoinsFromModuleToAccount", ctx, senderModule, recipientAddr, amt)} +} + +func (_c *BankKeeper_UndelegateCoinsFromModuleToAccount_Call) Run(run func(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins)) *BankKeeper_UndelegateCoinsFromModuleToAccount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(types.AccAddress), args[3].(types.Coins)) + }) + return _c +} + +func (_c *BankKeeper_UndelegateCoinsFromModuleToAccount_Call) Return(_a0 error) *BankKeeper_UndelegateCoinsFromModuleToAccount_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BankKeeper_UndelegateCoinsFromModuleToAccount_Call) RunAndReturn(run func(context.Context, string, types.AccAddress, types.Coins) error) *BankKeeper_UndelegateCoinsFromModuleToAccount_Call { + _c.Call.Return(run) + return _c +} + +// NewBankKeeper creates a new instance of BankKeeper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
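+//
+// Illustrative only (not part of the mockery output): a test covering the mint
+// and transfer path of tokenized shares could set expectations roughly as below;
+// the "lsm" module name and the "uatom" denom are assumptions of the sketch.
+//
+//	bk := mocks.NewBankKeeper(t)
+//	shares := sdk.NewCoins(sdk.NewInt64Coin("uatom", 1_000_000))
+//	bk.EXPECT().MintCoins(mock.Anything, "lsm", shares).Return(nil)
+//	bk.EXPECT().SendCoinsFromModuleToAccount(mock.Anything, "lsm", mock.Anything, shares).Return(nil)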
+func NewBankKeeper(t interface { + mock.TestingT + Cleanup(func()) +}) *BankKeeper { + mock := &BankKeeper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/x/lsm/types/mocks/DistributionKeeper.go b/x/lsm/types/mocks/DistributionKeeper.go new file mode 100644 index 00000000000..ff3ce5139a7 --- /dev/null +++ b/x/lsm/types/mocks/DistributionKeeper.go @@ -0,0 +1,218 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + cosmos_sdktypes "github.com/cosmos/cosmos-sdk/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// DistributionKeeper is an autogenerated mock type for the DistributionKeeper type +type DistributionKeeper struct { + mock.Mock +} + +type DistributionKeeper_Expecter struct { + mock *mock.Mock +} + +func (_m *DistributionKeeper) EXPECT() *DistributionKeeper_Expecter { + return &DistributionKeeper_Expecter{mock: &_m.Mock} +} + +// CalculateDelegationRewards provides a mock function with given fields: ctx, val, del, endingPeriod +func (_m *DistributionKeeper) CalculateDelegationRewards(ctx context.Context, val types.ValidatorI, del types.DelegationI, endingPeriod uint64) (cosmos_sdktypes.DecCoins, error) { + ret := _m.Called(ctx, val, del, endingPeriod) + + if len(ret) == 0 { + panic("no return value specified for CalculateDelegationRewards") + } + + var r0 cosmos_sdktypes.DecCoins + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.ValidatorI, types.DelegationI, uint64) (cosmos_sdktypes.DecCoins, error)); ok { + return rf(ctx, val, del, endingPeriod) + } + if rf, ok := ret.Get(0).(func(context.Context, types.ValidatorI, types.DelegationI, uint64) cosmos_sdktypes.DecCoins); ok { + r0 = rf(ctx, val, del, endingPeriod) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cosmos_sdktypes.DecCoins) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.ValidatorI, types.DelegationI, uint64) error); ok { + r1 = rf(ctx, val, del, endingPeriod) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DistributionKeeper_CalculateDelegationRewards_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CalculateDelegationRewards' +type DistributionKeeper_CalculateDelegationRewards_Call struct { + *mock.Call +} + +// CalculateDelegationRewards is a helper method to define mock.On call +// - ctx context.Context +// - val types.ValidatorI +// - del types.DelegationI +// - endingPeriod uint64 +func (_e *DistributionKeeper_Expecter) CalculateDelegationRewards(ctx interface{}, val interface{}, del interface{}, endingPeriod interface{}) *DistributionKeeper_CalculateDelegationRewards_Call { + return &DistributionKeeper_CalculateDelegationRewards_Call{Call: _e.mock.On("CalculateDelegationRewards", ctx, val, del, endingPeriod)} +} + +func (_c *DistributionKeeper_CalculateDelegationRewards_Call) Run(run func(ctx context.Context, val types.ValidatorI, del types.DelegationI, endingPeriod uint64)) *DistributionKeeper_CalculateDelegationRewards_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.ValidatorI), args[2].(types.DelegationI), args[3].(uint64)) + }) + return _c +} + +func (_c *DistributionKeeper_CalculateDelegationRewards_Call) Return(rewards cosmos_sdktypes.DecCoins, err error) *DistributionKeeper_CalculateDelegationRewards_Call { + _c.Call.Return(rewards, err) + return _c +} + +func (_c 
*DistributionKeeper_CalculateDelegationRewards_Call) RunAndReturn(run func(context.Context, types.ValidatorI, types.DelegationI, uint64) (cosmos_sdktypes.DecCoins, error)) *DistributionKeeper_CalculateDelegationRewards_Call { + _c.Call.Return(run) + return _c +} + +// IncrementValidatorPeriod provides a mock function with given fields: ctx, val +func (_m *DistributionKeeper) IncrementValidatorPeriod(ctx context.Context, val types.ValidatorI) (uint64, error) { + ret := _m.Called(ctx, val) + + if len(ret) == 0 { + panic("no return value specified for IncrementValidatorPeriod") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.ValidatorI) (uint64, error)); ok { + return rf(ctx, val) + } + if rf, ok := ret.Get(0).(func(context.Context, types.ValidatorI) uint64); ok { + r0 = rf(ctx, val) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.ValidatorI) error); ok { + r1 = rf(ctx, val) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DistributionKeeper_IncrementValidatorPeriod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IncrementValidatorPeriod' +type DistributionKeeper_IncrementValidatorPeriod_Call struct { + *mock.Call +} + +// IncrementValidatorPeriod is a helper method to define mock.On call +// - ctx context.Context +// - val types.ValidatorI +func (_e *DistributionKeeper_Expecter) IncrementValidatorPeriod(ctx interface{}, val interface{}) *DistributionKeeper_IncrementValidatorPeriod_Call { + return &DistributionKeeper_IncrementValidatorPeriod_Call{Call: _e.mock.On("IncrementValidatorPeriod", ctx, val)} +} + +func (_c *DistributionKeeper_IncrementValidatorPeriod_Call) Run(run func(ctx context.Context, val types.ValidatorI)) *DistributionKeeper_IncrementValidatorPeriod_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.ValidatorI)) + }) + return _c +} + +func (_c *DistributionKeeper_IncrementValidatorPeriod_Call) Return(_a0 uint64, _a1 error) *DistributionKeeper_IncrementValidatorPeriod_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DistributionKeeper_IncrementValidatorPeriod_Call) RunAndReturn(run func(context.Context, types.ValidatorI) (uint64, error)) *DistributionKeeper_IncrementValidatorPeriod_Call { + _c.Call.Return(run) + return _c +} + +// WithdrawDelegationRewards provides a mock function with given fields: ctx, delAddr, valAddr +func (_m *DistributionKeeper) WithdrawDelegationRewards(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress) (cosmos_sdktypes.Coins, error) { + ret := _m.Called(ctx, delAddr, valAddr) + + if len(ret) == 0 { + panic("no return value specified for WithdrawDelegationRewards") + } + + var r0 cosmos_sdktypes.Coins + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (cosmos_sdktypes.Coins, error)); ok { + return rf(ctx, delAddr, valAddr) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) cosmos_sdktypes.Coins); ok { + r0 = rf(ctx, delAddr, valAddr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cosmos_sdktypes.Coins) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, delAddr, valAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DistributionKeeper_WithdrawDelegationRewards_Call is 
a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithdrawDelegationRewards' +type DistributionKeeper_WithdrawDelegationRewards_Call struct { + *mock.Call +} + +// WithdrawDelegationRewards is a helper method to define mock.On call +// - ctx context.Context +// - delAddr cosmos_sdktypes.AccAddress +// - valAddr cosmos_sdktypes.ValAddress +func (_e *DistributionKeeper_Expecter) WithdrawDelegationRewards(ctx interface{}, delAddr interface{}, valAddr interface{}) *DistributionKeeper_WithdrawDelegationRewards_Call { + return &DistributionKeeper_WithdrawDelegationRewards_Call{Call: _e.mock.On("WithdrawDelegationRewards", ctx, delAddr, valAddr)} +} + +func (_c *DistributionKeeper_WithdrawDelegationRewards_Call) Run(run func(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress)) *DistributionKeeper_WithdrawDelegationRewards_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *DistributionKeeper_WithdrawDelegationRewards_Call) Return(_a0 cosmos_sdktypes.Coins, _a1 error) *DistributionKeeper_WithdrawDelegationRewards_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DistributionKeeper_WithdrawDelegationRewards_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (cosmos_sdktypes.Coins, error)) *DistributionKeeper_WithdrawDelegationRewards_Call { + _c.Call.Return(run) + return _c +} + +// NewDistributionKeeper creates a new instance of DistributionKeeper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDistributionKeeper(t interface { + mock.TestingT + Cleanup(func()) +}) *DistributionKeeper { + mock := &DistributionKeeper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/x/lsm/types/mocks/StakingKeeper.go b/x/lsm/types/mocks/StakingKeeper.go new file mode 100644 index 00000000000..63731686967 --- /dev/null +++ b/x/lsm/types/mocks/StakingKeeper.go @@ -0,0 +1,952 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + address "cosmossdk.io/core/address" + + cosmos_sdktypes "github.com/cosmos/cosmos-sdk/types" + + math "cosmossdk.io/math" + + mock "github.com/stretchr/testify/mock" + + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// StakingKeeper is an autogenerated mock type for the StakingKeeper type +type StakingKeeper struct { + mock.Mock +} + +type StakingKeeper_Expecter struct { + mock *mock.Mock +} + +func (_m *StakingKeeper) EXPECT() *StakingKeeper_Expecter { + return &StakingKeeper_Expecter{mock: &_m.Mock} +} + +// BondDenom provides a mock function with given fields: ctx +func (_m *StakingKeeper) BondDenom(ctx context.Context) (string, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BondDenom") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_BondDenom_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BondDenom' +type StakingKeeper_BondDenom_Call struct { + *mock.Call +} + +// BondDenom is a helper method to define mock.On call +// - ctx context.Context +func (_e *StakingKeeper_Expecter) BondDenom(ctx interface{}) *StakingKeeper_BondDenom_Call { + return &StakingKeeper_BondDenom_Call{Call: _e.mock.On("BondDenom", ctx)} +} + +func (_c *StakingKeeper_BondDenom_Call) Run(run func(ctx context.Context)) *StakingKeeper_BondDenom_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StakingKeeper_BondDenom_Call) Return(_a0 string, _a1 error) *StakingKeeper_BondDenom_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StakingKeeper_BondDenom_Call) RunAndReturn(run func(context.Context) (string, error)) *StakingKeeper_BondDenom_Call { + _c.Call.Return(run) + return _c +} + +// Delegate provides a mock function with given fields: ctx, delAddr, bondAmt, tokenSrc, validator, subtractAccount +func (_m *StakingKeeper) Delegate(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, bondAmt math.Int, tokenSrc stakingtypes.BondStatus, validator stakingtypes.Validator, subtractAccount bool) (math.LegacyDec, error) { + ret := _m.Called(ctx, delAddr, bondAmt, tokenSrc, validator, subtractAccount) + + if len(ret) == 0 { + panic("no return value specified for Delegate") + } + + var r0 math.LegacyDec + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, math.Int, stakingtypes.BondStatus, stakingtypes.Validator, bool) (math.LegacyDec, error)); ok { + return rf(ctx, delAddr, bondAmt, tokenSrc, validator, subtractAccount) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, math.Int, stakingtypes.BondStatus, stakingtypes.Validator, bool) math.LegacyDec); ok { + r0 = rf(ctx, delAddr, bondAmt, tokenSrc, validator, subtractAccount) + } else { + r0 = ret.Get(0).(math.LegacyDec) + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, math.Int, stakingtypes.BondStatus, stakingtypes.Validator, bool) error); ok { + r1 = rf(ctx, delAddr, bondAmt, tokenSrc, validator, subtractAccount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
StakingKeeper_Delegate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delegate' +type StakingKeeper_Delegate_Call struct { + *mock.Call +} + +// Delegate is a helper method to define mock.On call +// - ctx context.Context +// - delAddr cosmos_sdktypes.AccAddress +// - bondAmt math.Int +// - tokenSrc stakingtypes.BondStatus +// - validator stakingtypes.Validator +// - subtractAccount bool +func (_e *StakingKeeper_Expecter) Delegate(ctx interface{}, delAddr interface{}, bondAmt interface{}, tokenSrc interface{}, validator interface{}, subtractAccount interface{}) *StakingKeeper_Delegate_Call { + return &StakingKeeper_Delegate_Call{Call: _e.mock.On("Delegate", ctx, delAddr, bondAmt, tokenSrc, validator, subtractAccount)} +} + +func (_c *StakingKeeper_Delegate_Call) Run(run func(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, bondAmt math.Int, tokenSrc stakingtypes.BondStatus, validator stakingtypes.Validator, subtractAccount bool)) *StakingKeeper_Delegate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(math.Int), args[3].(stakingtypes.BondStatus), args[4].(stakingtypes.Validator), args[5].(bool)) + }) + return _c +} + +func (_c *StakingKeeper_Delegate_Call) Return(newShares math.LegacyDec, err error) *StakingKeeper_Delegate_Call { + _c.Call.Return(newShares, err) + return _c +} + +func (_c *StakingKeeper_Delegate_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, math.Int, stakingtypes.BondStatus, stakingtypes.Validator, bool) (math.LegacyDec, error)) *StakingKeeper_Delegate_Call { + _c.Call.Return(run) + return _c +} + +// Delegation provides a mock function with given fields: ctx, addrDel, addrVal +func (_m *StakingKeeper) Delegation(ctx context.Context, addrDel cosmos_sdktypes.AccAddress, addrVal cosmos_sdktypes.ValAddress) (stakingtypes.DelegationI, error) { + ret := _m.Called(ctx, addrDel, addrVal) + + if len(ret) == 0 { + panic("no return value specified for Delegation") + } + + var r0 stakingtypes.DelegationI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (stakingtypes.DelegationI, error)); ok { + return rf(ctx, addrDel, addrVal) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) stakingtypes.DelegationI); ok { + r0 = rf(ctx, addrDel, addrVal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(stakingtypes.DelegationI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, addrDel, addrVal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_Delegation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delegation' +type StakingKeeper_Delegation_Call struct { + *mock.Call +} + +// Delegation is a helper method to define mock.On call +// - ctx context.Context +// - addrDel cosmos_sdktypes.AccAddress +// - addrVal cosmos_sdktypes.ValAddress +func (_e *StakingKeeper_Expecter) Delegation(ctx interface{}, addrDel interface{}, addrVal interface{}) *StakingKeeper_Delegation_Call { + return &StakingKeeper_Delegation_Call{Call: _e.mock.On("Delegation", ctx, addrDel, addrVal)} +} + +func (_c *StakingKeeper_Delegation_Call) Run(run func(ctx context.Context, addrDel cosmos_sdktypes.AccAddress, addrVal cosmos_sdktypes.ValAddress)) *StakingKeeper_Delegation_Call { 
+ _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *StakingKeeper_Delegation_Call) Return(_a0 stakingtypes.DelegationI, _a1 error) *StakingKeeper_Delegation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StakingKeeper_Delegation_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (stakingtypes.DelegationI, error)) *StakingKeeper_Delegation_Call { + _c.Call.Return(run) + return _c +} + +// GetAllDelegations provides a mock function with given fields: ctx +func (_m *StakingKeeper) GetAllDelegations(ctx context.Context) ([]stakingtypes.Delegation, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetAllDelegations") + } + + var r0 []stakingtypes.Delegation + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]stakingtypes.Delegation, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []stakingtypes.Delegation); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]stakingtypes.Delegation) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_GetAllDelegations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllDelegations' +type StakingKeeper_GetAllDelegations_Call struct { + *mock.Call +} + +// GetAllDelegations is a helper method to define mock.On call +// - ctx context.Context +func (_e *StakingKeeper_Expecter) GetAllDelegations(ctx interface{}) *StakingKeeper_GetAllDelegations_Call { + return &StakingKeeper_GetAllDelegations_Call{Call: _e.mock.On("GetAllDelegations", ctx)} +} + +func (_c *StakingKeeper_GetAllDelegations_Call) Run(run func(ctx context.Context)) *StakingKeeper_GetAllDelegations_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StakingKeeper_GetAllDelegations_Call) Return(delegations []stakingtypes.Delegation, err error) *StakingKeeper_GetAllDelegations_Call { + _c.Call.Return(delegations, err) + return _c +} + +func (_c *StakingKeeper_GetAllDelegations_Call) RunAndReturn(run func(context.Context) ([]stakingtypes.Delegation, error)) *StakingKeeper_GetAllDelegations_Call { + _c.Call.Return(run) + return _c +} + +// GetAllValidators provides a mock function with given fields: ctx +func (_m *StakingKeeper) GetAllValidators(ctx context.Context) ([]stakingtypes.Validator, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetAllValidators") + } + + var r0 []stakingtypes.Validator + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]stakingtypes.Validator, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []stakingtypes.Validator); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]stakingtypes.Validator) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_GetAllValidators_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllValidators' +type StakingKeeper_GetAllValidators_Call struct { + *mock.Call +} + +// GetAllValidators is a helper method to define mock.On call +// - ctx context.Context 
+func (_e *StakingKeeper_Expecter) GetAllValidators(ctx interface{}) *StakingKeeper_GetAllValidators_Call { + return &StakingKeeper_GetAllValidators_Call{Call: _e.mock.On("GetAllValidators", ctx)} +} + +func (_c *StakingKeeper_GetAllValidators_Call) Run(run func(ctx context.Context)) *StakingKeeper_GetAllValidators_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StakingKeeper_GetAllValidators_Call) Return(validators []stakingtypes.Validator, err error) *StakingKeeper_GetAllValidators_Call { + _c.Call.Return(validators, err) + return _c +} + +func (_c *StakingKeeper_GetAllValidators_Call) RunAndReturn(run func(context.Context) ([]stakingtypes.Validator, error)) *StakingKeeper_GetAllValidators_Call { + _c.Call.Return(run) + return _c +} + +// GetDelegation provides a mock function with given fields: ctx, delAddr, valAddr +func (_m *StakingKeeper) GetDelegation(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress) (stakingtypes.Delegation, error) { + ret := _m.Called(ctx, delAddr, valAddr) + + if len(ret) == 0 { + panic("no return value specified for GetDelegation") + } + + var r0 stakingtypes.Delegation + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (stakingtypes.Delegation, error)); ok { + return rf(ctx, delAddr, valAddr) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) stakingtypes.Delegation); ok { + r0 = rf(ctx, delAddr, valAddr) + } else { + r0 = ret.Get(0).(stakingtypes.Delegation) + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, delAddr, valAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_GetDelegation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDelegation' +type StakingKeeper_GetDelegation_Call struct { + *mock.Call +} + +// GetDelegation is a helper method to define mock.On call +// - ctx context.Context +// - delAddr cosmos_sdktypes.AccAddress +// - valAddr cosmos_sdktypes.ValAddress +func (_e *StakingKeeper_Expecter) GetDelegation(ctx interface{}, delAddr interface{}, valAddr interface{}) *StakingKeeper_GetDelegation_Call { + return &StakingKeeper_GetDelegation_Call{Call: _e.mock.On("GetDelegation", ctx, delAddr, valAddr)} +} + +func (_c *StakingKeeper_GetDelegation_Call) Run(run func(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress)) *StakingKeeper_GetDelegation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *StakingKeeper_GetDelegation_Call) Return(_a0 stakingtypes.Delegation, _a1 error) *StakingKeeper_GetDelegation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StakingKeeper_GetDelegation_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (stakingtypes.Delegation, error)) *StakingKeeper_GetDelegation_Call { + _c.Call.Return(run) + return _c +} + +// GetParams provides a mock function with given fields: ctx +func (_m *StakingKeeper) GetParams(ctx context.Context) (stakingtypes.Params, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetParams") + } + + var r0 stakingtypes.Params + 
var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (stakingtypes.Params, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) stakingtypes.Params); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(stakingtypes.Params) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_GetParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetParams' +type StakingKeeper_GetParams_Call struct { + *mock.Call +} + +// GetParams is a helper method to define mock.On call +// - ctx context.Context +func (_e *StakingKeeper_Expecter) GetParams(ctx interface{}) *StakingKeeper_GetParams_Call { + return &StakingKeeper_GetParams_Call{Call: _e.mock.On("GetParams", ctx)} +} + +func (_c *StakingKeeper_GetParams_Call) Run(run func(ctx context.Context)) *StakingKeeper_GetParams_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StakingKeeper_GetParams_Call) Return(params stakingtypes.Params, err error) *StakingKeeper_GetParams_Call { + _c.Call.Return(params, err) + return _c +} + +func (_c *StakingKeeper_GetParams_Call) RunAndReturn(run func(context.Context) (stakingtypes.Params, error)) *StakingKeeper_GetParams_Call { + _c.Call.Return(run) + return _c +} + +// GetValidator provides a mock function with given fields: ctx, addr +func (_m *StakingKeeper) GetValidator(ctx context.Context, addr cosmos_sdktypes.ValAddress) (stakingtypes.Validator, error) { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for GetValidator") + } + + var r0 stakingtypes.Validator + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.ValAddress) (stakingtypes.Validator, error)); ok { + return rf(ctx, addr) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.ValAddress) stakingtypes.Validator); ok { + r0 = rf(ctx, addr) + } else { + r0 = ret.Get(0).(stakingtypes.Validator) + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_GetValidator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetValidator' +type StakingKeeper_GetValidator_Call struct { + *mock.Call +} + +// GetValidator is a helper method to define mock.On call +// - ctx context.Context +// - addr cosmos_sdktypes.ValAddress +func (_e *StakingKeeper_Expecter) GetValidator(ctx interface{}, addr interface{}) *StakingKeeper_GetValidator_Call { + return &StakingKeeper_GetValidator_Call{Call: _e.mock.On("GetValidator", ctx, addr)} +} + +func (_c *StakingKeeper_GetValidator_Call) Run(run func(ctx context.Context, addr cosmos_sdktypes.ValAddress)) *StakingKeeper_GetValidator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *StakingKeeper_GetValidator_Call) Return(validator stakingtypes.Validator, err error) *StakingKeeper_GetValidator_Call { + _c.Call.Return(validator, err) + return _c +} + +func (_c *StakingKeeper_GetValidator_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.ValAddress) (stakingtypes.Validator, error)) *StakingKeeper_GetValidator_Call { + _c.Call.Return(run) + return _c +} + +// GetValidatorDelegations provides a mock function with given fields: ctx, 
valAddr +func (_m *StakingKeeper) GetValidatorDelegations(ctx context.Context, valAddr cosmos_sdktypes.ValAddress) ([]stakingtypes.Delegation, error) { + ret := _m.Called(ctx, valAddr) + + if len(ret) == 0 { + panic("no return value specified for GetValidatorDelegations") + } + + var r0 []stakingtypes.Delegation + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.ValAddress) ([]stakingtypes.Delegation, error)); ok { + return rf(ctx, valAddr) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.ValAddress) []stakingtypes.Delegation); ok { + r0 = rf(ctx, valAddr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]stakingtypes.Delegation) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, valAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_GetValidatorDelegations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetValidatorDelegations' +type StakingKeeper_GetValidatorDelegations_Call struct { + *mock.Call +} + +// GetValidatorDelegations is a helper method to define mock.On call +// - ctx context.Context +// - valAddr cosmos_sdktypes.ValAddress +func (_e *StakingKeeper_Expecter) GetValidatorDelegations(ctx interface{}, valAddr interface{}) *StakingKeeper_GetValidatorDelegations_Call { + return &StakingKeeper_GetValidatorDelegations_Call{Call: _e.mock.On("GetValidatorDelegations", ctx, valAddr)} +} + +func (_c *StakingKeeper_GetValidatorDelegations_Call) Run(run func(ctx context.Context, valAddr cosmos_sdktypes.ValAddress)) *StakingKeeper_GetValidatorDelegations_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *StakingKeeper_GetValidatorDelegations_Call) Return(delegations []stakingtypes.Delegation, err error) *StakingKeeper_GetValidatorDelegations_Call { + _c.Call.Return(delegations, err) + return _c +} + +func (_c *StakingKeeper_GetValidatorDelegations_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.ValAddress) ([]stakingtypes.Delegation, error)) *StakingKeeper_GetValidatorDelegations_Call { + _c.Call.Return(run) + return _c +} + +// HasReceivingRedelegation provides a mock function with given fields: ctx, delAddr, valDstAddr +func (_m *StakingKeeper) HasReceivingRedelegation(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valDstAddr cosmos_sdktypes.ValAddress) (bool, error) { + ret := _m.Called(ctx, delAddr, valDstAddr) + + if len(ret) == 0 { + panic("no return value specified for HasReceivingRedelegation") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (bool, error)); ok { + return rf(ctx, delAddr, valDstAddr) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) bool); ok { + r0 = rf(ctx, delAddr, valDstAddr) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, delAddr, valDstAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_HasReceivingRedelegation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasReceivingRedelegation' +type StakingKeeper_HasReceivingRedelegation_Call struct { + *mock.Call +} + +// HasReceivingRedelegation is a helper 
method to define mock.On call +// - ctx context.Context +// - delAddr cosmos_sdktypes.AccAddress +// - valDstAddr cosmos_sdktypes.ValAddress +func (_e *StakingKeeper_Expecter) HasReceivingRedelegation(ctx interface{}, delAddr interface{}, valDstAddr interface{}) *StakingKeeper_HasReceivingRedelegation_Call { + return &StakingKeeper_HasReceivingRedelegation_Call{Call: _e.mock.On("HasReceivingRedelegation", ctx, delAddr, valDstAddr)} +} + +func (_c *StakingKeeper_HasReceivingRedelegation_Call) Run(run func(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valDstAddr cosmos_sdktypes.ValAddress)) *StakingKeeper_HasReceivingRedelegation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *StakingKeeper_HasReceivingRedelegation_Call) Return(_a0 bool, _a1 error) *StakingKeeper_HasReceivingRedelegation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StakingKeeper_HasReceivingRedelegation_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress) (bool, error)) *StakingKeeper_HasReceivingRedelegation_Call { + _c.Call.Return(run) + return _c +} + +// SetValidator provides a mock function with given fields: ctx, validator +func (_m *StakingKeeper) SetValidator(ctx context.Context, validator stakingtypes.Validator) error { + ret := _m.Called(ctx, validator) + + if len(ret) == 0 { + panic("no return value specified for SetValidator") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, stakingtypes.Validator) error); ok { + r0 = rf(ctx, validator) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StakingKeeper_SetValidator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetValidator' +type StakingKeeper_SetValidator_Call struct { + *mock.Call +} + +// SetValidator is a helper method to define mock.On call +// - ctx context.Context +// - validator stakingtypes.Validator +func (_e *StakingKeeper_Expecter) SetValidator(ctx interface{}, validator interface{}) *StakingKeeper_SetValidator_Call { + return &StakingKeeper_SetValidator_Call{Call: _e.mock.On("SetValidator", ctx, validator)} +} + +func (_c *StakingKeeper_SetValidator_Call) Run(run func(ctx context.Context, validator stakingtypes.Validator)) *StakingKeeper_SetValidator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(stakingtypes.Validator)) + }) + return _c +} + +func (_c *StakingKeeper_SetValidator_Call) Return(_a0 error) *StakingKeeper_SetValidator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StakingKeeper_SetValidator_Call) RunAndReturn(run func(context.Context, stakingtypes.Validator) error) *StakingKeeper_SetValidator_Call { + _c.Call.Return(run) + return _c +} + +// TotalBondedTokens provides a mock function with given fields: ctx +func (_m *StakingKeeper) TotalBondedTokens(ctx context.Context) (math.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for TotalBondedTokens") + } + + var r0 math.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (math.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) math.Int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(math.Int) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
StakingKeeper_TotalBondedTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TotalBondedTokens' +type StakingKeeper_TotalBondedTokens_Call struct { + *mock.Call +} + +// TotalBondedTokens is a helper method to define mock.On call +// - ctx context.Context +func (_e *StakingKeeper_Expecter) TotalBondedTokens(ctx interface{}) *StakingKeeper_TotalBondedTokens_Call { + return &StakingKeeper_TotalBondedTokens_Call{Call: _e.mock.On("TotalBondedTokens", ctx)} +} + +func (_c *StakingKeeper_TotalBondedTokens_Call) Run(run func(ctx context.Context)) *StakingKeeper_TotalBondedTokens_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StakingKeeper_TotalBondedTokens_Call) Return(_a0 math.Int, _a1 error) *StakingKeeper_TotalBondedTokens_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StakingKeeper_TotalBondedTokens_Call) RunAndReturn(run func(context.Context) (math.Int, error)) *StakingKeeper_TotalBondedTokens_Call { + _c.Call.Return(run) + return _c +} + +// Unbond provides a mock function with given fields: ctx, delAddr, valAddr, shares +func (_m *StakingKeeper) Unbond(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress, shares math.LegacyDec) (math.Int, error) { + ret := _m.Called(ctx, delAddr, valAddr, shares) + + if len(ret) == 0 { + panic("no return value specified for Unbond") + } + + var r0 math.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.LegacyDec) (math.Int, error)); ok { + return rf(ctx, delAddr, valAddr, shares) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.LegacyDec) math.Int); ok { + r0 = rf(ctx, delAddr, valAddr, shares) + } else { + r0 = ret.Get(0).(math.Int) + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.LegacyDec) error); ok { + r1 = rf(ctx, delAddr, valAddr, shares) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_Unbond_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Unbond' +type StakingKeeper_Unbond_Call struct { + *mock.Call +} + +// Unbond is a helper method to define mock.On call +// - ctx context.Context +// - delAddr cosmos_sdktypes.AccAddress +// - valAddr cosmos_sdktypes.ValAddress +// - shares math.LegacyDec +func (_e *StakingKeeper_Expecter) Unbond(ctx interface{}, delAddr interface{}, valAddr interface{}, shares interface{}) *StakingKeeper_Unbond_Call { + return &StakingKeeper_Unbond_Call{Call: _e.mock.On("Unbond", ctx, delAddr, valAddr, shares)} +} + +func (_c *StakingKeeper_Unbond_Call) Run(run func(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress, shares math.LegacyDec)) *StakingKeeper_Unbond_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(cosmos_sdktypes.ValAddress), args[3].(math.LegacyDec)) + }) + return _c +} + +func (_c *StakingKeeper_Unbond_Call) Return(amount math.Int, err error) *StakingKeeper_Unbond_Call { + _c.Call.Return(amount, err) + return _c +} + +func (_c *StakingKeeper_Unbond_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.LegacyDec) (math.Int, error)) *StakingKeeper_Unbond_Call { + _c.Call.Return(run) + return _c +} + +// 
ValidateUnbondAmount provides a mock function with given fields: ctx, delAddr, valAddr, amt +func (_m *StakingKeeper) ValidateUnbondAmount(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress, amt math.Int) (math.LegacyDec, error) { + ret := _m.Called(ctx, delAddr, valAddr, amt) + + if len(ret) == 0 { + panic("no return value specified for ValidateUnbondAmount") + } + + var r0 math.LegacyDec + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.Int) (math.LegacyDec, error)); ok { + return rf(ctx, delAddr, valAddr, amt) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.Int) math.LegacyDec); ok { + r0 = rf(ctx, delAddr, valAddr, amt) + } else { + r0 = ret.Get(0).(math.LegacyDec) + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.Int) error); ok { + r1 = rf(ctx, delAddr, valAddr, amt) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StakingKeeper_ValidateUnbondAmount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateUnbondAmount' +type StakingKeeper_ValidateUnbondAmount_Call struct { + *mock.Call +} + +// ValidateUnbondAmount is a helper method to define mock.On call +// - ctx context.Context +// - delAddr cosmos_sdktypes.AccAddress +// - valAddr cosmos_sdktypes.ValAddress +// - amt math.Int +func (_e *StakingKeeper_Expecter) ValidateUnbondAmount(ctx interface{}, delAddr interface{}, valAddr interface{}, amt interface{}) *StakingKeeper_ValidateUnbondAmount_Call { + return &StakingKeeper_ValidateUnbondAmount_Call{Call: _e.mock.On("ValidateUnbondAmount", ctx, delAddr, valAddr, amt)} +} + +func (_c *StakingKeeper_ValidateUnbondAmount_Call) Run(run func(ctx context.Context, delAddr cosmos_sdktypes.AccAddress, valAddr cosmos_sdktypes.ValAddress, amt math.Int)) *StakingKeeper_ValidateUnbondAmount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.AccAddress), args[2].(cosmos_sdktypes.ValAddress), args[3].(math.Int)) + }) + return _c +} + +func (_c *StakingKeeper_ValidateUnbondAmount_Call) Return(shares math.LegacyDec, err error) *StakingKeeper_ValidateUnbondAmount_Call { + _c.Call.Return(shares, err) + return _c +} + +func (_c *StakingKeeper_ValidateUnbondAmount_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.AccAddress, cosmos_sdktypes.ValAddress, math.Int) (math.LegacyDec, error)) *StakingKeeper_ValidateUnbondAmount_Call { + _c.Call.Return(run) + return _c +} + +// Validator provides a mock function with given fields: ctx, _a1 +func (_m *StakingKeeper) Validator(ctx context.Context, _a1 cosmos_sdktypes.ValAddress) (stakingtypes.ValidatorI, error) { + ret := _m.Called(ctx, _a1) + + if len(ret) == 0 { + panic("no return value specified for Validator") + } + + var r0 stakingtypes.ValidatorI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.ValAddress) (stakingtypes.ValidatorI, error)); ok { + return rf(ctx, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cosmos_sdktypes.ValAddress) stakingtypes.ValidatorI); ok { + r0 = rf(ctx, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(stakingtypes.ValidatorI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cosmos_sdktypes.ValAddress) error); ok { + r1 = rf(ctx, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
StakingKeeper_Validator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Validator' +type StakingKeeper_Validator_Call struct { + *mock.Call +} + +// Validator is a helper method to define mock.On call +// - ctx context.Context +// - _a1 cosmos_sdktypes.ValAddress +func (_e *StakingKeeper_Expecter) Validator(ctx interface{}, _a1 interface{}) *StakingKeeper_Validator_Call { + return &StakingKeeper_Validator_Call{Call: _e.mock.On("Validator", ctx, _a1)} +} + +func (_c *StakingKeeper_Validator_Call) Run(run func(ctx context.Context, _a1 cosmos_sdktypes.ValAddress)) *StakingKeeper_Validator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cosmos_sdktypes.ValAddress)) + }) + return _c +} + +func (_c *StakingKeeper_Validator_Call) Return(_a0 stakingtypes.ValidatorI, _a1 error) *StakingKeeper_Validator_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StakingKeeper_Validator_Call) RunAndReturn(run func(context.Context, cosmos_sdktypes.ValAddress) (stakingtypes.ValidatorI, error)) *StakingKeeper_Validator_Call { + _c.Call.Return(run) + return _c +} + +// ValidatorAddressCodec provides a mock function with given fields: +func (_m *StakingKeeper) ValidatorAddressCodec() address.Codec { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ValidatorAddressCodec") + } + + var r0 address.Codec + if rf, ok := ret.Get(0).(func() address.Codec); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(address.Codec) + } + } + + return r0 +} + +// StakingKeeper_ValidatorAddressCodec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidatorAddressCodec' +type StakingKeeper_ValidatorAddressCodec_Call struct { + *mock.Call +} + +// ValidatorAddressCodec is a helper method to define mock.On call +func (_e *StakingKeeper_Expecter) ValidatorAddressCodec() *StakingKeeper_ValidatorAddressCodec_Call { + return &StakingKeeper_ValidatorAddressCodec_Call{Call: _e.mock.On("ValidatorAddressCodec")} +} + +func (_c *StakingKeeper_ValidatorAddressCodec_Call) Run(run func()) *StakingKeeper_ValidatorAddressCodec_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *StakingKeeper_ValidatorAddressCodec_Call) Return(_a0 address.Codec) *StakingKeeper_ValidatorAddressCodec_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StakingKeeper_ValidatorAddressCodec_Call) RunAndReturn(run func() address.Codec) *StakingKeeper_ValidatorAddressCodec_Call { + _c.Call.Return(run) + return _c +} + +// NewStakingKeeper creates a new instance of StakingKeeper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStakingKeeper(t interface { + mock.TestingT + Cleanup(func()) +}) *StakingKeeper { + mock := &StakingKeeper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/x/lsm/types/params.go b/x/lsm/types/params.go new file mode 100644 index 00000000000..862608cf23b --- /dev/null +++ b/x/lsm/types/params.go @@ -0,0 +1,121 @@ +package types + +import ( + "fmt" + + "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/codec" +) + +var ( + // ValidatorBondFactor of -1 indicates that it's disabled + ValidatorBondCapDisabled = math.LegacyNewDecFromInt(math.NewInt(-1)) + + // DefaultValidatorBondFactor is set to -1 (disabled) + DefaultValidatorBondFactor = ValidatorBondCapDisabled + // DefaultGlobalLiquidStakingCap is set to 100% + DefaultGlobalLiquidStakingCap = math.LegacyOneDec() + // DefaultValidatorLiquidStakingCap is set to 100% + DefaultValidatorLiquidStakingCap = math.LegacyOneDec() +) + +// NewParams creates a new Params instance +func NewParams( + validatorBondFactor math.LegacyDec, + globalLiquidStakingCap math.LegacyDec, + validatorLiquidStakingCap math.LegacyDec, +) Params { + return Params{ + ValidatorBondFactor: validatorBondFactor, + GlobalLiquidStakingCap: globalLiquidStakingCap, + ValidatorLiquidStakingCap: validatorLiquidStakingCap, + } +} + +// DefaultParams returns a default set of parameters. +func DefaultParams() Params { + return NewParams( + DefaultValidatorBondFactor, + DefaultGlobalLiquidStakingCap, + DefaultValidatorLiquidStakingCap, + ) +} + +// unmarshal the current lsm params value from store key or panic +func MustUnmarshalParams(cdc *codec.LegacyAmino, value []byte) Params { + params, err := UnmarshalParams(cdc, value) + if err != nil { + panic(err) + } + + return params +} + +// unmarshal the current lsm params value from store key +func UnmarshalParams(cdc *codec.LegacyAmino, value []byte) (params Params, err error) { + err = cdc.Unmarshal(value, &params) + if err != nil { + return + } + + return +} + +// validate a set of params +func (p Params) Validate() error { + if err := validateValidatorBondFactor(p.ValidatorBondFactor); err != nil { + return err + } + + if err := validateGlobalLiquidStakingCap(p.GlobalLiquidStakingCap); err != nil { + return err + } + + return validateValidatorLiquidStakingCap(p.ValidatorLiquidStakingCap) +} + +func validateValidatorBondFactor(i interface{}) error { + v, ok := i.(math.LegacyDec) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNegative() && !v.Equal(math.LegacyNewDec(-1)) { + return fmt.Errorf("invalid validator bond factor: %s", v) + } + + return nil +} + +func validateGlobalLiquidStakingCap(i interface{}) error { + v, ok := i.(math.LegacyDec) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNegative() { + return fmt.Errorf("global liquid staking cap cannot be negative: %s", v) + } + if v.GT(math.LegacyOneDec()) { + return fmt.Errorf("global liquid staking cap cannot be greater than 100%%: %s", v) + } + + return nil +} + +func validateValidatorLiquidStakingCap(i interface{}) error { + v, ok := i.(math.LegacyDec) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNegative() { + return fmt.Errorf("validator liquid staking cap cannot be negative: %s", v) + } + if v.GT(math.LegacyOneDec()) { + return fmt.Errorf("validator liquid staking cap cannot be greater than 100%%: %s", v) + } + + return nil +} diff --git a/x/lsm/types/query.pb.go b/x/lsm/types/query.pb.go new file mode 100644
index 00000000000..7c690824d80 --- /dev/null +++ b/x/lsm/types/query.pb.go @@ -0,0 +1,4078 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gaia/lsm/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + types "github.com/cosmos/cosmos-sdk/types" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// QueryTokenizeShareRecordByIdRequest is request type for the +// Query/QueryTokenizeShareRecordById RPC method. +type QueryTokenizeShareRecordByIdRequest struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *QueryTokenizeShareRecordByIdRequest) Reset() { *m = QueryTokenizeShareRecordByIdRequest{} } +func (m *QueryTokenizeShareRecordByIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordByIdRequest) ProtoMessage() {} +func (*QueryTokenizeShareRecordByIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{2} +} +func (m *QueryTokenizeShareRecordByIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordByIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordByIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordByIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordByIdRequest.Merge(m, src) +} +func (m *QueryTokenizeShareRecordByIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordByIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordByIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordByIdRequest proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordByIdRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// QueryTokenizeShareRecordByIdRequest is response type for the +// Query/QueryTokenizeShareRecordById RPC method. 
+type QueryTokenizeShareRecordByIdResponse struct { + Record TokenizeShareRecord `protobuf:"bytes,1,opt,name=record,proto3" json:"record"` +} + +func (m *QueryTokenizeShareRecordByIdResponse) Reset() { *m = QueryTokenizeShareRecordByIdResponse{} } +func (m *QueryTokenizeShareRecordByIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordByIdResponse) ProtoMessage() {} +func (*QueryTokenizeShareRecordByIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{3} +} +func (m *QueryTokenizeShareRecordByIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordByIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordByIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordByIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordByIdResponse.Merge(m, src) +} +func (m *QueryTokenizeShareRecordByIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordByIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordByIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordByIdResponse proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordByIdResponse) GetRecord() TokenizeShareRecord { + if m != nil { + return m.Record + } + return TokenizeShareRecord{} +} + +// QueryTokenizeShareRecordByDenomRequest is request type for the +// Query/QueryTokenizeShareRecordByDenom RPC method. +type QueryTokenizeShareRecordByDenomRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` +} + +func (m *QueryTokenizeShareRecordByDenomRequest) Reset() { + *m = QueryTokenizeShareRecordByDenomRequest{} +} +func (m *QueryTokenizeShareRecordByDenomRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordByDenomRequest) ProtoMessage() {} +func (*QueryTokenizeShareRecordByDenomRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{4} +} +func (m *QueryTokenizeShareRecordByDenomRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordByDenomRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordByDenomRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordByDenomRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordByDenomRequest.Merge(m, src) +} +func (m *QueryTokenizeShareRecordByDenomRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordByDenomRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordByDenomRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordByDenomRequest proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordByDenomRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +// QueryTokenizeShareRecordByDenomResponse is response type for the +// Query/QueryTokenizeShareRecordByDenom RPC method. 
+type QueryTokenizeShareRecordByDenomResponse struct { + Record TokenizeShareRecord `protobuf:"bytes,1,opt,name=record,proto3" json:"record"` +} + +func (m *QueryTokenizeShareRecordByDenomResponse) Reset() { + *m = QueryTokenizeShareRecordByDenomResponse{} +} +func (m *QueryTokenizeShareRecordByDenomResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordByDenomResponse) ProtoMessage() {} +func (*QueryTokenizeShareRecordByDenomResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{5} +} +func (m *QueryTokenizeShareRecordByDenomResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordByDenomResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordByDenomResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordByDenomResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordByDenomResponse.Merge(m, src) +} +func (m *QueryTokenizeShareRecordByDenomResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordByDenomResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordByDenomResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordByDenomResponse proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordByDenomResponse) GetRecord() TokenizeShareRecord { + if m != nil { + return m.Record + } + return TokenizeShareRecord{} +} + +// QueryTokenizeShareRecordsOwnedRequest is request type for the +// Query/QueryTokenizeShareRecordsOwned RPC method. +type QueryTokenizeShareRecordsOwnedRequest struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryTokenizeShareRecordsOwnedRequest) Reset() { *m = QueryTokenizeShareRecordsOwnedRequest{} } +func (m *QueryTokenizeShareRecordsOwnedRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordsOwnedRequest) ProtoMessage() {} +func (*QueryTokenizeShareRecordsOwnedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{6} +} +func (m *QueryTokenizeShareRecordsOwnedRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordsOwnedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordsOwnedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordsOwnedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordsOwnedRequest.Merge(m, src) +} +func (m *QueryTokenizeShareRecordsOwnedRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordsOwnedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordsOwnedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordsOwnedRequest proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordsOwnedRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryTokenizeShareRecordsOwnedResponse is response type for the +// Query/QueryTokenizeShareRecordsOwned RPC method. 
+type QueryTokenizeShareRecordsOwnedResponse struct { + Records []TokenizeShareRecord `protobuf:"bytes,1,rep,name=records,proto3" json:"records"` +} + +func (m *QueryTokenizeShareRecordsOwnedResponse) Reset() { + *m = QueryTokenizeShareRecordsOwnedResponse{} +} +func (m *QueryTokenizeShareRecordsOwnedResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordsOwnedResponse) ProtoMessage() {} +func (*QueryTokenizeShareRecordsOwnedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{7} +} +func (m *QueryTokenizeShareRecordsOwnedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordsOwnedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordsOwnedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordsOwnedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordsOwnedResponse.Merge(m, src) +} +func (m *QueryTokenizeShareRecordsOwnedResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordsOwnedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordsOwnedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordsOwnedResponse proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordsOwnedResponse) GetRecords() []TokenizeShareRecord { + if m != nil { + return m.Records + } + return nil +} + +// QueryAllTokenizeShareRecordsRequest is request type for the +// Query/QueryAllTokenizeShareRecords RPC method. +type QueryAllTokenizeShareRecordsRequest struct { + // pagination defines an optional pagination for the request. 
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllTokenizeShareRecordsRequest) Reset() { *m = QueryAllTokenizeShareRecordsRequest{} } +func (m *QueryAllTokenizeShareRecordsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllTokenizeShareRecordsRequest) ProtoMessage() {} +func (*QueryAllTokenizeShareRecordsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{8} +} +func (m *QueryAllTokenizeShareRecordsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllTokenizeShareRecordsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllTokenizeShareRecordsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllTokenizeShareRecordsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllTokenizeShareRecordsRequest.Merge(m, src) +} +func (m *QueryAllTokenizeShareRecordsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllTokenizeShareRecordsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllTokenizeShareRecordsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllTokenizeShareRecordsRequest proto.InternalMessageInfo + +func (m *QueryAllTokenizeShareRecordsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryAllTokenizeShareRecordsResponse is response type for the +// Query/QueryAllTokenizeShareRecords RPC method. +type QueryAllTokenizeShareRecordsResponse struct { + Records []TokenizeShareRecord `protobuf:"bytes,1,rep,name=records,proto3" json:"records"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllTokenizeShareRecordsResponse) Reset() { *m = QueryAllTokenizeShareRecordsResponse{} } +func (m *QueryAllTokenizeShareRecordsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllTokenizeShareRecordsResponse) ProtoMessage() {} +func (*QueryAllTokenizeShareRecordsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{9} +} +func (m *QueryAllTokenizeShareRecordsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllTokenizeShareRecordsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllTokenizeShareRecordsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllTokenizeShareRecordsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllTokenizeShareRecordsResponse.Merge(m, src) +} +func (m *QueryAllTokenizeShareRecordsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllTokenizeShareRecordsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllTokenizeShareRecordsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllTokenizeShareRecordsResponse proto.InternalMessageInfo + +func (m *QueryAllTokenizeShareRecordsResponse) GetRecords() []TokenizeShareRecord { + if m != nil { + return m.Records + } + return nil +} + +func (m *QueryAllTokenizeShareRecordsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryLastTokenizeShareRecordIdRequest is request type for the +// Query/QueryLastTokenizeShareRecordId RPC method. +type QueryLastTokenizeShareRecordIdRequest struct { +} + +func (m *QueryLastTokenizeShareRecordIdRequest) Reset() { *m = QueryLastTokenizeShareRecordIdRequest{} } +func (m *QueryLastTokenizeShareRecordIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLastTokenizeShareRecordIdRequest) ProtoMessage() {} +func (*QueryLastTokenizeShareRecordIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{10} +} +func (m *QueryLastTokenizeShareRecordIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLastTokenizeShareRecordIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLastTokenizeShareRecordIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLastTokenizeShareRecordIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLastTokenizeShareRecordIdRequest.Merge(m, src) +} +func (m *QueryLastTokenizeShareRecordIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLastTokenizeShareRecordIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLastTokenizeShareRecordIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLastTokenizeShareRecordIdRequest proto.InternalMessageInfo + +// QueryLastTokenizeShareRecordIdResponse is response type for the +// Query/QueryLastTokenizeShareRecordId RPC method. 
+type QueryLastTokenizeShareRecordIdResponse struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *QueryLastTokenizeShareRecordIdResponse) Reset() { + *m = QueryLastTokenizeShareRecordIdResponse{} +} +func (m *QueryLastTokenizeShareRecordIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLastTokenizeShareRecordIdResponse) ProtoMessage() {} +func (*QueryLastTokenizeShareRecordIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{11} +} +func (m *QueryLastTokenizeShareRecordIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLastTokenizeShareRecordIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLastTokenizeShareRecordIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLastTokenizeShareRecordIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLastTokenizeShareRecordIdResponse.Merge(m, src) +} +func (m *QueryLastTokenizeShareRecordIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLastTokenizeShareRecordIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLastTokenizeShareRecordIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLastTokenizeShareRecordIdResponse proto.InternalMessageInfo + +func (m *QueryLastTokenizeShareRecordIdResponse) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// QueryTotalTokenizeSharedAssetsRequest is request type for the +// Query/QueryTotalTokenizeSharedAssets RPC method. +type QueryTotalTokenizeSharedAssetsRequest struct { +} + +func (m *QueryTotalTokenizeSharedAssetsRequest) Reset() { *m = QueryTotalTokenizeSharedAssetsRequest{} } +func (m *QueryTotalTokenizeSharedAssetsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTotalTokenizeSharedAssetsRequest) ProtoMessage() {} +func (*QueryTotalTokenizeSharedAssetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{12} +} +func (m *QueryTotalTokenizeSharedAssetsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTotalTokenizeSharedAssetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTotalTokenizeSharedAssetsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTotalTokenizeSharedAssetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTotalTokenizeSharedAssetsRequest.Merge(m, src) +} +func (m *QueryTotalTokenizeSharedAssetsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTotalTokenizeSharedAssetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTotalTokenizeSharedAssetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTotalTokenizeSharedAssetsRequest proto.InternalMessageInfo + +// QueryTotalTokenizeSharedAssetsResponse is response type for the +// Query/QueryTotalTokenizeSharedAssets RPC method. 
+type QueryTotalTokenizeSharedAssetsResponse struct { + Value types.Coin `protobuf:"bytes,1,opt,name=value,proto3" json:"value"` +} + +func (m *QueryTotalTokenizeSharedAssetsResponse) Reset() { + *m = QueryTotalTokenizeSharedAssetsResponse{} +} +func (m *QueryTotalTokenizeSharedAssetsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTotalTokenizeSharedAssetsResponse) ProtoMessage() {} +func (*QueryTotalTokenizeSharedAssetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{13} +} +func (m *QueryTotalTokenizeSharedAssetsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTotalTokenizeSharedAssetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTotalTokenizeSharedAssetsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTotalTokenizeSharedAssetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTotalTokenizeSharedAssetsResponse.Merge(m, src) +} +func (m *QueryTotalTokenizeSharedAssetsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTotalTokenizeSharedAssetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTotalTokenizeSharedAssetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTotalTokenizeSharedAssetsResponse proto.InternalMessageInfo + +func (m *QueryTotalTokenizeSharedAssetsResponse) GetValue() types.Coin { + if m != nil { + return m.Value + } + return types.Coin{} +} + +// QueryTotalLiquidStakedRequest is request type for the +// Query/QueryQueryTotalLiquidStaked RPC method. +type QueryTotalLiquidStaked struct { +} + +func (m *QueryTotalLiquidStaked) Reset() { *m = QueryTotalLiquidStaked{} } +func (m *QueryTotalLiquidStaked) String() string { return proto.CompactTextString(m) } +func (*QueryTotalLiquidStaked) ProtoMessage() {} +func (*QueryTotalLiquidStaked) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{14} +} +func (m *QueryTotalLiquidStaked) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTotalLiquidStaked) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTotalLiquidStaked.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTotalLiquidStaked) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTotalLiquidStaked.Merge(m, src) +} +func (m *QueryTotalLiquidStaked) XXX_Size() int { + return m.Size() +} +func (m *QueryTotalLiquidStaked) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTotalLiquidStaked.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTotalLiquidStaked proto.InternalMessageInfo + +// QueryTotalLiquidStakedResponse is response type for the +// Query/QueryQueryTotalLiquidStaked RPC method. 
+type QueryTotalLiquidStakedResponse struct { + Tokens string `protobuf:"bytes,1,opt,name=tokens,proto3" json:"tokens,omitempty"` +} + +func (m *QueryTotalLiquidStakedResponse) Reset() { *m = QueryTotalLiquidStakedResponse{} } +func (m *QueryTotalLiquidStakedResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTotalLiquidStakedResponse) ProtoMessage() {} +func (*QueryTotalLiquidStakedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{15} +} +func (m *QueryTotalLiquidStakedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTotalLiquidStakedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTotalLiquidStakedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTotalLiquidStakedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTotalLiquidStakedResponse.Merge(m, src) +} +func (m *QueryTotalLiquidStakedResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTotalLiquidStakedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTotalLiquidStakedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTotalLiquidStakedResponse proto.InternalMessageInfo + +func (m *QueryTotalLiquidStakedResponse) GetTokens() string { + if m != nil { + return m.Tokens + } + return "" +} + +// QueryTokenizeShareLockInfo queries the tokenize share lock information +// associated with given account +type QueryTokenizeShareLockInfo struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (m *QueryTokenizeShareLockInfo) Reset() { *m = QueryTokenizeShareLockInfo{} } +func (m *QueryTokenizeShareLockInfo) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareLockInfo) ProtoMessage() {} +func (*QueryTokenizeShareLockInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{16} +} +func (m *QueryTokenizeShareLockInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareLockInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareLockInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareLockInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareLockInfo.Merge(m, src) +} +func (m *QueryTokenizeShareLockInfo) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareLockInfo) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareLockInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareLockInfo proto.InternalMessageInfo + +func (m *QueryTokenizeShareLockInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +// QueryTokenizeShareLockInfoResponse is the response from the +// QueryTokenizeShareLockInfo query +type QueryTokenizeShareLockInfoResponse struct { + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ExpirationTime string `protobuf:"bytes,2,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"` +} + +func (m *QueryTokenizeShareLockInfoResponse) Reset() { *m = QueryTokenizeShareLockInfoResponse{} } +func (m 
*QueryTokenizeShareLockInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareLockInfoResponse) ProtoMessage() {} +func (*QueryTokenizeShareLockInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{17} +} +func (m *QueryTokenizeShareLockInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareLockInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareLockInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareLockInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareLockInfoResponse.Merge(m, src) +} +func (m *QueryTokenizeShareLockInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareLockInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareLockInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareLockInfoResponse proto.InternalMessageInfo + +func (m *QueryTokenizeShareLockInfoResponse) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *QueryTokenizeShareLockInfoResponse) GetExpirationTime() string { + if m != nil { + return m.ExpirationTime + } + return "" +} + +// QueryTokenizeShareRecordRewardRequest is the request type for the +// Query/TokenizeShareRecordReward RPC method. +type QueryTokenizeShareRecordRewardRequest struct { + OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty" yaml:"owner_address"` +} + +func (m *QueryTokenizeShareRecordRewardRequest) Reset() { *m = QueryTokenizeShareRecordRewardRequest{} } +func (m *QueryTokenizeShareRecordRewardRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordRewardRequest) ProtoMessage() {} +func (*QueryTokenizeShareRecordRewardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{18} +} +func (m *QueryTokenizeShareRecordRewardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordRewardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordRewardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordRewardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordRewardRequest.Merge(m, src) +} +func (m *QueryTokenizeShareRecordRewardRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordRewardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordRewardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordRewardRequest proto.InternalMessageInfo + +// QueryTokenizeShareRecordRewardResponse is the response type for the +// Query/TokenizeShareRecordReward RPC method. +type QueryTokenizeShareRecordRewardResponse struct { + // rewards defines all the rewards accrued by a delegator. + Rewards []TokenizeShareRecordReward `protobuf:"bytes,1,rep,name=rewards,proto3" json:"rewards"` + // total defines the sum of all the rewards. 
+ Total github_com_cosmos_cosmos_sdk_types.DecCoins `protobuf:"bytes,2,rep,name=total,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.DecCoins" json:"total"` +} + +func (m *QueryTokenizeShareRecordRewardResponse) Reset() { + *m = QueryTokenizeShareRecordRewardResponse{} +} +func (m *QueryTokenizeShareRecordRewardResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTokenizeShareRecordRewardResponse) ProtoMessage() {} +func (*QueryTokenizeShareRecordRewardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_264debc7b0a264a5, []int{19} +} +func (m *QueryTokenizeShareRecordRewardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTokenizeShareRecordRewardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTokenizeShareRecordRewardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTokenizeShareRecordRewardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTokenizeShareRecordRewardResponse.Merge(m, src) +} +func (m *QueryTokenizeShareRecordRewardResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTokenizeShareRecordRewardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTokenizeShareRecordRewardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTokenizeShareRecordRewardResponse proto.InternalMessageInfo + +func (m *QueryTokenizeShareRecordRewardResponse) GetRewards() []TokenizeShareRecordReward { + if m != nil { + return m.Rewards + } + return nil +} + +func (m *QueryTokenizeShareRecordRewardResponse) GetTotal() github_com_cosmos_cosmos_sdk_types.DecCoins { + if m != nil { + return m.Total + } + return nil +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "gaia.lsm.v1beta1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "gaia.lsm.v1beta1.QueryParamsResponse") + proto.RegisterType((*QueryTokenizeShareRecordByIdRequest)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordByIdRequest") + proto.RegisterType((*QueryTokenizeShareRecordByIdResponse)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordByIdResponse") + proto.RegisterType((*QueryTokenizeShareRecordByDenomRequest)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordByDenomRequest") + proto.RegisterType((*QueryTokenizeShareRecordByDenomResponse)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordByDenomResponse") + proto.RegisterType((*QueryTokenizeShareRecordsOwnedRequest)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordsOwnedRequest") + proto.RegisterType((*QueryTokenizeShareRecordsOwnedResponse)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordsOwnedResponse") + proto.RegisterType((*QueryAllTokenizeShareRecordsRequest)(nil), "gaia.lsm.v1beta1.QueryAllTokenizeShareRecordsRequest") + proto.RegisterType((*QueryAllTokenizeShareRecordsResponse)(nil), "gaia.lsm.v1beta1.QueryAllTokenizeShareRecordsResponse") + proto.RegisterType((*QueryLastTokenizeShareRecordIdRequest)(nil), "gaia.lsm.v1beta1.QueryLastTokenizeShareRecordIdRequest") + proto.RegisterType((*QueryLastTokenizeShareRecordIdResponse)(nil), "gaia.lsm.v1beta1.QueryLastTokenizeShareRecordIdResponse") + proto.RegisterType((*QueryTotalTokenizeSharedAssetsRequest)(nil), "gaia.lsm.v1beta1.QueryTotalTokenizeSharedAssetsRequest") + proto.RegisterType((*QueryTotalTokenizeSharedAssetsResponse)(nil), "gaia.lsm.v1beta1.QueryTotalTokenizeSharedAssetsResponse") + 
proto.RegisterType((*QueryTotalLiquidStaked)(nil), "gaia.lsm.v1beta1.QueryTotalLiquidStaked") + proto.RegisterType((*QueryTotalLiquidStakedResponse)(nil), "gaia.lsm.v1beta1.QueryTotalLiquidStakedResponse") + proto.RegisterType((*QueryTokenizeShareLockInfo)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareLockInfo") + proto.RegisterType((*QueryTokenizeShareLockInfoResponse)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareLockInfoResponse") + proto.RegisterType((*QueryTokenizeShareRecordRewardRequest)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordRewardRequest") + proto.RegisterType((*QueryTokenizeShareRecordRewardResponse)(nil), "gaia.lsm.v1beta1.QueryTokenizeShareRecordRewardResponse") +} + +func init() { proto.RegisterFile("gaia/lsm/v1beta1/query.proto", fileDescriptor_264debc7b0a264a5) } + +var fileDescriptor_264debc7b0a264a5 = []byte{ + // 1151 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4d, 0x6c, 0xdc, 0x44, + 0x14, 0x5e, 0x87, 0x26, 0xa5, 0x03, 0x04, 0x3a, 0x84, 0x76, 0x63, 0xa2, 0x0d, 0x1a, 0x9a, 0x6e, + 0x94, 0x52, 0x3b, 0x49, 0xf3, 0x47, 0x68, 0x80, 0xa4, 0x85, 0xaa, 0x22, 0x12, 0xe0, 0xf6, 0xc4, + 0xc5, 0x9a, 0xac, 0xa7, 0xdb, 0xd1, 0xda, 0x9e, 0x8d, 0xc7, 0xdb, 0x34, 0x84, 0x5c, 0x38, 0xf5, + 0x88, 0xc4, 0x1d, 0xf5, 0x88, 0x38, 0x71, 0x42, 0x82, 0x33, 0x82, 0x5e, 0x90, 0x2a, 0xf5, 0x00, + 0xa7, 0x80, 0x12, 0x24, 0x38, 0x73, 0xe3, 0x86, 0x3c, 0x33, 0x76, 0xd6, 0xf1, 0xce, 0xfe, 0xa0, + 0x5e, 0x76, 0x77, 0x7e, 0xbe, 0x79, 0xdf, 0xe7, 0xf7, 0xde, 0x7c, 0x6b, 0x30, 0x51, 0xc7, 0x14, + 0xdb, 0x3e, 0x0f, 0xec, 0x7b, 0x73, 0x5b, 0x24, 0xc6, 0x73, 0xf6, 0x76, 0x8b, 0x44, 0xbb, 0x56, + 0x33, 0x62, 0x31, 0x83, 0x2f, 0x25, 0xab, 0x96, 0xcf, 0x03, 0x4b, 0xad, 0x9a, 0x33, 0x35, 0xc6, + 0x03, 0xc6, 0xed, 0x2d, 0xcc, 0x89, 0xdc, 0x9a, 0x01, 0x9b, 0xb8, 0x4e, 0x43, 0x1c, 0x53, 0x16, + 0x4a, 0xb4, 0x39, 0x56, 0x67, 0x75, 0x26, 0x7e, 0xda, 0xc9, 0x2f, 0x35, 0x3b, 0x51, 0x67, 0xac, + 0xee, 0x13, 0x1b, 0x37, 0xa9, 0x8d, 0xc3, 0x90, 0xc5, 0x02, 0xc2, 0xd5, 0xaa, 0x59, 0xe0, 0x93, + 0x44, 0x97, 0x6b, 0x95, 0xf6, 0xd8, 0xe9, 0x72, 0x8d, 0xd1, 0x34, 0xde, 0xab, 0x6a, 0x3d, 0xa5, + 0xd5, 0x2e, 0xc5, 0x3c, 0x8b, 0x03, 0x1a, 0x32, 0x5b, 0x7c, 0xca, 0x29, 0x34, 0x06, 0xe0, 0xc7, + 0xc9, 0x8e, 0x8f, 0x70, 0x84, 0x03, 0xee, 0x90, 0xed, 0x16, 0xe1, 0x31, 0x72, 0xc0, 0xcb, 0xb9, + 0x59, 0xde, 0x64, 0x21, 0x27, 0xf0, 0x2d, 0x30, 0xd2, 0x14, 0x33, 0x65, 0xe3, 0x35, 0x63, 0xfa, + 0xb9, 0xf9, 0xb2, 0x75, 0xf2, 0xd9, 0x58, 0x12, 0xb1, 0x71, 0xe6, 0xd1, 0xc1, 0x64, 0xe9, 0xeb, + 0xbf, 0xbe, 0x9d, 0x31, 0x1c, 0x05, 0x41, 0x8b, 0xe0, 0x75, 0x71, 0xe6, 0x6d, 0xd6, 0x20, 0x21, + 0xfd, 0x94, 0xdc, 0xba, 0x8b, 0x23, 0xe2, 0x90, 0x1a, 0x8b, 0xbc, 0x8d, 0xdd, 0x9b, 0x9e, 0x0a, + 0x0d, 0x47, 0xc1, 0x10, 0xf5, 0xc4, 0xf9, 0xa7, 0x9c, 0x21, 0xea, 0xa1, 0x06, 0xb8, 0xd0, 0x1d, + 0xa6, 0xb8, 0x5d, 0x03, 0x23, 0x91, 0x98, 0x55, 0xdc, 0xa6, 0x8a, 0xdc, 0x3a, 0x1d, 0x71, 0x2a, + 0x21, 0xea, 0x28, 0x28, 0x7a, 0x1b, 0x5c, 0xd4, 0x07, 0xbb, 0x4e, 0x42, 0x16, 0xa4, 0x34, 0xc7, + 0xc0, 0xb0, 0x97, 0x8c, 0x45, 0xb4, 0x33, 0x8e, 0x1c, 0xa0, 0x10, 0x54, 0x7b, 0xe2, 0x9f, 0x26, + 0xdf, 0x35, 0x30, 0xa5, 0x8b, 0xc7, 0x3f, 0xdc, 0x09, 0x89, 0xd7, 0x46, 0x97, 0xed, 0x84, 0x24, + 0x4a, 0xe9, 0x8a, 0x01, 0x62, 0x7a, 0xb9, 0x29, 0x5c, 0xb1, 0x7d, 0x0f, 0x9c, 0x96, 0x21, 0x93, + 0xd4, 0x3f, 0x33, 0x28, 0xdd, 0x14, 0x8b, 0x02, 0x55, 0x03, 0xeb, 0xbe, 0xdf, 0x29, 0x66, 0xca, + 0xf6, 0x7d, 0x00, 0x8e, 0x1b, 0x49, 0x3d, 0x9f, 0x8b, 0x96, 0xac, 0x6c, 0x2b, 0xa9, 0x7c, 0x4b, + 0x56, 0xf5, 0x71, 
0xd1, 0xd5, 0x89, 0xc2, 0x3a, 0x6d, 0x48, 0xf4, 0x9d, 0xa1, 0x8a, 0x47, 0x1b, + 0xef, 0xa9, 0xca, 0x83, 0x37, 0x72, 0xbc, 0x87, 0x04, 0xef, 0x6a, 0x4f, 0xde, 0x92, 0x43, 0x8e, + 0x78, 0x55, 0xe5, 0x75, 0x13, 0xf3, 0xb8, 0x43, 0xdc, 0xac, 0x5b, 0xd0, 0x8a, 0xca, 0x60, 0x97, + 0x8d, 0x4a, 0xe2, 0xc9, 0xbe, 0xaa, 0x66, 0xa5, 0x13, 0xe3, 0xfc, 0xc3, 0xf1, 0xd6, 0x39, 0x27, + 0x71, 0x76, 0x17, 0xb8, 0x59, 0x91, 0x68, 0x37, 0xaa, 0x10, 0x8b, 0x60, 0xf8, 0x1e, 0xf6, 0x5b, + 0x44, 0x65, 0x6c, 0x3c, 0xa7, 0x3c, 0xd5, 0x7c, 0x8d, 0xd1, 0x50, 0x3d, 0x37, 0xb9, 0x1b, 0x95, + 0xc1, 0xb9, 0xe3, 0x00, 0x9b, 0x74, 0xbb, 0x45, 0xbd, 0x5b, 0x31, 0x6e, 0x10, 0x0f, 0xad, 0x80, + 0x4a, 0xe7, 0x95, 0x2c, 0xe4, 0x39, 0x30, 0x12, 0x27, 0x94, 0xb8, 0x2a, 0x6c, 0x35, 0x42, 0x4b, + 0xc0, 0x2c, 0x56, 0xf6, 0x26, 0xab, 0x35, 0x6e, 0x86, 0x77, 0x18, 0x2c, 0x83, 0xd3, 0xd8, 0xf3, + 0x22, 0xc2, 0x53, 0x58, 0x3a, 0x44, 0x04, 0x20, 0x3d, 0xae, 0x3d, 0x2a, 0x8f, 0x71, 0xdc, 0xca, + 0xa2, 0xca, 0x11, 0xac, 0x82, 0x17, 0xc9, 0xfd, 0x26, 0x8d, 0x44, 0x12, 0xdd, 0x98, 0x06, 0x44, + 0x14, 0xc1, 0x19, 0x67, 0xf4, 0x78, 0xfa, 0x36, 0x0d, 0x08, 0x6a, 0xea, 0xfb, 0xd6, 0x21, 0x3b, + 0x38, 0xca, 0xfa, 0x76, 0x0d, 0xbc, 0x20, 0x5a, 0xd5, 0xcd, 0xf1, 0xdd, 0x28, 0xff, 0x73, 0x30, + 0x39, 0xb6, 0x8b, 0x03, 0x7f, 0x15, 0xe5, 0x96, 0x91, 0xf3, 0xbc, 0x18, 0xaf, 0xcb, 0xe1, 0xea, + 0xb3, 0x0f, 0x1e, 0x4e, 0x96, 0xfe, 0x7e, 0x38, 0x59, 0x42, 0x07, 0x86, 0xbe, 0xd7, 0xd3, 0x90, + 0x4a, 0xdd, 0x07, 0x49, 0x33, 0x24, 0x33, 0x69, 0x33, 0x5c, 0xea, 0xab, 0x19, 0xe4, 0x29, 0xc7, + 0x2d, 0x21, 0x4e, 0x80, 0x75, 0x30, 0x1c, 0x27, 0xd9, 0x2b, 0x0f, 0x89, 0xa3, 0x26, 0x3a, 0xd6, + 0xc4, 0x75, 0x52, 0x13, 0x65, 0x71, 0x25, 0xc1, 0x7e, 0xf3, 0xfb, 0xe4, 0xa5, 0x3a, 0x8d, 0xef, + 0xb6, 0xb6, 0xac, 0x1a, 0x0b, 0x6c, 0xe5, 0x67, 0xf2, 0xeb, 0x32, 0xf7, 0x1a, 0x76, 0xbc, 0xdb, + 0x24, 0x3c, 0xc5, 0x70, 0x47, 0x9e, 0x3f, 0xff, 0xef, 0x28, 0x18, 0x16, 0x02, 0xe1, 0x8f, 0x06, + 0x38, 0xaf, 0x71, 0x0b, 0xb8, 0x58, 0x94, 0xd2, 0x87, 0x29, 0x99, 0x4b, 0x83, 0xc2, 0xe4, 0xa3, + 0x44, 0xab, 0x9f, 0x3f, 0xf9, 0xf3, 0xcb, 0xa1, 0x05, 0x38, 0x6f, 0x17, 0x2c, 0x3d, 0x56, 0x50, + 0x97, 0x27, 0x58, 0x57, 0x5e, 0x21, 0xee, 0xd6, 0xae, 0x4b, 0x3d, 0x7b, 0x8f, 0x7a, 0xfb, 0xf0, + 0x89, 0x01, 0x4c, 0xbd, 0x8f, 0xc0, 0x95, 0x41, 0x28, 0xb5, 0x5b, 0x97, 0xf9, 0xe6, 0xff, 0x40, + 0x2a, 0x3d, 0xef, 0x0a, 0x3d, 0xab, 0x70, 0x65, 0x00, 0x3d, 0xc2, 0x19, 0xed, 0x3d, 0xf1, 0xb5, + 0x0f, 0x7f, 0x31, 0xc0, 0xb8, 0xd6, 0x6e, 0xe0, 0x72, 0xff, 0xd4, 0x72, 0xfe, 0x66, 0xae, 0x0c, + 0x0e, 0x54, 0x92, 0xd6, 0x84, 0xa4, 0x65, 0xb8, 0xd8, 0xaf, 0xa4, 0xa4, 0xc1, 0x3c, 0x7b, 0x4f, + 0xf4, 0xd9, 0x3e, 0xfc, 0xc1, 0x00, 0xe7, 0x35, 0xee, 0xa2, 0x2d, 0xb6, 0xee, 0xee, 0xa7, 0x2d, + 0xb6, 0x1e, 0x26, 0x86, 0x66, 0x85, 0x92, 0x19, 0x38, 0xdd, 0xa7, 0x12, 0x0e, 0x7f, 0x36, 0xc0, + 0xb8, 0xd6, 0x39, 0xb4, 0xc9, 0xe8, 0x65, 0x4a, 0xda, 0x64, 0xf4, 0x34, 0x29, 0xb4, 0x2c, 0x24, + 0xcc, 0x41, 0xbb, 0x28, 0xc1, 0xc7, 0x3c, 0x76, 0x3b, 0x67, 0x84, 0x7a, 0xf0, 0x27, 0x51, 0x56, + 0x1a, 0x83, 0xea, 0x52, 0x56, 0xdd, 0xbd, 0xaf, 0x4b, 0x59, 0xf5, 0xf0, 0x42, 0xb4, 0x24, 0x94, + 0xcc, 0x42, 0xab, 0x53, 0x32, 0x62, 0xec, 0x9f, 0x90, 0xe2, 0xb9, 0x58, 0x52, 0xfd, 0xca, 0x00, + 0x67, 0x0b, 0x76, 0x07, 0xa7, 0xbb, 0xf1, 0x68, 0xdf, 0x69, 0xce, 0xf6, 0xbb, 0x33, 0x63, 0x7a, + 0x59, 0x30, 0xad, 0xc2, 0x29, 0x1d, 0x53, 0x5f, 0xa0, 0x5c, 0x2e, 0xa9, 0x7c, 0x6f, 0x80, 0x57, + 0x3a, 0xbb, 0xea, 0x1b, 0xfd, 0xf4, 0x60, 0xba, 0xdb, 0x5c, 0x18, 0x64, 0x77, 0x46, 0xf6, 0xaa, + 0x20, 0xbb, 0x04, 0x17, 0x7a, 0xd6, 0xb8, 
0xcf, 0x6a, 0x0d, 0x97, 0x86, 0x77, 0x98, 0xbd, 0xa7, + 0xcc, 0x71, 0x1f, 0x7e, 0x06, 0x46, 0xe4, 0xfb, 0x09, 0xbc, 0xa0, 0x89, 0x9e, 0x7b, 0x0d, 0x32, + 0xa7, 0x7a, 0xec, 0x52, 0xa4, 0xa6, 0x1e, 0x24, 0x2f, 0x3a, 0x82, 0x99, 0x09, 0xcb, 0x45, 0x66, + 0xf2, 0x05, 0x08, 0xfe, 0xda, 0xf9, 0xea, 0x93, 0xbe, 0x39, 0xc8, 0xd5, 0x97, 0xfb, 0x8b, 0x30, + 0xc8, 0xd5, 0x97, 0x37, 0x7a, 0x74, 0x43, 0x50, 0x5e, 0x87, 0xef, 0x14, 0x29, 0xef, 0xe5, 0xfe, + 0x56, 0xec, 0x6b, 0xae, 0x42, 0x65, 0xf2, 0x1b, 0x57, 0x1f, 0x1d, 0x56, 0x8c, 0xc7, 0x87, 0x15, + 0xe3, 0x8f, 0xc3, 0x8a, 0xf1, 0xc5, 0x51, 0xa5, 0xf4, 0xf8, 0xa8, 0x52, 0xfa, 0xed, 0xa8, 0x52, + 0xfa, 0x04, 0x15, 0x9d, 0x5c, 0xc4, 0xba, 0x2f, 0xa2, 0x09, 0x27, 0xdf, 0x1a, 0x11, 0x6f, 0xa2, + 0x57, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x09, 0xe3, 0xab, 0xb7, 0x87, 0x0f, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Query for individual tokenize share record information by share by id + TokenizeShareRecordById(ctx context.Context, in *QueryTokenizeShareRecordByIdRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordByIdResponse, error) + // Query for individual tokenize share record information by share denom + TokenizeShareRecordByDenom(ctx context.Context, in *QueryTokenizeShareRecordByDenomRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordByDenomResponse, error) + // Query tokenize share records by address + TokenizeShareRecordsOwned(ctx context.Context, in *QueryTokenizeShareRecordsOwnedRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordsOwnedResponse, error) + // Query for all tokenize share records + AllTokenizeShareRecords(ctx context.Context, in *QueryAllTokenizeShareRecordsRequest, opts ...grpc.CallOption) (*QueryAllTokenizeShareRecordsResponse, error) + // Query for last tokenize share record id + LastTokenizeShareRecordId(ctx context.Context, in *QueryLastTokenizeShareRecordIdRequest, opts ...grpc.CallOption) (*QueryLastTokenizeShareRecordIdResponse, error) + // Query for total tokenized staked assets + TotalTokenizeSharedAssets(ctx context.Context, in *QueryTotalTokenizeSharedAssetsRequest, opts ...grpc.CallOption) (*QueryTotalTokenizeSharedAssetsResponse, error) + // Query for total liquid staked (including tokenized shares or owned by an + // liquid staking provider) + TotalLiquidStaked(ctx context.Context, in *QueryTotalLiquidStaked, opts ...grpc.CallOption) (*QueryTotalLiquidStakedResponse, error) + // Query tokenize share locks + TokenizeShareLockInfo(ctx context.Context, in *QueryTokenizeShareLockInfo, opts ...grpc.CallOption) (*QueryTokenizeShareLockInfoResponse, error) + // Parameters queries the lsm parameters. 
+ Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // TokenizeShareRecordReward queries the tokenize share record rewards + TokenizeShareRecordReward(ctx context.Context, in *QueryTokenizeShareRecordRewardRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordRewardResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) TokenizeShareRecordById(ctx context.Context, in *QueryTokenizeShareRecordByIdRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordByIdResponse, error) { + out := new(QueryTokenizeShareRecordByIdResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TokenizeShareRecordById", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TokenizeShareRecordByDenom(ctx context.Context, in *QueryTokenizeShareRecordByDenomRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordByDenomResponse, error) { + out := new(QueryTokenizeShareRecordByDenomResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TokenizeShareRecordByDenom", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TokenizeShareRecordsOwned(ctx context.Context, in *QueryTokenizeShareRecordsOwnedRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordsOwnedResponse, error) { + out := new(QueryTokenizeShareRecordsOwnedResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TokenizeShareRecordsOwned", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllTokenizeShareRecords(ctx context.Context, in *QueryAllTokenizeShareRecordsRequest, opts ...grpc.CallOption) (*QueryAllTokenizeShareRecordsResponse, error) { + out := new(QueryAllTokenizeShareRecordsResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/AllTokenizeShareRecords", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) LastTokenizeShareRecordId(ctx context.Context, in *QueryLastTokenizeShareRecordIdRequest, opts ...grpc.CallOption) (*QueryLastTokenizeShareRecordIdResponse, error) { + out := new(QueryLastTokenizeShareRecordIdResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/LastTokenizeShareRecordId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TotalTokenizeSharedAssets(ctx context.Context, in *QueryTotalTokenizeSharedAssetsRequest, opts ...grpc.CallOption) (*QueryTotalTokenizeSharedAssetsResponse, error) { + out := new(QueryTotalTokenizeSharedAssetsResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TotalTokenizeSharedAssets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TotalLiquidStaked(ctx context.Context, in *QueryTotalLiquidStaked, opts ...grpc.CallOption) (*QueryTotalLiquidStakedResponse, error) { + out := new(QueryTotalLiquidStakedResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TotalLiquidStaked", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TokenizeShareLockInfo(ctx context.Context, in *QueryTokenizeShareLockInfo, opts ...grpc.CallOption) (*QueryTokenizeShareLockInfoResponse, error) { + out := new(QueryTokenizeShareLockInfoResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TokenizeShareLockInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TokenizeShareRecordReward(ctx context.Context, in *QueryTokenizeShareRecordRewardRequest, opts ...grpc.CallOption) (*QueryTokenizeShareRecordRewardResponse, error) { + out := new(QueryTokenizeShareRecordRewardResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Query/TokenizeShareRecordReward", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Query for individual tokenize share record information by share by id + TokenizeShareRecordById(context.Context, *QueryTokenizeShareRecordByIdRequest) (*QueryTokenizeShareRecordByIdResponse, error) + // Query for individual tokenize share record information by share denom + TokenizeShareRecordByDenom(context.Context, *QueryTokenizeShareRecordByDenomRequest) (*QueryTokenizeShareRecordByDenomResponse, error) + // Query tokenize share records by address + TokenizeShareRecordsOwned(context.Context, *QueryTokenizeShareRecordsOwnedRequest) (*QueryTokenizeShareRecordsOwnedResponse, error) + // Query for all tokenize share records + AllTokenizeShareRecords(context.Context, *QueryAllTokenizeShareRecordsRequest) (*QueryAllTokenizeShareRecordsResponse, error) + // Query for last tokenize share record id + LastTokenizeShareRecordId(context.Context, *QueryLastTokenizeShareRecordIdRequest) (*QueryLastTokenizeShareRecordIdResponse, error) + // Query for total tokenized staked assets + TotalTokenizeSharedAssets(context.Context, *QueryTotalTokenizeSharedAssetsRequest) (*QueryTotalTokenizeSharedAssetsResponse, error) + // Query for total liquid staked (including tokenized shares or owned by an + // liquid staking provider) + TotalLiquidStaked(context.Context, *QueryTotalLiquidStaked) (*QueryTotalLiquidStakedResponse, error) + // Query tokenize share locks + TokenizeShareLockInfo(context.Context, *QueryTokenizeShareLockInfo) (*QueryTokenizeShareLockInfoResponse, error) + // Parameters queries the lsm parameters. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // TokenizeShareRecordReward queries the tokenize share record rewards + TokenizeShareRecordReward(context.Context, *QueryTokenizeShareRecordRewardRequest) (*QueryTokenizeShareRecordRewardResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) TokenizeShareRecordById(ctx context.Context, req *QueryTokenizeShareRecordByIdRequest) (*QueryTokenizeShareRecordByIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TokenizeShareRecordById not implemented") +} +func (*UnimplementedQueryServer) TokenizeShareRecordByDenom(ctx context.Context, req *QueryTokenizeShareRecordByDenomRequest) (*QueryTokenizeShareRecordByDenomResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TokenizeShareRecordByDenom not implemented") +} +func (*UnimplementedQueryServer) TokenizeShareRecordsOwned(ctx context.Context, req *QueryTokenizeShareRecordsOwnedRequest) (*QueryTokenizeShareRecordsOwnedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TokenizeShareRecordsOwned not implemented") +} +func (*UnimplementedQueryServer) AllTokenizeShareRecords(ctx context.Context, req *QueryAllTokenizeShareRecordsRequest) (*QueryAllTokenizeShareRecordsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllTokenizeShareRecords not implemented") +} +func (*UnimplementedQueryServer) LastTokenizeShareRecordId(ctx context.Context, req *QueryLastTokenizeShareRecordIdRequest) (*QueryLastTokenizeShareRecordIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LastTokenizeShareRecordId not implemented") +} +func (*UnimplementedQueryServer) TotalTokenizeSharedAssets(ctx context.Context, req *QueryTotalTokenizeSharedAssetsRequest) (*QueryTotalTokenizeSharedAssetsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TotalTokenizeSharedAssets not implemented") +} +func (*UnimplementedQueryServer) TotalLiquidStaked(ctx context.Context, req *QueryTotalLiquidStaked) (*QueryTotalLiquidStakedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TotalLiquidStaked not implemented") +} +func (*UnimplementedQueryServer) TokenizeShareLockInfo(ctx context.Context, req *QueryTokenizeShareLockInfo) (*QueryTokenizeShareLockInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TokenizeShareLockInfo not implemented") +} +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) TokenizeShareRecordReward(ctx context.Context, req *QueryTokenizeShareRecordRewardRequest) (*QueryTokenizeShareRecordRewardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TokenizeShareRecordReward not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_TokenizeShareRecordById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTokenizeShareRecordByIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TokenizeShareRecordById(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TokenizeShareRecordById", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TokenizeShareRecordById(ctx, req.(*QueryTokenizeShareRecordByIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Query_TokenizeShareRecordByDenom_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTokenizeShareRecordByDenomRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TokenizeShareRecordByDenom(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TokenizeShareRecordByDenom", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TokenizeShareRecordByDenom(ctx, req.(*QueryTokenizeShareRecordByDenomRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TokenizeShareRecordsOwned_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTokenizeShareRecordsOwnedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TokenizeShareRecordsOwned(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TokenizeShareRecordsOwned", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TokenizeShareRecordsOwned(ctx, req.(*QueryTokenizeShareRecordsOwnedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllTokenizeShareRecords_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllTokenizeShareRecordsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllTokenizeShareRecords(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/AllTokenizeShareRecords", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllTokenizeShareRecords(ctx, req.(*QueryAllTokenizeShareRecordsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_LastTokenizeShareRecordId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryLastTokenizeShareRecordIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).LastTokenizeShareRecordId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/LastTokenizeShareRecordId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).LastTokenizeShareRecordId(ctx, req.(*QueryLastTokenizeShareRecordIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TotalTokenizeSharedAssets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTotalTokenizeSharedAssetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TotalTokenizeSharedAssets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TotalTokenizeSharedAssets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TotalTokenizeSharedAssets(ctx, 
req.(*QueryTotalTokenizeSharedAssetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TotalLiquidStaked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTotalLiquidStaked) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TotalLiquidStaked(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TotalLiquidStaked", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TotalLiquidStaked(ctx, req.(*QueryTotalLiquidStaked)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TokenizeShareLockInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTokenizeShareLockInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TokenizeShareLockInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TokenizeShareLockInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TokenizeShareLockInfo(ctx, req.(*QueryTokenizeShareLockInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TokenizeShareRecordReward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTokenizeShareRecordRewardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TokenizeShareRecordReward(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Query/TokenizeShareRecordReward", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TokenizeShareRecordReward(ctx, req.(*QueryTokenizeShareRecordRewardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "gaia.lsm.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TokenizeShareRecordById", + Handler: _Query_TokenizeShareRecordById_Handler, + }, + { + MethodName: "TokenizeShareRecordByDenom", + Handler: _Query_TokenizeShareRecordByDenom_Handler, + }, + { + MethodName: "TokenizeShareRecordsOwned", + Handler: _Query_TokenizeShareRecordsOwned_Handler, + }, + { + MethodName: "AllTokenizeShareRecords", + Handler: _Query_AllTokenizeShareRecords_Handler, + }, + { + MethodName: "LastTokenizeShareRecordId", + Handler: _Query_LastTokenizeShareRecordId_Handler, + }, + { + MethodName: "TotalTokenizeSharedAssets", + Handler: 
_Query_TotalTokenizeSharedAssets_Handler, + }, + { + MethodName: "TotalLiquidStaked", + Handler: _Query_TotalLiquidStaked_Handler, + }, + { + MethodName: "TokenizeShareLockInfo", + Handler: _Query_TokenizeShareLockInfo_Handler, + }, + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "TokenizeShareRecordReward", + Handler: _Query_TokenizeShareRecordReward_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gaia/lsm/v1beta1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordByIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordByIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordByIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordByIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordByIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordByIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordByDenomRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordByDenomRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} 
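For context (not part of the generated file): a minimal sketch of how a caller might exercise this generated Query service over gRPC. It assumes the generated types are exposed from github.com/cosmos/gaia/v22/x/lsm/types, that a node serves plaintext gRPC on localhost:9090, and that the owner address is a placeholder; these are illustrative assumptions, not something established by this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated lsm query types shown above.
	lsmtypes "github.com/cosmos/gaia/v22/x/lsm/types"
)

func main() {
	// Dial a node's gRPC endpoint (address and plaintext credentials are assumptions for the example).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// NewQueryClient takes any connection implementing the gogoproto grpc.ClientConn
	// interface (Invoke/NewStream), which *grpc.ClientConn satisfies.
	client := lsmtypes.NewQueryClient(conn)

	// Query the tokenize share records owned by a (placeholder) address; the RPC is routed
	// to /gaia.lsm.v1beta1.Query/TokenizeShareRecordsOwned as registered in the service descriptor.
	resp, err := client.TokenizeShareRecordsOwned(context.Background(),
		&lsmtypes.QueryTokenizeShareRecordsOwnedRequest{Owner: "cosmos1..."})
	if err != nil {
		log.Fatalf("TokenizeShareRecordsOwned: %v", err)
	}
	for _, record := range resp.Records {
		fmt.Printf("%+v\n", record)
	}
}

The same pattern applies to the other methods on QueryClient (Params, TotalLiquidStaked, TokenizeShareRecordReward, and so on): construct the request type, call the method, and read the typed response.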
+ +func (m *QueryTokenizeShareRecordByDenomRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordByDenomResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordByDenomResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordByDenomResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordsOwnedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordsOwnedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordsOwnedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordsOwnedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordsOwnedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordsOwnedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Records) > 0 { + for iNdEx := len(m.Records) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Records[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllTokenizeShareRecordsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllTokenizeShareRecordsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllTokenizeShareRecordsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllTokenizeShareRecordsResponse) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllTokenizeShareRecordsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllTokenizeShareRecordsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Records) > 0 { + for iNdEx := len(m.Records) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Records[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryLastTokenizeShareRecordIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLastTokenizeShareRecordIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLastTokenizeShareRecordIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryLastTokenizeShareRecordIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLastTokenizeShareRecordIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLastTokenizeShareRecordIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryTotalTokenizeSharedAssetsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTotalTokenizeSharedAssetsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTotalTokenizeSharedAssetsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryTotalTokenizeSharedAssetsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTotalTokenizeSharedAssetsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTotalTokenizeSharedAssetsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryTotalLiquidStaked) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTotalLiquidStaked) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTotalLiquidStaked) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryTotalLiquidStakedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTotalLiquidStakedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTotalLiquidStakedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tokens) > 0 { + i -= len(m.Tokens) + copy(dAtA[i:], m.Tokens) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Tokens))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareLockInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareLockInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareLockInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareLockInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareLockInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareLockInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpirationTime) > 0 { + i -= len(m.ExpirationTime) + copy(dAtA[i:], m.ExpirationTime) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ExpirationTime))) + i-- + dAtA[i] = 0x12 + } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordRewardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordRewardRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordRewardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OwnerAddress) > 0 { + i -= len(m.OwnerAddress) + copy(dAtA[i:], 
m.OwnerAddress) + i = encodeVarintQuery(dAtA, i, uint64(len(m.OwnerAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTokenizeShareRecordRewardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTokenizeShareRecordRewardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTokenizeShareRecordRewardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Total) > 0 { + for iNdEx := len(m.Total) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Total[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Rewards) > 0 { + for iNdEx := len(m.Rewards) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rewards[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryTokenizeShareRecordByIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovQuery(uint64(m.Id)) + } + return n +} + +func (m *QueryTokenizeShareRecordByIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Record.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryTokenizeShareRecordByDenomRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTokenizeShareRecordByDenomResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Record.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryTokenizeShareRecordsOwnedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTokenizeShareRecordsOwnedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Records) > 0 { + for _, e := range m.Records { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryAllTokenizeShareRecordsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllTokenizeShareRecordsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Records) > 0 { + for _, e := range m.Records { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + 
sovQuery(uint64(l)) + } + return n +} + +func (m *QueryLastTokenizeShareRecordIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryLastTokenizeShareRecordIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovQuery(uint64(m.Id)) + } + return n +} + +func (m *QueryTotalTokenizeSharedAssetsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryTotalTokenizeSharedAssetsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Value.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryTotalLiquidStaked) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryTotalLiquidStakedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tokens) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTokenizeShareLockInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTokenizeShareLockInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Status) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ExpirationTime) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTokenizeShareRecordRewardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OwnerAddress) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTokenizeShareRecordRewardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rewards) > 0 { + for _, e := range m.Rewards { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if len(m.Total) > 0 { + for _, e := range m.Total { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordByIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordByIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByIdResponse: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordByDenomRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByDenomRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByDenomRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordByDenomResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByDenomResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordByDenomResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordsOwnedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordsOwnedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordsOwnedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordsOwnedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordsOwnedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordsOwnedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Records", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Records = append(m.Records, TokenizeShareRecord{}) + if err := m.Records[len(m.Records)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllTokenizeShareRecordsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllTokenizeShareRecordsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllTokenizeShareRecordsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllTokenizeShareRecordsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllTokenizeShareRecordsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllTokenizeShareRecordsResponse: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Records", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Records = append(m.Records, TokenizeShareRecord{}) + if err := m.Records[len(m.Records)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLastTokenizeShareRecordIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLastTokenizeShareRecordIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLastTokenizeShareRecordIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLastTokenizeShareRecordIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLastTokenizeShareRecordIdResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLastTokenizeShareRecordIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTotalTokenizeSharedAssetsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTotalTokenizeSharedAssetsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTotalTokenizeSharedAssetsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTotalTokenizeSharedAssetsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTotalTokenizeSharedAssetsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTotalTokenizeSharedAssetsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTotalLiquidStaked) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTotalLiquidStaked: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTotalLiquidStaked: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTotalLiquidStakedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTotalLiquidStakedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTotalLiquidStakedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tokens", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tokens = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareLockInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareLockInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryTokenizeShareLockInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareLockInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareLockInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareLockInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationTime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpirationTime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordRewardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordRewardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordRewardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTokenizeShareRecordRewardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTokenizeShareRecordRewardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTokenizeShareRecordRewardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rewards", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rewards = append(m.Rewards, TokenizeShareRecordReward{}) + if err := m.Rewards[len(m.Rewards)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Total = append(m.Total, types.DecCoin{}) + if err := m.Total[len(m.Total)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/lsm/types/query.pb.gw.go b/x/lsm/types/query.pb.gw.go new file mode 100644 index 00000000000..7b4c7e43eca --- /dev/null +++ b/x/lsm/types/query.pb.gw.go @@ -0,0 +1,936 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: gaia/lsm/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_TokenizeShareRecordById_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordByIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.TokenizeShareRecordById(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TokenizeShareRecordById_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordByIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.TokenizeShareRecordById(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TokenizeShareRecordByDenom_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordByDenomRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denom"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denom") + } + + protoReq.Denom, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denom", err) + } + + msg, err := client.TokenizeShareRecordByDenom(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TokenizeShareRecordByDenom_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordByDenomRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denom"] + if !ok { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denom") + } + + protoReq.Denom, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denom", err) + } + + msg, err := server.TokenizeShareRecordByDenom(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TokenizeShareRecordsOwned_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordsOwnedRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := client.TokenizeShareRecordsOwned(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TokenizeShareRecordsOwned_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordsOwnedRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := server.TokenizeShareRecordsOwned(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_AllTokenizeShareRecords_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_AllTokenizeShareRecords_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllTokenizeShareRecordsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllTokenizeShareRecords_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AllTokenizeShareRecords(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllTokenizeShareRecords_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllTokenizeShareRecordsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllTokenizeShareRecords_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AllTokenizeShareRecords(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_LastTokenizeShareRecordId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLastTokenizeShareRecordIdRequest + var metadata runtime.ServerMetadata + + msg, err := client.LastTokenizeShareRecordId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_LastTokenizeShareRecordId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLastTokenizeShareRecordIdRequest + var metadata runtime.ServerMetadata + + msg, err := server.LastTokenizeShareRecordId(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TotalTokenizeSharedAssets_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTotalTokenizeSharedAssetsRequest + var metadata runtime.ServerMetadata + + msg, err := client.TotalTokenizeSharedAssets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TotalTokenizeSharedAssets_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTotalTokenizeSharedAssetsRequest + var metadata runtime.ServerMetadata + + msg, err := server.TotalTokenizeSharedAssets(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TotalLiquidStaked_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTotalLiquidStaked + var metadata runtime.ServerMetadata + + msg, err := client.TotalLiquidStaked(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TotalLiquidStaked_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTotalLiquidStaked + var metadata runtime.ServerMetadata + + msg, err := server.TotalLiquidStaked(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TokenizeShareLockInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareLockInfo + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := client.TokenizeShareLockInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TokenizeShareLockInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareLockInfo + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := server.TokenizeShareLockInfo(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TokenizeShareRecordReward_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordRewardRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner_address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner_address") + } + + protoReq.OwnerAddress, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner_address", err) + } + + msg, err := client.TokenizeShareRecordReward(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TokenizeShareRecordReward_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTokenizeShareRecordRewardRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner_address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner_address") + } + + protoReq.OwnerAddress, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner_address", err) + } + + msg, err := server.TokenizeShareRecordReward(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. 
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_TokenizeShareRecordById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TokenizeShareRecordById_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TokenizeShareRecordByDenom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TokenizeShareRecordByDenom_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordByDenom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TokenizeShareRecordsOwned_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TokenizeShareRecordsOwned_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordsOwned_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AllTokenizeShareRecords_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllTokenizeShareRecords_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllTokenizeShareRecords_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_LastTokenizeShareRecordId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_LastTokenizeShareRecordId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_LastTokenizeShareRecordId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TotalTokenizeSharedAssets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TotalTokenizeSharedAssets_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TotalTokenizeSharedAssets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TotalLiquidStaked_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TotalLiquidStaked_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TotalLiquidStaked_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TokenizeShareLockInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TokenizeShareLockInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareLockInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TokenizeShareRecordReward_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TokenizeShareRecordReward_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordReward_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_TokenizeShareRecordById_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TokenizeShareRecordById_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordById_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TokenizeShareRecordByDenom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TokenizeShareRecordByDenom_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordByDenom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TokenizeShareRecordsOwned_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TokenizeShareRecordsOwned_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordsOwned_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllTokenizeShareRecords_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllTokenizeShareRecords_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllTokenizeShareRecords_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_LastTokenizeShareRecordId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_LastTokenizeShareRecordId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_LastTokenizeShareRecordId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TotalTokenizeSharedAssets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TotalTokenizeSharedAssets_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TotalTokenizeSharedAssets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TotalLiquidStaked_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TotalLiquidStaked_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TotalLiquidStaked_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TokenizeShareLockInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TokenizeShareLockInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareLockInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TokenizeShareRecordReward_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TokenizeShareRecordReward_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TokenizeShareRecordReward_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_TokenizeShareRecordById_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"gaia", "lsm", "v1beta1", "tokenize_share_record_by_id", "id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_TokenizeShareRecordByDenom_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"gaia", "lsm", "v1beta1", "tokenize_share_record_by_denom", "denom"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_TokenizeShareRecordsOwned_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"gaia", "lsm", "v1beta1", "tokenize_share_record_owned", "owner"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllTokenizeShareRecords_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"gaia", "lsm", "v1beta1", "tokenize_share_records"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_LastTokenizeShareRecordId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"gaia", "lsm", "v1beta1", "last_tokenize_share_record_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_TotalTokenizeSharedAssets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"gaia", "lsm", "v1beta1", "total_tokenize_shared_assets"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_TotalLiquidStaked_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"gaia", "lsm", "v1beta1", "total_liquid_staked"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_TokenizeShareLockInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"gaia", "lsm", "v1beta1", "tokenize_share_lock_info", "address"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"gaia", "lsm", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_TokenizeShareRecordReward_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"gaia", "lsm", "v1beta1", "owner_address", "tokenize_share_record_rewards"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_TokenizeShareRecordById_0 = runtime.ForwardResponseMessage + + forward_Query_TokenizeShareRecordByDenom_0 = runtime.ForwardResponseMessage + + forward_Query_TokenizeShareRecordsOwned_0 = runtime.ForwardResponseMessage + + forward_Query_AllTokenizeShareRecords_0 = runtime.ForwardResponseMessage + + 
forward_Query_LastTokenizeShareRecordId_0 = runtime.ForwardResponseMessage + + forward_Query_TotalTokenizeSharedAssets_0 = runtime.ForwardResponseMessage + + forward_Query_TotalLiquidStaked_0 = runtime.ForwardResponseMessage + + forward_Query_TokenizeShareLockInfo_0 = runtime.ForwardResponseMessage + + forward_Query_Params_0 = runtime.ForwardResponseMessage + + forward_Query_TokenizeShareRecordReward_0 = runtime.ForwardResponseMessage +) diff --git a/x/lsm/types/tokenize_share_record.go b/x/lsm/types/tokenize_share_record.go new file mode 100644 index 00000000000..7f20bd21161 --- /dev/null +++ b/x/lsm/types/tokenize_share_record.go @@ -0,0 +1,22 @@ +package types + +import ( + "fmt" + "strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" +) + +func (r TokenizeShareRecord) GetModuleAddress() sdk.AccAddress { + // NOTE: The module name is intentionally hard coded so that, if this + // function were to move to a different module in future SDK version, + // it would not break all the address lookups + moduleName := "lsm" + return address.Module(moduleName, []byte(r.ModuleAccount)) +} + +func (r TokenizeShareRecord) GetShareTokenDenom() string { + return fmt.Sprintf("%s/%s", strings.ToLower(r.Validator), strconv.Itoa(int(r.Id))) //nolint:gosec +} diff --git a/x/lsm/types/tx.pb.go b/x/lsm/types/tx.pb.go new file mode 100644 index 00000000000..097d85e9294 --- /dev/null +++ b/x/lsm/types/tx.pb.go @@ -0,0 +1,3289 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gaia/lsm/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams is the Msg/UpdateParams request type. +type MsgUpdateParams struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/lsm parameters to update. + // + // NOTE: All parameters must be supplied. 
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +// MsgTokenizeShares tokenizes a delegation +type MsgTokenizeShares struct { + DelegatorAddress string `protobuf:"bytes,1,opt,name=delegator_address,json=delegatorAddress,proto3" json:"delegator_address,omitempty" yaml:"delegator_address"` + ValidatorAddress string `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty" yaml:"validator_address"` + Amount types.Coin `protobuf:"bytes,3,opt,name=amount,proto3" json:"amount"` + TokenizedShareOwner string `protobuf:"bytes,4,opt,name=tokenized_share_owner,json=tokenizedShareOwner,proto3" json:"tokenized_share_owner,omitempty"` +} + +func (m *MsgTokenizeShares) Reset() { *m = MsgTokenizeShares{} } +func (m *MsgTokenizeShares) String() string { return proto.CompactTextString(m) } +func (*MsgTokenizeShares) ProtoMessage() {} +func (*MsgTokenizeShares) Descriptor() ([]byte, []int) { + return 
fileDescriptor_96e4ea476f40e21e, []int{2} +} +func (m *MsgTokenizeShares) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTokenizeShares) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTokenizeShares.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTokenizeShares) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTokenizeShares.Merge(m, src) +} +func (m *MsgTokenizeShares) XXX_Size() int { + return m.Size() +} +func (m *MsgTokenizeShares) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTokenizeShares.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTokenizeShares proto.InternalMessageInfo + +// MsgTokenizeSharesResponse defines the Msg/MsgTokenizeShares response type. +type MsgTokenizeSharesResponse struct { + Amount types.Coin `protobuf:"bytes,1,opt,name=amount,proto3" json:"amount"` +} + +func (m *MsgTokenizeSharesResponse) Reset() { *m = MsgTokenizeSharesResponse{} } +func (m *MsgTokenizeSharesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTokenizeSharesResponse) ProtoMessage() {} +func (*MsgTokenizeSharesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{3} +} +func (m *MsgTokenizeSharesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTokenizeSharesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTokenizeSharesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTokenizeSharesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTokenizeSharesResponse.Merge(m, src) +} +func (m *MsgTokenizeSharesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTokenizeSharesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTokenizeSharesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTokenizeSharesResponse proto.InternalMessageInfo + +func (m *MsgTokenizeSharesResponse) GetAmount() types.Coin { + if m != nil { + return m.Amount + } + return types.Coin{} +} + +// MsgRedeemTokensForShares redeems a tokenized share back into a native +// delegation +type MsgRedeemTokensForShares struct { + DelegatorAddress string `protobuf:"bytes,1,opt,name=delegator_address,json=delegatorAddress,proto3" json:"delegator_address,omitempty" yaml:"delegator_address"` + Amount types.Coin `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount"` +} + +func (m *MsgRedeemTokensForShares) Reset() { *m = MsgRedeemTokensForShares{} } +func (m *MsgRedeemTokensForShares) String() string { return proto.CompactTextString(m) } +func (*MsgRedeemTokensForShares) ProtoMessage() {} +func (*MsgRedeemTokensForShares) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{4} +} +func (m *MsgRedeemTokensForShares) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRedeemTokensForShares) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRedeemTokensForShares.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRedeemTokensForShares) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_MsgRedeemTokensForShares.Merge(m, src) +} +func (m *MsgRedeemTokensForShares) XXX_Size() int { + return m.Size() +} +func (m *MsgRedeemTokensForShares) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRedeemTokensForShares.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRedeemTokensForShares proto.InternalMessageInfo + +// MsgRedeemTokensForSharesResponse defines the Msg/MsgRedeemTokensForShares +// response type. +type MsgRedeemTokensForSharesResponse struct { + Amount types.Coin `protobuf:"bytes,1,opt,name=amount,proto3" json:"amount"` +} + +func (m *MsgRedeemTokensForSharesResponse) Reset() { *m = MsgRedeemTokensForSharesResponse{} } +func (m *MsgRedeemTokensForSharesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRedeemTokensForSharesResponse) ProtoMessage() {} +func (*MsgRedeemTokensForSharesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{5} +} +func (m *MsgRedeemTokensForSharesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRedeemTokensForSharesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRedeemTokensForSharesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRedeemTokensForSharesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRedeemTokensForSharesResponse.Merge(m, src) +} +func (m *MsgRedeemTokensForSharesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRedeemTokensForSharesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRedeemTokensForSharesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRedeemTokensForSharesResponse proto.InternalMessageInfo + +func (m *MsgRedeemTokensForSharesResponse) GetAmount() types.Coin { + if m != nil { + return m.Amount + } + return types.Coin{} +} + +// MsgTransferTokenizeShareRecord transfer a tokenize share record +type MsgTransferTokenizeShareRecord struct { + TokenizeShareRecordId uint64 `protobuf:"varint,1,opt,name=tokenize_share_record_id,json=tokenizeShareRecordId,proto3" json:"tokenize_share_record_id,omitempty"` + Sender string `protobuf:"bytes,2,opt,name=sender,proto3" json:"sender,omitempty"` + NewOwner string `protobuf:"bytes,3,opt,name=new_owner,json=newOwner,proto3" json:"new_owner,omitempty"` +} + +func (m *MsgTransferTokenizeShareRecord) Reset() { *m = MsgTransferTokenizeShareRecord{} } +func (m *MsgTransferTokenizeShareRecord) String() string { return proto.CompactTextString(m) } +func (*MsgTransferTokenizeShareRecord) ProtoMessage() {} +func (*MsgTransferTokenizeShareRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{6} +} +func (m *MsgTransferTokenizeShareRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransferTokenizeShareRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransferTokenizeShareRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransferTokenizeShareRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransferTokenizeShareRecord.Merge(m, src) +} +func (m *MsgTransferTokenizeShareRecord) XXX_Size() int { + return m.Size() +} +func (m *MsgTransferTokenizeShareRecord) XXX_DiscardUnknown() { + 
xxx_messageInfo_MsgTransferTokenizeShareRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransferTokenizeShareRecord proto.InternalMessageInfo + +// MsgTransferTokenizeShareRecordResponse defines the +// Msg/MsgTransferTokenizeShareRecord response type. +type MsgTransferTokenizeShareRecordResponse struct { +} + +func (m *MsgTransferTokenizeShareRecordResponse) Reset() { + *m = MsgTransferTokenizeShareRecordResponse{} +} +func (m *MsgTransferTokenizeShareRecordResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTransferTokenizeShareRecordResponse) ProtoMessage() {} +func (*MsgTransferTokenizeShareRecordResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{7} +} +func (m *MsgTransferTokenizeShareRecordResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransferTokenizeShareRecordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransferTokenizeShareRecordResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransferTokenizeShareRecordResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransferTokenizeShareRecordResponse.Merge(m, src) +} +func (m *MsgTransferTokenizeShareRecordResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTransferTokenizeShareRecordResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransferTokenizeShareRecordResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransferTokenizeShareRecordResponse proto.InternalMessageInfo + +// MsgDisableTokenizeShares prevents the tokenization of shares for a given +// address +type MsgDisableTokenizeShares struct { + DelegatorAddress string `protobuf:"bytes,1,opt,name=delegator_address,json=delegatorAddress,proto3" json:"delegator_address,omitempty" yaml:"delegator_address"` +} + +func (m *MsgDisableTokenizeShares) Reset() { *m = MsgDisableTokenizeShares{} } +func (m *MsgDisableTokenizeShares) String() string { return proto.CompactTextString(m) } +func (*MsgDisableTokenizeShares) ProtoMessage() {} +func (*MsgDisableTokenizeShares) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{8} +} +func (m *MsgDisableTokenizeShares) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDisableTokenizeShares) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDisableTokenizeShares.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDisableTokenizeShares) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDisableTokenizeShares.Merge(m, src) +} +func (m *MsgDisableTokenizeShares) XXX_Size() int { + return m.Size() +} +func (m *MsgDisableTokenizeShares) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDisableTokenizeShares.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDisableTokenizeShares proto.InternalMessageInfo + +// MsgDisableTokenizeSharesResponse defines the Msg/DisableTokenizeShares +// response type. 
+type MsgDisableTokenizeSharesResponse struct { +} + +func (m *MsgDisableTokenizeSharesResponse) Reset() { *m = MsgDisableTokenizeSharesResponse{} } +func (m *MsgDisableTokenizeSharesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDisableTokenizeSharesResponse) ProtoMessage() {} +func (*MsgDisableTokenizeSharesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{9} +} +func (m *MsgDisableTokenizeSharesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDisableTokenizeSharesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDisableTokenizeSharesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDisableTokenizeSharesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDisableTokenizeSharesResponse.Merge(m, src) +} +func (m *MsgDisableTokenizeSharesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDisableTokenizeSharesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDisableTokenizeSharesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDisableTokenizeSharesResponse proto.InternalMessageInfo + +// MsgEnableTokenizeShares re-enables tokenization of shares for a given address +type MsgEnableTokenizeShares struct { + DelegatorAddress string `protobuf:"bytes,1,opt,name=delegator_address,json=delegatorAddress,proto3" json:"delegator_address,omitempty" yaml:"delegator_address"` +} + +func (m *MsgEnableTokenizeShares) Reset() { *m = MsgEnableTokenizeShares{} } +func (m *MsgEnableTokenizeShares) String() string { return proto.CompactTextString(m) } +func (*MsgEnableTokenizeShares) ProtoMessage() {} +func (*MsgEnableTokenizeShares) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{10} +} +func (m *MsgEnableTokenizeShares) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgEnableTokenizeShares) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgEnableTokenizeShares.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgEnableTokenizeShares) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgEnableTokenizeShares.Merge(m, src) +} +func (m *MsgEnableTokenizeShares) XXX_Size() int { + return m.Size() +} +func (m *MsgEnableTokenizeShares) XXX_DiscardUnknown() { + xxx_messageInfo_MsgEnableTokenizeShares.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgEnableTokenizeShares proto.InternalMessageInfo + +// MsgEnableTokenizeSharesResponse defines the Msg/EnableTokenizeShares response +// type. 
+type MsgEnableTokenizeSharesResponse struct { + CompletionTime time.Time `protobuf:"bytes,1,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time"` +} + +func (m *MsgEnableTokenizeSharesResponse) Reset() { *m = MsgEnableTokenizeSharesResponse{} } +func (m *MsgEnableTokenizeSharesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgEnableTokenizeSharesResponse) ProtoMessage() {} +func (*MsgEnableTokenizeSharesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{11} +} +func (m *MsgEnableTokenizeSharesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgEnableTokenizeSharesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgEnableTokenizeSharesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgEnableTokenizeSharesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgEnableTokenizeSharesResponse.Merge(m, src) +} +func (m *MsgEnableTokenizeSharesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgEnableTokenizeSharesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgEnableTokenizeSharesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgEnableTokenizeSharesResponse proto.InternalMessageInfo + +func (m *MsgEnableTokenizeSharesResponse) GetCompletionTime() time.Time { + if m != nil { + return m.CompletionTime + } + return time.Time{} +} + +// MsgWithdrawTokenizeShareRecordReward withdraws tokenize share rewards for a +// specific record +type MsgWithdrawTokenizeShareRecordReward struct { + OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty" yaml:"owner_address"` + RecordId uint64 `protobuf:"varint,2,opt,name=record_id,json=recordId,proto3" json:"record_id,omitempty"` +} + +func (m *MsgWithdrawTokenizeShareRecordReward) Reset() { *m = MsgWithdrawTokenizeShareRecordReward{} } +func (m *MsgWithdrawTokenizeShareRecordReward) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawTokenizeShareRecordReward) ProtoMessage() {} +func (*MsgWithdrawTokenizeShareRecordReward) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{12} +} +func (m *MsgWithdrawTokenizeShareRecordReward) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawTokenizeShareRecordReward) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawTokenizeShareRecordReward.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawTokenizeShareRecordReward) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawTokenizeShareRecordReward.Merge(m, src) +} +func (m *MsgWithdrawTokenizeShareRecordReward) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawTokenizeShareRecordReward) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawTokenizeShareRecordReward.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawTokenizeShareRecordReward proto.InternalMessageInfo + +// MsgWithdrawTokenizeShareRecordReward defines the +// Msg/WithdrawTokenizeShareRecordReward response type. 
+type MsgWithdrawTokenizeShareRecordRewardResponse struct { +} + +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) Reset() { + *m = MsgWithdrawTokenizeShareRecordRewardResponse{} +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) String() string { + return proto.CompactTextString(m) +} +func (*MsgWithdrawTokenizeShareRecordRewardResponse) ProtoMessage() {} +func (*MsgWithdrawTokenizeShareRecordRewardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{13} +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawTokenizeShareRecordRewardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawTokenizeShareRecordRewardResponse.Merge(m, src) +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawTokenizeShareRecordRewardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawTokenizeShareRecordRewardResponse proto.InternalMessageInfo + +// MsgWithdrawAllTokenizeShareRecordReward withdraws tokenize share rewards or +// all records owned by the designated owner +type MsgWithdrawAllTokenizeShareRecordReward struct { + OwnerAddress string `protobuf:"bytes,1,opt,name=owner_address,json=ownerAddress,proto3" json:"owner_address,omitempty" yaml:"owner_address"` +} + +func (m *MsgWithdrawAllTokenizeShareRecordReward) Reset() { + *m = MsgWithdrawAllTokenizeShareRecordReward{} +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawAllTokenizeShareRecordReward) ProtoMessage() {} +func (*MsgWithdrawAllTokenizeShareRecordReward) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{14} +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordReward.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordReward.Merge(m, src) +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordReward.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordReward proto.InternalMessageInfo + +// MsgWithdrawAllTokenizeShareRecordRewardResponse defines the +// Msg/WithdrawTokenizeShareRecordReward response type. 
+type MsgWithdrawAllTokenizeShareRecordRewardResponse struct { +} + +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) Reset() { + *m = MsgWithdrawAllTokenizeShareRecordRewardResponse{} +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) String() string { + return proto.CompactTextString(m) +} +func (*MsgWithdrawAllTokenizeShareRecordRewardResponse) ProtoMessage() {} +func (*MsgWithdrawAllTokenizeShareRecordRewardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_96e4ea476f40e21e, []int{15} +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordRewardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordRewardResponse.Merge(m, src) +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordRewardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawAllTokenizeShareRecordRewardResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "gaia.lsm.v1beta1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "gaia.lsm.v1beta1.MsgUpdateParamsResponse") + proto.RegisterType((*MsgTokenizeShares)(nil), "gaia.lsm.v1beta1.MsgTokenizeShares") + proto.RegisterType((*MsgTokenizeSharesResponse)(nil), "gaia.lsm.v1beta1.MsgTokenizeSharesResponse") + proto.RegisterType((*MsgRedeemTokensForShares)(nil), "gaia.lsm.v1beta1.MsgRedeemTokensForShares") + proto.RegisterType((*MsgRedeemTokensForSharesResponse)(nil), "gaia.lsm.v1beta1.MsgRedeemTokensForSharesResponse") + proto.RegisterType((*MsgTransferTokenizeShareRecord)(nil), "gaia.lsm.v1beta1.MsgTransferTokenizeShareRecord") + proto.RegisterType((*MsgTransferTokenizeShareRecordResponse)(nil), "gaia.lsm.v1beta1.MsgTransferTokenizeShareRecordResponse") + proto.RegisterType((*MsgDisableTokenizeShares)(nil), "gaia.lsm.v1beta1.MsgDisableTokenizeShares") + proto.RegisterType((*MsgDisableTokenizeSharesResponse)(nil), "gaia.lsm.v1beta1.MsgDisableTokenizeSharesResponse") + proto.RegisterType((*MsgEnableTokenizeShares)(nil), "gaia.lsm.v1beta1.MsgEnableTokenizeShares") + proto.RegisterType((*MsgEnableTokenizeSharesResponse)(nil), "gaia.lsm.v1beta1.MsgEnableTokenizeSharesResponse") + proto.RegisterType((*MsgWithdrawTokenizeShareRecordReward)(nil), "gaia.lsm.v1beta1.MsgWithdrawTokenizeShareRecordReward") + proto.RegisterType((*MsgWithdrawTokenizeShareRecordRewardResponse)(nil), "gaia.lsm.v1beta1.MsgWithdrawTokenizeShareRecordRewardResponse") + proto.RegisterType((*MsgWithdrawAllTokenizeShareRecordReward)(nil), "gaia.lsm.v1beta1.MsgWithdrawAllTokenizeShareRecordReward") + proto.RegisterType((*MsgWithdrawAllTokenizeShareRecordRewardResponse)(nil), "gaia.lsm.v1beta1.MsgWithdrawAllTokenizeShareRecordRewardResponse") +} + +func init() { proto.RegisterFile("gaia/lsm/v1beta1/tx.proto", fileDescriptor_96e4ea476f40e21e) } + +var fileDescriptor_96e4ea476f40e21e = []byte{ + // 982 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0x3a, 0x69, 0x14, 0x0f, 0xa5, 0x4d, 0x96, 0x94, 0xae, 0x37, 0x95, 0x1d, 0xa6, 0x08, + 0xd2, 0x40, 0x77, 0xb1, 0x91, 0x28, 0x32, 0x20, 0x14, 0x43, 0x2b, 0xf5, 0x60, 0x7e, 0x6c, 0x83, + 0x90, 0x00, 0xc9, 0x1a, 0x7b, 0xa7, 0x9b, 0x15, 0xbb, 0x3b, 0xd6, 0xce, 0x24, 0x6e, 0x38, 0x21, + 0x4e, 0xc0, 0xa9, 0x7f, 0x42, 0x8f, 0x48, 0x08, 0x29, 0x07, 0x38, 0x71, 0xe3, 0xd4, 0x63, 0xe9, + 0x05, 0x4e, 0x01, 0x25, 0x12, 0xe1, 0xdc, 0xbf, 0x00, 0xcd, 0xec, 0x78, 0xed, 0x5d, 0x8f, 0x1d, + 0x07, 0xc8, 0x25, 0xd9, 0x9d, 0xef, 0xbd, 0x37, 0xef, 0xfb, 0xf6, 0x9b, 0x1f, 0x06, 0x65, 0x0f, + 0xf9, 0xc8, 0x0e, 0x68, 0x68, 0xef, 0xd6, 0x3a, 0x98, 0xa1, 0x9a, 0xcd, 0xee, 0x59, 0xbd, 0x98, + 0x30, 0xa2, 0x2f, 0x71, 0xc8, 0x0a, 0x68, 0x68, 0x49, 0xc8, 0xac, 0x7a, 0x84, 0x78, 0x01, 0xb6, + 0x05, 0xde, 0xd9, 0xb9, 0x6b, 0x33, 0x3f, 0xc4, 0x94, 0xa1, 0xb0, 0x97, 0xa4, 0x98, 0x2b, 0x1e, + 0xf1, 0x88, 0x78, 0xb4, 0xf9, 0x93, 0x1c, 0x2d, 0x77, 0x09, 0x0d, 0x09, 0x6d, 0x27, 0x40, 0xf2, + 0x22, 0xa1, 0x4a, 0xf2, 0x66, 0x77, 0x10, 0xc5, 0x69, 0x07, 0x5d, 0xe2, 0x47, 0x12, 0x37, 0xc7, + 0xda, 0xe3, 0xfd, 0x24, 0xd8, 0x65, 0x99, 0x1b, 0x52, 0xcf, 0xde, 0xad, 0xf1, 0x7f, 0x12, 0x58, + 0x46, 0xa1, 0x1f, 0x11, 0x5b, 0xfc, 0x4d, 0x86, 0xe0, 0x4f, 0x1a, 0xb8, 0xd8, 0xa2, 0xde, 0x47, + 0x3d, 0x17, 0x31, 0xfc, 0x01, 0x8a, 0x51, 0x48, 0xf5, 0xd7, 0x40, 0x09, 0xed, 0xb0, 0x6d, 0x12, + 0xfb, 0x6c, 0xcf, 0xd0, 0xd6, 0xb4, 0xf5, 0x52, 0xd3, 0x78, 0xfc, 0xe3, 0xf5, 0x15, 0xd9, 0xe0, + 0xa6, 0xeb, 0xc6, 0x98, 0xd2, 0x3b, 0x2c, 0xf6, 0x23, 0xcf, 0x19, 0x86, 0xea, 0x6f, 0x80, 0x85, + 0x9e, 0xa8, 0x60, 0x14, 0xd7, 0xb4, 0xf5, 0xa7, 0xea, 0x86, 0x95, 0x17, 0xca, 0x4a, 0x66, 0x68, + 0x96, 0x1e, 0x1e, 0x54, 0x0b, 0xdf, 0x1d, 0xef, 0x6f, 0x68, 0x8e, 0x4c, 0x69, 0x5c, 0xff, 0xea, + 0x78, 0x7f, 0x63, 0x58, 0xec, 0xdb, 0xe3, 0xfd, 0x8d, 0x84, 0xe3, 0x3d, 0xc1, 0x32, 0xd7, 0x23, + 0x2c, 0x83, 0xcb, 0xb9, 0x21, 0x07, 0xd3, 0x1e, 0x89, 0x28, 0x86, 0xbf, 0x15, 0xc1, 0x72, 0x8b, + 0x7a, 0x5b, 0xe4, 0x73, 0x1c, 0xf9, 0x5f, 0xe0, 0x3b, 0xdb, 0x28, 0xc6, 0x54, 0xbf, 0x0d, 0x96, + 0x5d, 0x1c, 0x60, 0x0f, 0x31, 0x12, 0xb7, 0x51, 0x42, 0x41, 0x92, 0xbb, 0xf2, 0xe4, 0xa0, 0x6a, + 0xec, 0xa1, 0x30, 0x68, 0xc0, 0xb1, 0x10, 0xe8, 0x2c, 0xa5, 0x63, 0x92, 0x38, 0x2f, 0xb5, 0x8b, + 0x02, 0xdf, 0xcd, 0x94, 0x2a, 0xe6, 0x4b, 0x8d, 0x85, 0x40, 0x67, 0x29, 0x1d, 0x1b, 0x94, 0xba, + 0x01, 0x16, 0x50, 0x48, 0x76, 0x22, 0x66, 0xcc, 0x09, 0xc9, 0xca, 0x96, 0x14, 0x99, 0x7f, 0xf7, + 0x54, 0xb5, 0x77, 0x88, 0x1f, 0x35, 0xe7, 0xb9, 0x66, 0x8e, 0x0c, 0xd7, 0xeb, 0xe0, 0x12, 0x93, + 0x04, 0xdd, 0x36, 0xe5, 0x14, 0xdb, 0xa4, 0x1f, 0xe1, 0xd8, 0x98, 0xe7, 0x7d, 0x38, 0xcf, 0xa4, + 0xa0, 0xa0, 0xff, 0x3e, 0x87, 0x1a, 0x6f, 0x7f, 0xfd, 0xa0, 0x5a, 0xf8, 0xfb, 0x41, 0xb5, 0xc0, + 0xa5, 0x1e, 0x57, 0x83, 0x4b, 0x7e, 0x25, 0x2b, 0x79, 0x56, 0x43, 0xb8, 0x05, 0xca, 0x63, 0x83, + 0x03, 0xd9, 0x47, 0xa8, 0x68, 0xa7, 0xa2, 0x02, 0xff, 0xd2, 0x80, 0xd1, 0xa2, 0x9e, 0x83, 0x5d, + 0x8c, 0x43, 0x51, 0x9c, 0xde, 0x22, 0xf1, 0xff, 0xff, 0xd9, 0x86, 0x0d, 0x16, 0x4f, 0xd5, 0x60, + 0xe3, 0xd6, 0xc9, 0xba, 0x5d, 0xcd, 0xea, 0xa6, 0xe4, 0x02, 0x3f, 0x05, 0x6b, 0x93, 0xb0, 0xff, + 0xae, 0xe2, 0xaf, 0x1a, 0xa8, 0xf0, 0x8f, 0x13, 0xa3, 0x88, 0xde, 0xc5, 0x71, 0xe6, 0x23, 0x39, + 0xb8, 0x4b, 0x62, 0x57, 0xbf, 0x01, 0x8c, 0x81, 0x2d, 0xa4, 0x65, 0x62, 0x01, 0xb4, 0x7d, 0x57, + 0xcc, 0x36, 0xef, 0xa4, 0x9e, 0x1a, 0x49, 0xbb, 0xed, 0xea, 0xcf, 0x82, 0x05, 0x8a, 0x23, 0x17, + 
0xc7, 0x89, 0xcb, 0x1d, 0xf9, 0xa6, 0xaf, 0x82, 0x52, 0x84, 0xfb, 0xd2, 0x78, 0x73, 0x02, 0x5a, + 0x8c, 0x70, 0x3f, 0x71, 0xdb, 0xe6, 0xa8, 0x6a, 0x32, 0x83, 0x4b, 0x75, 0x2d, 0x67, 0xb1, 0xc9, + 0x0d, 0xc3, 0x75, 0xf0, 0xc2, 0xf4, 0x88, 0x74, 0xcd, 0xff, 0x90, 0x78, 0xe8, 0x5d, 0x9f, 0xa2, + 0x4e, 0x80, 0xcf, 0x6c, 0xe9, 0xff, 0x0b, 0x2b, 0x28, 0x5b, 0x82, 0x50, 0x58, 0x41, 0x89, 0xa5, + 0x9c, 0xbe, 0xd7, 0xc4, 0x1e, 0x77, 0x33, 0x3a, 0x5b, 0x4a, 0x37, 0x4f, 0xa6, 0x04, 0xb3, 0x94, + 0x54, 0x1d, 0xc1, 0x1e, 0xa8, 0x4e, 0x80, 0x52, 0x6f, 0xb7, 0xc0, 0xc5, 0x2e, 0x09, 0x7b, 0x01, + 0x66, 0x3e, 0x89, 0xda, 0xfc, 0x88, 0x94, 0x26, 0x37, 0xad, 0xe4, 0xfc, 0xb4, 0x06, 0xe7, 0xa7, + 0xb5, 0x35, 0x38, 0x3f, 0x9b, 0x8b, 0xdc, 0xe5, 0xf7, 0xff, 0xa8, 0x6a, 0xce, 0x85, 0x61, 0x32, + 0x87, 0xe1, 0x63, 0x0d, 0x3c, 0xdf, 0xa2, 0xde, 0xc7, 0x3e, 0xdb, 0x76, 0x63, 0xd4, 0x57, 0xda, + 0xa3, 0x8f, 0x62, 0x57, 0x7f, 0x0b, 0x3c, 0x2d, 0x2c, 0x9a, 0x13, 0xca, 0x78, 0x72, 0x50, 0x5d, + 0x49, 0x84, 0xca, 0xc0, 0xd0, 0x39, 0x2f, 0xde, 0x07, 0xfb, 0xc6, 0x2a, 0x28, 0x0d, 0xd7, 0x49, + 0x51, 0xac, 0x93, 0xc5, 0x58, 0x2e, 0x8d, 0xc6, 0x7b, 0xa3, 0xea, 0x65, 0xa7, 0xe1, 0xca, 0xd9, + 0x59, 0xe5, 0x4e, 0xec, 0x15, 0x5a, 0xe0, 0xe5, 0x59, 0xe2, 0x52, 0x93, 0xfc, 0xac, 0x81, 0x17, + 0x47, 0x12, 0x36, 0x83, 0xe0, 0xac, 0x74, 0x68, 0x7c, 0x38, 0x9d, 0x6a, 0x5d, 0x4d, 0x75, 0x5a, + 0x47, 0xb0, 0x06, 0x66, 0x0d, 0x1d, 0x10, 0xae, 0xff, 0x72, 0x0e, 0xcc, 0xb5, 0xa8, 0xa7, 0x7f, + 0x06, 0xce, 0x67, 0x2e, 0x2d, 0xcf, 0x8d, 0x5f, 0x36, 0x72, 0x17, 0x04, 0xf3, 0xda, 0x89, 0x21, + 0xa9, 0x55, 0x3b, 0xe0, 0x42, 0x6e, 0xc5, 0x5d, 0x55, 0x26, 0x67, 0x83, 0xcc, 0x97, 0x66, 0x08, + 0x4a, 0xe7, 0xe8, 0x83, 0x4b, 0xea, 0x33, 0x6f, 0x43, 0x59, 0x45, 0x19, 0x6b, 0xd6, 0x67, 0x8f, + 0x4d, 0x27, 0xfe, 0x46, 0x03, 0xab, 0xd3, 0xce, 0x89, 0x57, 0xd4, 0x2c, 0x26, 0x67, 0x98, 0xaf, + 0x9f, 0x36, 0x63, 0x54, 0x04, 0xf5, 0xa6, 0xad, 0x16, 0x41, 0x19, 0x3b, 0x41, 0x84, 0xa9, 0xbb, + 0xab, 0xce, 0xc0, 0x8a, 0x72, 0x67, 0x55, 0x9b, 0x44, 0x15, 0x6a, 0xd6, 0x66, 0x0e, 0x1d, 0xcc, + 0x6a, 0x9e, 0xfb, 0x92, 0x5f, 0x7a, 0x9b, 0x6f, 0x3e, 0x3c, 0xac, 0x68, 0x8f, 0x0e, 0x2b, 0xda, + 0x9f, 0x87, 0x15, 0xed, 0xfe, 0x51, 0xa5, 0xf0, 0xe8, 0xa8, 0x52, 0xf8, 0xfd, 0xa8, 0x52, 0xf8, + 0x04, 0x7a, 0x3e, 0xdb, 0xde, 0xe9, 0x58, 0x5d, 0x12, 0xca, 0x1f, 0x04, 0xa3, 0x5b, 0x08, 0xdb, + 0xeb, 0x61, 0xda, 0x59, 0x10, 0xdb, 0xe4, 0xab, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xed, 0x75, + 0xc9, 0x69, 0xa3, 0x0c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // UpdateParams defines an operation for updating the x/lsm module + // parameters. + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) + // TokenizeShares defines a method for tokenizing shares from a validator. + TokenizeShares(ctx context.Context, in *MsgTokenizeShares, opts ...grpc.CallOption) (*MsgTokenizeSharesResponse, error) + // RedeemTokensForShares defines a method for redeeming tokens from a + // validator for shares. 
+ RedeemTokensForShares(ctx context.Context, in *MsgRedeemTokensForShares, opts ...grpc.CallOption) (*MsgRedeemTokensForSharesResponse, error) + // TransferTokenizeShareRecord defines a method to transfer ownership of + // TokenizeShareRecord + TransferTokenizeShareRecord(ctx context.Context, in *MsgTransferTokenizeShareRecord, opts ...grpc.CallOption) (*MsgTransferTokenizeShareRecordResponse, error) + // DisableTokenizeShares defines a method to prevent the tokenization of an + // addresses stake + DisableTokenizeShares(ctx context.Context, in *MsgDisableTokenizeShares, opts ...grpc.CallOption) (*MsgDisableTokenizeSharesResponse, error) + // EnableTokenizeShares defines a method to re-enable the tokenization of an + // addresseses stake after it has been disabled + EnableTokenizeShares(ctx context.Context, in *MsgEnableTokenizeShares, opts ...grpc.CallOption) (*MsgEnableTokenizeSharesResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) TokenizeShares(ctx context.Context, in *MsgTokenizeShares, opts ...grpc.CallOption) (*MsgTokenizeSharesResponse, error) { + out := new(MsgTokenizeSharesResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Msg/TokenizeShares", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RedeemTokensForShares(ctx context.Context, in *MsgRedeemTokensForShares, opts ...grpc.CallOption) (*MsgRedeemTokensForSharesResponse, error) { + out := new(MsgRedeemTokensForSharesResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Msg/RedeemTokensForShares", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) TransferTokenizeShareRecord(ctx context.Context, in *MsgTransferTokenizeShareRecord, opts ...grpc.CallOption) (*MsgTransferTokenizeShareRecordResponse, error) { + out := new(MsgTransferTokenizeShareRecordResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Msg/TransferTokenizeShareRecord", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DisableTokenizeShares(ctx context.Context, in *MsgDisableTokenizeShares, opts ...grpc.CallOption) (*MsgDisableTokenizeSharesResponse, error) { + out := new(MsgDisableTokenizeSharesResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Msg/DisableTokenizeShares", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) EnableTokenizeShares(ctx context.Context, in *MsgEnableTokenizeShares, opts ...grpc.CallOption) (*MsgEnableTokenizeSharesResponse, error) { + out := new(MsgEnableTokenizeSharesResponse) + err := c.cc.Invoke(ctx, "/gaia.lsm.v1beta1.Msg/EnableTokenizeShares", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // UpdateParams defines an operation for updating the x/lsm module + // parameters. + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) + // TokenizeShares defines a method for tokenizing shares from a validator. 
+ TokenizeShares(context.Context, *MsgTokenizeShares) (*MsgTokenizeSharesResponse, error) + // RedeemTokensForShares defines a method for redeeming tokens from a + // validator for shares. + RedeemTokensForShares(context.Context, *MsgRedeemTokensForShares) (*MsgRedeemTokensForSharesResponse, error) + // TransferTokenizeShareRecord defines a method to transfer ownership of + // TokenizeShareRecord + TransferTokenizeShareRecord(context.Context, *MsgTransferTokenizeShareRecord) (*MsgTransferTokenizeShareRecordResponse, error) + // DisableTokenizeShares defines a method to prevent the tokenization of an + // addresses stake + DisableTokenizeShares(context.Context, *MsgDisableTokenizeShares) (*MsgDisableTokenizeSharesResponse, error) + // EnableTokenizeShares defines a method to re-enable the tokenization of an + // addresseses stake after it has been disabled + EnableTokenizeShares(context.Context, *MsgEnableTokenizeShares) (*MsgEnableTokenizeSharesResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} +func (*UnimplementedMsgServer) TokenizeShares(ctx context.Context, req *MsgTokenizeShares) (*MsgTokenizeSharesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TokenizeShares not implemented") +} +func (*UnimplementedMsgServer) RedeemTokensForShares(ctx context.Context, req *MsgRedeemTokensForShares) (*MsgRedeemTokensForSharesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RedeemTokensForShares not implemented") +} +func (*UnimplementedMsgServer) TransferTokenizeShareRecord(ctx context.Context, req *MsgTransferTokenizeShareRecord) (*MsgTransferTokenizeShareRecordResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransferTokenizeShareRecord not implemented") +} +func (*UnimplementedMsgServer) DisableTokenizeShares(ctx context.Context, req *MsgDisableTokenizeShares) (*MsgDisableTokenizeSharesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DisableTokenizeShares not implemented") +} +func (*UnimplementedMsgServer) EnableTokenizeShares(ctx context.Context, req *MsgEnableTokenizeShares) (*MsgEnableTokenizeSharesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnableTokenizeShares not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_TokenizeShares_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTokenizeShares) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(MsgServer).TokenizeShares(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Msg/TokenizeShares", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).TokenizeShares(ctx, req.(*MsgTokenizeShares)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RedeemTokensForShares_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRedeemTokensForShares) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RedeemTokensForShares(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Msg/RedeemTokensForShares", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RedeemTokensForShares(ctx, req.(*MsgRedeemTokensForShares)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_TransferTokenizeShareRecord_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTransferTokenizeShareRecord) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).TransferTokenizeShareRecord(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Msg/TransferTokenizeShareRecord", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).TransferTokenizeShareRecord(ctx, req.(*MsgTransferTokenizeShareRecord)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DisableTokenizeShares_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDisableTokenizeShares) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DisableTokenizeShares(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Msg/DisableTokenizeShares", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DisableTokenizeShares(ctx, req.(*MsgDisableTokenizeShares)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_EnableTokenizeShares_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgEnableTokenizeShares) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).EnableTokenizeShares(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gaia.lsm.v1beta1.Msg/EnableTokenizeShares", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).EnableTokenizeShares(ctx, req.(*MsgEnableTokenizeShares)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "gaia.lsm.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + { + MethodName: "TokenizeShares", + Handler: _Msg_TokenizeShares_Handler, + }, + { + MethodName: "RedeemTokensForShares", + Handler: _Msg_RedeemTokensForShares_Handler, + 
}, + { + MethodName: "TransferTokenizeShareRecord", + Handler: _Msg_TransferTokenizeShareRecord_Handler, + }, + { + MethodName: "DisableTokenizeShares", + Handler: _Msg_DisableTokenizeShares_Handler, + }, + { + MethodName: "EnableTokenizeShares", + Handler: _Msg_EnableTokenizeShares_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gaia/lsm/v1beta1/tx.proto", +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgTokenizeShares) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTokenizeShares) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTokenizeShares) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TokenizedShareOwner) > 0 { + i -= len(m.TokenizedShareOwner) + copy(dAtA[i:], m.TokenizedShareOwner) + i = encodeVarintTx(dAtA, i, uint64(len(m.TokenizedShareOwner))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.DelegatorAddress) > 0 { + i -= len(m.DelegatorAddress) + copy(dAtA[i:], m.DelegatorAddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.DelegatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgTokenizeSharesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTokenizeSharesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTokenizeSharesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgRedeemTokensForShares) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRedeemTokensForShares) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRedeemTokensForShares) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.DelegatorAddress) > 0 { + i -= len(m.DelegatorAddress) + copy(dAtA[i:], m.DelegatorAddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.DelegatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRedeemTokensForSharesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRedeemTokensForSharesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRedeemTokensForSharesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgTransferTokenizeShareRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransferTokenizeShareRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransferTokenizeShareRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewOwner) > 0 { + i -= len(m.NewOwner) + copy(dAtA[i:], m.NewOwner) + i = encodeVarintTx(dAtA, i, uint64(len(m.NewOwner))) + i-- + dAtA[i] = 0x1a + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTx(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x12 + } + if m.TokenizeShareRecordId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.TokenizeShareRecordId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgTransferTokenizeShareRecordResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransferTokenizeShareRecordResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransferTokenizeShareRecordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDisableTokenizeShares) Marshal() (dAtA []byte, err error) { 
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDisableTokenizeShares) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDisableTokenizeShares) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DelegatorAddress) > 0 { + i -= len(m.DelegatorAddress) + copy(dAtA[i:], m.DelegatorAddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.DelegatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDisableTokenizeSharesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDisableTokenizeSharesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDisableTokenizeSharesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgEnableTokenizeShares) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgEnableTokenizeShares) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgEnableTokenizeShares) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DelegatorAddress) > 0 { + i -= len(m.DelegatorAddress) + copy(dAtA[i:], m.DelegatorAddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.DelegatorAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgEnableTokenizeSharesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgEnableTokenizeSharesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgEnableTokenizeSharesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n6, err6 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.CompletionTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CompletionTime):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i = encodeVarintTx(dAtA, i, uint64(n6)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawTokenizeShareRecordReward) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawTokenizeShareRecordReward) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawTokenizeShareRecordReward) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RecordId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.RecordId)) + i-- + dAtA[i] = 0x10 + } + if len(m.OwnerAddress) > 0 { + i -= len(m.OwnerAddress) + copy(dAtA[i:], m.OwnerAddress) + i = 
encodeVarintTx(dAtA, i, uint64(len(m.OwnerAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawAllTokenizeShareRecordReward) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawAllTokenizeShareRecordReward) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawAllTokenizeShareRecordReward) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OwnerAddress) > 0 { + i -= len(m.OwnerAddress) + copy(dAtA[i:], m.OwnerAddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.OwnerAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgTokenizeShares) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DelegatorAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Amount.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.TokenizedShareOwner) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgTokenizeSharesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Amount.Size() + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgRedeemTokensForShares) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DelegatorAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Amount.Size() + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgRedeemTokensForSharesResponse) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Amount.Size() + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgTransferTokenizeShareRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TokenizeShareRecordId != 0 { + n += 1 + sovTx(uint64(m.TokenizeShareRecordId)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.NewOwner) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgTransferTokenizeShareRecordResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDisableTokenizeShares) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DelegatorAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgDisableTokenizeSharesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgEnableTokenizeShares) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DelegatorAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgEnableTokenizeSharesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CompletionTime) + n += 1 + l + sovTx(uint64(l)) + return n +} + +func (m *MsgWithdrawTokenizeShareRecordReward) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OwnerAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.RecordId != 0 { + n += 1 + sovTx(uint64(m.RecordId)) + } + return n +} + +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgWithdrawAllTokenizeShareRecordReward) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OwnerAddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTokenizeShares) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTokenizeShares: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTokenizeShares: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenizedShareOwner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenizedShareOwner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTokenizeSharesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTokenizeSharesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTokenizeSharesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRedeemTokensForShares) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRedeemTokensForShares: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRedeemTokensForShares: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRedeemTokensForSharesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRedeemTokensForSharesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRedeemTokensForSharesResponse: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTransferTokenizeShareRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransferTokenizeShareRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransferTokenizeShareRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenizeShareRecordId", wireType) + } + m.TokenizeShareRecordId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TokenizeShareRecordId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewOwner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewOwner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTransferTokenizeShareRecordResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransferTokenizeShareRecordResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransferTokenizeShareRecordResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDisableTokenizeShares) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDisableTokenizeShares: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDisableTokenizeShares: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDisableTokenizeSharesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDisableTokenizeSharesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDisableTokenizeSharesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgEnableTokenizeShares) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgEnableTokenizeShares: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgEnableTokenizeShares: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegatorAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgEnableTokenizeSharesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgEnableTokenizeSharesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgEnableTokenizeSharesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.CompletionTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawTokenizeShareRecordReward) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawTokenizeShareRecordReward: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawTokenizeShareRecordReward: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecordId", wireType) + } + m.RecordId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RecordId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawTokenizeShareRecordRewardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawTokenizeShareRecordRewardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawTokenizeShareRecordRewardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawAllTokenizeShareRecordReward) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawAllTokenizeShareRecordReward: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawAllTokenizeShareRecordReward: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawAllTokenizeShareRecordRewardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawAllTokenizeShareRecordRewardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawAllTokenizeShareRecordRewardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +)
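The generated MsgServer interface and RegisterMsgServer helper above are what the module's service-registration hook consumes. As a rough sketch only (assuming the x/lsm AppModule carries its keeper in a keeper field and that a keeper.NewMsgServerImpl constructor returning types.MsgServer exists; neither is shown in this hunk), the wiring typically looks like:

// Sketch, not part of the diff: registering the generated Msg service.
// AppModule, its keeper field, and keeper.NewMsgServerImpl are assumed here.
package lsm

import (
	"github.com/cosmos/cosmos-sdk/types/module"

	"github.com/cosmos/gaia/v22/x/lsm/keeper"
	"github.com/cosmos/gaia/v22/x/lsm/types"
)

// RegisterServices wires the keeper-backed Msg server into the module configurator,
// so routed transactions reach the handlers declared in _Msg_serviceDesc above.
func (am AppModule) RegisterServices(cfg module.Configurator) {
	types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper))
}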
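State-changing methods of this Msg service are executed through transactions routed by the SDK, not by dialing the gRPC endpoint directly, so client code normally just constructs the message types and submits them in a tx. A minimal, hypothetical sketch (assuming Amount is a cosmos-sdk Coin, consistent with the non-nullable embedded message marshalled above; the package and helper names are illustrative):

// Sketch, not part of the diff: building a MsgTokenizeShares for a tx.
package lsmclient

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	lsmtypes "github.com/cosmos/gaia/v22/x/lsm/types"
)

// newTokenizeMsg fills the fields that the generated Marshal/Unmarshal code above
// encodes as proto fields 1-4: DelegatorAddress, ValidatorAddress, Amount, TokenizedShareOwner.
func newTokenizeMsg(delegator, validator, owner string, amt sdk.Coin) *lsmtypes.MsgTokenizeShares {
	return &lsmtypes.MsgTokenizeShares{
		DelegatorAddress:    delegator,
		ValidatorAddress:    validator,
		Amount:              amt,
		TokenizedShareOwner: owner,
	}
}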
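The encodeVarintTx/sovTx/skipTx helpers at the end of the file implement standard protobuf base-128 varints for tag and length prefixes. A small self-contained check of the size arithmetic (sovTx re-implemented here purely for illustration):

// Sketch: base-128 varint sizing as used by the generated helpers.
package main

import (
	"fmt"
	"math/bits"
)

// sovTx mirrors the generated formula: bytes needed to varint-encode x.
func sovTx(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	// 300 needs two 7-bit groups: 0xAC (low 7 bits with continuation bit) then 0x02.
	fmt.Println(sovTx(1), sovTx(300), sovTx(1<<21)) // 1 2 4
}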