diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3406dbd5d6..a563d3695d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Features
+* Add the ibcratelimit module [#1498](https://github.com/provenance-io/provenance/issues/1498).
* Add CLI commands for the exchange module endpoints and queries [#1701](https://github.com/provenance-io/provenance/issues/1701).
* Add CLI command to generate autocomplete shell scripts [#1762](https://github.com/provenance-io/provenance/pull/1762).
* Create CLI commands for adding a market to a genesis file [#1757](https://github.com/provenance-io/provenance/issues/1757).
diff --git a/app/app.go b/app/app.go
index 1d4a92356b..23d472dd26 100644
--- a/app/app.go
+++ b/app/app.go
@@ -141,6 +141,9 @@ import (
"github.com/provenance-io/provenance/x/ibchooks"
ibchookskeeper "github.com/provenance-io/provenance/x/ibchooks/keeper"
ibchookstypes "github.com/provenance-io/provenance/x/ibchooks/types"
+ ibcratelimit "github.com/provenance-io/provenance/x/ibcratelimit"
+ ibcratelimitkeeper "github.com/provenance-io/provenance/x/ibcratelimit/keeper"
+ ibcratelimitmodule "github.com/provenance-io/provenance/x/ibcratelimit/module"
"github.com/provenance-io/provenance/x/marker"
markerkeeper "github.com/provenance-io/provenance/x/marker/keeper"
markertypes "github.com/provenance-io/provenance/x/marker/types"
@@ -218,6 +221,7 @@ var (
ica.AppModuleBasic{},
icq.AppModuleBasic{},
ibchooks.AppModuleBasic{},
+ ibcratelimitmodule.AppModuleBasic{},
marker.AppModuleBasic{},
attribute.AppModuleBasic{},
@@ -310,11 +314,12 @@ type App struct {
TriggerKeeper triggerkeeper.Keeper
OracleKeeper oraclekeeper.Keeper
- IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
- IBCHooksKeeper *ibchookskeeper.Keeper
- ICAHostKeeper *icahostkeeper.Keeper
- TransferKeeper *ibctransferkeeper.Keeper
- ICQKeeper icqkeeper.Keeper
+ IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
+ IBCHooksKeeper *ibchookskeeper.Keeper
+ ICAHostKeeper *icahostkeeper.Keeper
+ TransferKeeper *ibctransferkeeper.Keeper
+ ICQKeeper icqkeeper.Keeper
+ RateLimitingKeeper *ibcratelimitkeeper.Keeper
MarkerKeeper markerkeeper.Keeper
MetadataKeeper metadatakeeper.Keeper
@@ -392,6 +397,7 @@ func New(
icahosttypes.StoreKey,
icqtypes.StoreKey,
ibchookstypes.StoreKey,
+ ibcratelimit.StoreKey,
metadatatypes.StoreKey,
markertypes.StoreKey,
@@ -527,12 +533,16 @@ func New(
app.IbcHooks,
)
+	rateLimitingKeeper := ibcratelimitkeeper.NewKeeper(appCodec, keys[ibcratelimit.StoreKey], nil)
+	app.RateLimitingKeeper = &rateLimitingKeeper
+
// Create Transfer Keepers
+ rateLimitingTransferModule := ibcratelimitmodule.NewIBCMiddleware(nil, app.HooksICS4Wrapper, app.RateLimitingKeeper)
transferKeeper := ibctransferkeeper.NewKeeper(
appCodec,
keys[ibctransfertypes.StoreKey],
app.GetSubspace(ibctransfertypes.ModuleName),
- app.HooksICS4Wrapper,
+ &rateLimitingTransferModule,
app.IBCKeeper.ChannelKeeper,
&app.IBCKeeper.PortKeeper,
app.AccountKeeper,
@@ -541,7 +551,8 @@ func New(
)
app.TransferKeeper = &transferKeeper
transferModule := ibctransfer.NewIBCModule(*app.TransferKeeper)
- hooksTransferModule := ibchooks.NewIBCMiddleware(transferModule, &app.HooksICS4Wrapper)
+ rateLimitingTransferModule = *rateLimitingTransferModule.WithIBCModule(transferModule)
+ hooksTransferModule := ibchooks.NewIBCMiddleware(&rateLimitingTransferModule, &app.HooksICS4Wrapper)
app.TransferStack = &hooksTransferModule
app.NameKeeper = namekeeper.NewKeeper(
@@ -660,6 +671,7 @@ func New(
app.Ics20WasmHooks.ContractKeeper = app.WasmKeeper // app.ContractKeeper -- this changes in the next version of wasm to a permissioned keeper
app.IBCHooksKeeper.ContractKeeper = app.ContractKeeper
app.Ics20MarkerHooks.MarkerKeeper = &app.MarkerKeeper
+ app.RateLimitingKeeper.PermissionedKeeper = app.ContractKeeper
app.IbcHooks.SendPacketPreProcessors = []ibchookstypes.PreSendPacketDataProcessingFn{app.Ics20MarkerHooks.SetupMarkerMemoFn, app.Ics20WasmHooks.GetWasmSendPacketPreProcessor}
@@ -766,6 +778,7 @@ func New(
// IBC
ibc.NewAppModule(app.IBCKeeper),
+ ibcratelimitmodule.NewAppModule(appCodec, *app.RateLimitingKeeper, app.AccountKeeper, app.BankKeeper),
ibchooks.NewAppModule(app.AccountKeeper, *app.IBCHooksKeeper),
ibctransfer.NewAppModule(*app.TransferKeeper),
icqModule,
@@ -805,6 +818,7 @@ func New(
metadatatypes.ModuleName,
oracletypes.ModuleName,
wasm.ModuleName,
+ ibcratelimit.ModuleName,
ibchookstypes.ModuleName,
ibctransfertypes.ModuleName,
icqtypes.ModuleName,
@@ -835,6 +849,7 @@ func New(
nametypes.ModuleName,
genutiltypes.ModuleName,
ibchost.ModuleName,
+ ibcratelimit.ModuleName,
ibchookstypes.ModuleName,
ibctransfertypes.ModuleName,
icqtypes.ModuleName,
@@ -891,6 +906,7 @@ func New(
ibctransfertypes.ModuleName,
icqtypes.ModuleName,
icatypes.ModuleName,
+ ibcratelimit.ModuleName,
ibchookstypes.ModuleName,
// wasm after ibc transfer
wasm.ModuleName,
@@ -928,6 +944,7 @@ func New(
hold.ModuleName,
exchange.ModuleName,
+ ibcratelimit.ModuleName,
ibchookstypes.ModuleName,
icatypes.ModuleName,
icqtypes.ModuleName,
@@ -982,6 +999,7 @@ func New(
// IBC
ibc.NewAppModule(app.IBCKeeper),
+ ibcratelimitmodule.NewAppModule(appCodec, *app.RateLimitingKeeper, app.AccountKeeper, app.BankKeeper),
ibchooks.NewAppModule(app.AccountKeeper, *app.IBCHooksKeeper),
ibctransfer.NewAppModule(*app.TransferKeeper),
icaModule,
diff --git a/app/params/weights.go b/app/params/weights.go
index 04821b2d2c..17ac654c64 100644
--- a/app/params/weights.go
+++ b/app/params/weights.go
@@ -40,4 +40,6 @@ const (
// Oracle
DefaultWeightUpdateOracle int = 25
DefaultWeightSendOracleQuery int = 75
+ // Rate Limiter
+ DefaultWeightGovUpdateParams int = 100
)
diff --git a/app/upgrades.go b/app/upgrades.go
index b3d604d1c1..2ddebe28ae 100644
--- a/app/upgrades.go
+++ b/app/upgrades.go
@@ -22,6 +22,7 @@ import (
"github.com/provenance-io/provenance/x/exchange"
"github.com/provenance-io/provenance/x/hold"
ibchookstypes "github.com/provenance-io/provenance/x/ibchooks/types"
+ ibcratelimit "github.com/provenance-io/provenance/x/ibcratelimit"
markertypes "github.com/provenance-io/provenance/x/marker/types"
msgfeetypes "github.com/provenance-io/provenance/x/msgfees/types"
oracletypes "github.com/provenance-io/provenance/x/oracle/types"
@@ -174,6 +175,7 @@ var upgrades = map[string]appUpgrade{
Added: []string{icqtypes.ModuleName, oracletypes.ModuleName, ibchookstypes.StoreKey, hold.ModuleName, exchange.ModuleName},
},
"tourmaline-rc1": { // upgrade for v1.18.0-rc1
+ Added: []string{ibcratelimit.ModuleName},
Handler: func(ctx sdk.Context, app *App, vm module.VersionMap) (module.VersionMap, error) {
var err error
vm, err = runModuleMigrations(ctx, app, vm)
@@ -185,6 +187,7 @@ var upgrades = map[string]appUpgrade{
},
},
"tourmaline": { // upgrade for v1.18.0
+ Added: []string{ibcratelimit.ModuleName},
Handler: func(ctx sdk.Context, app *App, vm module.VersionMap) (module.VersionMap, error) {
var err error
vm, err = runModuleMigrations(ctx, app, vm)
diff --git a/docs/proto-docs.md b/docs/proto-docs.md
index cbcf40e138..b6bd486d97 100644
--- a/docs/proto-docs.md
+++ b/docs/proto-docs.md
@@ -188,6 +188,29 @@
- [Msg](#provenance.ibchooks.v1.Msg)
+- [provenance/ibcratelimit/v1/event.proto](#provenance/ibcratelimit/v1/event.proto)
+ - [EventAckRevertFailure](#provenance.ibcratelimit.v1.EventAckRevertFailure)
+ - [EventParamsUpdated](#provenance.ibcratelimit.v1.EventParamsUpdated)
+ - [EventTimeoutRevertFailure](#provenance.ibcratelimit.v1.EventTimeoutRevertFailure)
+
+- [provenance/ibcratelimit/v1/params.proto](#provenance/ibcratelimit/v1/params.proto)
+ - [Params](#provenance.ibcratelimit.v1.Params)
+
+- [provenance/ibcratelimit/v1/genesis.proto](#provenance/ibcratelimit/v1/genesis.proto)
+ - [GenesisState](#provenance.ibcratelimit.v1.GenesisState)
+
+- [provenance/ibcratelimit/v1/query.proto](#provenance/ibcratelimit/v1/query.proto)
+ - [ParamsRequest](#provenance.ibcratelimit.v1.ParamsRequest)
+ - [ParamsResponse](#provenance.ibcratelimit.v1.ParamsResponse)
+
+ - [Query](#provenance.ibcratelimit.v1.Query)
+
+- [provenance/ibcratelimit/v1/tx.proto](#provenance/ibcratelimit/v1/tx.proto)
+ - [MsgGovUpdateParamsRequest](#provenance.ibcratelimit.v1.MsgGovUpdateParamsRequest)
+ - [MsgGovUpdateParamsResponse](#provenance.ibcratelimit.v1.MsgGovUpdateParamsResponse)
+
+ - [Msg](#provenance.ibcratelimit.v1.Msg)
+
- [provenance/marker/v1/accessgrant.proto](#provenance/marker/v1/accessgrant.proto)
- [AccessGrant](#provenance.marker.v1.AccessGrant)
@@ -3216,6 +3239,230 @@ Msg defines the Msg service.
+
+
Top
+
+## provenance/ibcratelimit/v1/event.proto
+
+
+
+
+
+### EventAckRevertFailure
+EventAckRevertFailure is emitted when an Ack revert fails
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `module` | [string](#string) | | module is the name of the module that emitted it. |
+| `packet` | [string](#string) | | packet is the packet received on acknowledgement. |
+| `ack` | [string](#string) | | ack is the packet's inner acknowledgement message. |
+
+
+
+
+
+
+
+
+### EventParamsUpdated
+EventParamsUpdated is an event emitted when the ibcratelimit module's params have been updated.
+
+
+
+
+
+
+
+
+### EventTimeoutRevertFailure
+EventTimeoutRevertFailure is emitted when a Timeout revert fails
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `module` | [string](#string) | | module is the name of the module that emitted it. |
+| `packet` | [string](#string) | | packet is the packet received on timeout. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## provenance/ibcratelimit/v1/params.proto
+
+
+
+
+
+### Params
+Params defines the parameters for the ibcratelimit module.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `contract_address` | [string](#string) | | contract_address is the address of the rate limiter contract. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## provenance/ibcratelimit/v1/genesis.proto
+
+
+
+
+
+### GenesisState
+GenesisState defines the ibcratelimit module's genesis state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `params` | [Params](#provenance.ibcratelimit.v1.Params) | | params are all the parameters of the module. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## provenance/ibcratelimit/v1/query.proto
+
+
+
+
+
+### ParamsRequest
+ParamsRequest is the request type for the Query/Params RPC method.
+
+
+
+
+
+
+
+
+### ParamsResponse
+ParamsResponse is the response type for the Query/Params RPC method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `params` | [Params](#provenance.ibcratelimit.v1.Params) | | params defines the parameters of the module. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Query
+Query defines the gRPC querier service.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `Params` | [ParamsRequest](#provenance.ibcratelimit.v1.ParamsRequest) | [ParamsResponse](#provenance.ibcratelimit.v1.ParamsResponse) | Params defines a gRPC query method that returns the ibcratelimit module's parameters. | GET|/provenance/ibcratelimit/v1/params|
+
+
+
+
+
+
+Top
+
+## provenance/ibcratelimit/v1/tx.proto
+
+
+
+
+
+### MsgGovUpdateParamsRequest
+MsgGovUpdateParamsRequest is a request message for the GovUpdateParams endpoint.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `authority` | [string](#string) | | authority should be the governance module account address. |
+| `params` | [Params](#provenance.ibcratelimit.v1.Params) | | params are the new param values to set |
+
+
+
+
+
+
+
+
+### MsgGovUpdateParamsResponse
+MsgGovUpdateParamsResponse is a response message for the GovUpdateParams endpoint.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Msg
+Msg is the service for ibcratelimit module's tx endpoints.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `GovUpdateParams` | [MsgGovUpdateParamsRequest](#provenance.ibcratelimit.v1.MsgGovUpdateParamsRequest) | [MsgGovUpdateParamsResponse](#provenance.ibcratelimit.v1.MsgGovUpdateParamsResponse) | GovUpdateParams is a governance proposal endpoint for updating the ibcratelimit module's params. | |
+
+
+
+
+
Top
diff --git a/internal/ibc/ibc.go b/internal/ibc/ibc.go
new file mode 100644
index 0000000000..35e30d3b4f
--- /dev/null
+++ b/internal/ibc/ibc.go
@@ -0,0 +1,52 @@
+package ibc
+
+import (
+ "encoding/json"
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ channeltypes "github.com/cosmos/ibc-go/v6/modules/core/04-channel/types"
+)
+
+const IbcAcknowledgementErrorType = "ibc-acknowledgement-error"
+
+// NewEmitErrorAcknowledgement creates a new error acknowledgement after having emitted an event with the
+// details of the error.
+func NewEmitErrorAcknowledgement(ctx sdk.Context, err error, errorContexts ...string) channeltypes.Acknowledgement {
+ EmitIBCErrorEvents(ctx, err, errorContexts)
+
+ return channeltypes.NewErrorAcknowledgement(err)
+}
+
+// EmitIBCErrorEvents emits an IBC acknowledgement-error event and logs the error with its contexts.
+func EmitIBCErrorEvents(ctx sdk.Context, err error, errorContexts []string) {
+ logger := ctx.Logger().With("module", IbcAcknowledgementErrorType)
+ if err == nil {
+ logger.Error("no error skipping emit")
+ return
+ }
+
+ attributes := make([]sdk.Attribute, len(errorContexts)+1)
+ attributes[0] = sdk.NewAttribute("error", err.Error())
+ for i, s := range errorContexts {
+ attributes[i+1] = sdk.NewAttribute("error-context", s)
+ logger.Error(fmt.Sprintf("error-context: %v", s))
+ }
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ IbcAcknowledgementErrorType,
+ attributes...,
+ ),
+ })
+}
+
+// IsAckError checks an IBC acknowledgement to see if it's an error.
+// This is a replacement for ack.Success() which is currently not working in some circumstances
+func IsAckError(acknowledgement []byte) bool {
+ var ackErr channeltypes.Acknowledgement_Error
+ if err := json.Unmarshal(acknowledgement, &ackErr); err == nil && len(ackErr.Error) > 0 {
+ return true
+ }
+ return false
+}
diff --git a/internal/ibc/ibc_test.go b/internal/ibc/ibc_test.go
new file mode 100644
index 0000000000..cfe78cfa0f
--- /dev/null
+++ b/internal/ibc/ibc_test.go
@@ -0,0 +1,152 @@
+package ibc_test
+
+import (
+ "encoding/json"
+ "testing"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ channeltypes "github.com/cosmos/ibc-go/v6/modules/core/04-channel/types"
+ "github.com/provenance-io/provenance/app"
+ "github.com/provenance-io/provenance/internal/ibc"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/stretchr/testify/assert"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+)
+
+func TestNewEmitErrorAcknowledgement(t *testing.T) {
+ testCases := []struct {
+ name string
+ err error
+ errCtx []string
+ hasEvents bool
+ ack channeltypes.Acknowledgement
+ }{
+ {
+ name: "success - emits ibc error events",
+ err: ibcratelimit.ErrRateLimitExceeded,
+ errCtx: []string{"err ctx 1", "error ctx 2"},
+ hasEvents: true,
+ ack: channeltypes.NewErrorAcknowledgement(ibcratelimit.ErrRateLimitExceeded),
+ },
+ {
+ name: "success - no ctx",
+ err: ibcratelimit.ErrRateLimitExceeded,
+ errCtx: []string{},
+ hasEvents: true,
+ ack: channeltypes.NewErrorAcknowledgement(ibcratelimit.ErrRateLimitExceeded),
+ },
+ {
+ name: "success - nil ctx",
+ err: ibcratelimit.ErrRateLimitExceeded,
+ errCtx: nil,
+ hasEvents: true,
+ ack: channeltypes.NewErrorAcknowledgement(ibcratelimit.ErrRateLimitExceeded),
+ },
+ {
+ name: "success - nil error",
+ err: nil,
+ errCtx: []string{"err ctx 1", "error ctx 2"},
+ hasEvents: false,
+ ack: channeltypes.NewErrorAcknowledgement(nil),
+ },
+ }
+
+ testApp := app.Setup(t)
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := testApp.BaseApp.NewContext(false, tmproto.Header{})
+ ack := ibc.NewEmitErrorAcknowledgement(ctx, tc.err, tc.errCtx...)
+ events := ctx.EventManager().Events()
+ assert.Equal(t, tc.hasEvents, len(events) > 0, "should correctly decide when to emit events")
+ assert.Equal(t, tc.ack, ack, "should return the correct ack")
+ })
+ }
+}
+
+func TestEmitIBCErrorEvents(t *testing.T) {
+ testCases := []struct {
+ name string
+ err error
+ errCtx []string
+ events sdk.Events
+ }{
+ {
+ name: "success - emits ibc error events",
+ err: ibcratelimit.ErrRateLimitExceeded,
+ errCtx: []string{"err ctx 1", "error ctx 2"},
+ events: []sdk.Event{
+ sdk.NewEvent(ibc.IbcAcknowledgementErrorType,
+ sdk.NewAttribute("error", "rate limit exceeded"),
+ sdk.NewAttribute("error-context", "err ctx 1"),
+ sdk.NewAttribute("error-context", "error ctx 2"),
+ ),
+ },
+ },
+ {
+ name: "success - no ctx",
+ err: ibcratelimit.ErrRateLimitExceeded,
+ errCtx: []string{},
+ events: []sdk.Event{
+ sdk.NewEvent(ibc.IbcAcknowledgementErrorType,
+ sdk.NewAttribute("error", "rate limit exceeded"),
+ ),
+ },
+ },
+ {
+ name: "success - nil ctx",
+ err: ibcratelimit.ErrRateLimitExceeded,
+ errCtx: nil,
+ events: []sdk.Event{
+ sdk.NewEvent(ibc.IbcAcknowledgementErrorType,
+ sdk.NewAttribute("error", "rate limit exceeded"),
+ ),
+ },
+ },
+ {
+ name: "success - nil error",
+ err: nil,
+ errCtx: []string{"err ctx 1", "error ctx 2"},
+ events: []sdk.Event{},
+ },
+ }
+
+ testApp := app.Setup(t)
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := testApp.BaseApp.NewContext(false, tmproto.Header{})
+ ibc.EmitIBCErrorEvents(ctx, tc.err, tc.errCtx)
+ events := ctx.EventManager().Events()
+ assert.Equal(t, tc.events, events, "should emit the correct events")
+ })
+ }
+}
+
+func TestIsAckError(t *testing.T) {
+ testCases := []struct {
+ name string
+ ack channeltypes.Acknowledgement
+ expected bool
+ }{
+ {
+ name: "success - should detect error ack",
+ ack: channeltypes.NewErrorAcknowledgement(ibcratelimit.ErrRateLimitExceeded),
+ expected: true,
+ },
+ {
+ name: "failure - should detect result ack",
+ ack: channeltypes.NewResultAcknowledgement([]byte("garbage")),
+ expected: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ack, err := json.Marshal(tc.ack.Response)
+ assert.NoError(t, err, "should not fail when marshaling ack")
+ isAck := ibc.IsAckError(ack)
+ assert.Equal(t, tc.expected, isAck, "should return the correct value")
+ })
+ }
+}
diff --git a/proto/provenance/ibcratelimit/v1/event.proto b/proto/provenance/ibcratelimit/v1/event.proto
new file mode 100644
index 0000000000..5d147e6d90
--- /dev/null
+++ b/proto/provenance/ibcratelimit/v1/event.proto
@@ -0,0 +1,28 @@
+syntax = "proto3";
+package provenance.ibcratelimit.v1;
+
+option go_package = "github.com/provenance-io/provenance/x/ibcratelimit";
+
+option java_package = "io.provenance.ibcratelimit.v1";
+option java_multiple_files = true;
+
+// EventAckRevertFailure is emitted when an Ack revert fails
+message EventAckRevertFailure {
+ // module is the name of the module that emitted it.
+ string module = 1;
+ // packet is the packet received on acknowledgement.
+ string packet = 2;
+ // ack is the packet's inner acknowledgement message.
+ string ack = 3;
+}
+
+// EventTimeoutRevertFailure is emitted when a Timeout revert fails
+message EventTimeoutRevertFailure {
+ // module is the name of the module that emitted it.
+ string module = 1;
+ // packet is the packet received on timeout.
+ string packet = 2;
+}
+
+// EventParamsUpdated is an event emitted when the ibcratelimit module's params have been updated.
+message EventParamsUpdated {}
\ No newline at end of file
diff --git a/proto/provenance/ibcratelimit/v1/genesis.proto b/proto/provenance/ibcratelimit/v1/genesis.proto
new file mode 100644
index 0000000000..c705c120dd
--- /dev/null
+++ b/proto/provenance/ibcratelimit/v1/genesis.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package provenance.ibcratelimit.v1;
+
+import "gogoproto/gogo.proto";
+import "provenance/ibcratelimit/v1/params.proto";
+
+option go_package = "github.com/provenance-io/provenance/x/ibcratelimit";
+option java_package = "io.provenance.ibcratelimit.v1";
+option java_multiple_files = true;
+
+// GenesisState defines the ibcratelimit module's genesis state.
+message GenesisState {
+ // params are all the parameters of the module.
+ Params params = 1 [(gogoproto.nullable) = false];
+}
diff --git a/proto/provenance/ibcratelimit/v1/params.proto b/proto/provenance/ibcratelimit/v1/params.proto
new file mode 100644
index 0000000000..995fea1ce6
--- /dev/null
+++ b/proto/provenance/ibcratelimit/v1/params.proto
@@ -0,0 +1,14 @@
+syntax = "proto3";
+package provenance.ibcratelimit.v1;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/provenance-io/provenance/x/ibcratelimit";
+option java_package = "io.provenance.ibcratelimit.v1";
+option java_multiple_files = true;
+
+// Params defines the parameters for the ibcratelimit module.
+message Params {
+ // contract_address is the address of the rate limiter contract.
+ string contract_address = 1 [(gogoproto.moretags) = "yaml:\"contract_address\""];
+}
diff --git a/proto/provenance/ibcratelimit/v1/query.proto b/proto/provenance/ibcratelimit/v1/query.proto
new file mode 100644
index 0000000000..c679da67e2
--- /dev/null
+++ b/proto/provenance/ibcratelimit/v1/query.proto
@@ -0,0 +1,28 @@
+syntax = "proto3";
+package provenance.ibcratelimit.v1;
+
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+import "provenance/ibcratelimit/v1/params.proto";
+
+option go_package = "github.com/provenance-io/provenance/x/ibcratelimit";
+option java_package = "io.provenance.ibcratelimit.v1";
+option java_multiple_files = true;
+
+// Query defines the gRPC querier service.
+service Query {
+ // Params defines a gRPC query method that returns the ibcratelimit module's
+ // parameters.
+ rpc Params(ParamsRequest) returns (ParamsResponse) {
+ option (google.api.http).get = "/provenance/ibcratelimit/v1/params";
+ }
+}
+
+// ParamsRequest is the request type for the Query/Params RPC method.
+message ParamsRequest {}
+
+// ParamsResponse is the response type for the Query/Params RPC method.
+message ParamsResponse {
+ // params defines the parameters of the module.
+ Params params = 1 [(gogoproto.nullable) = false];
+}
diff --git a/proto/provenance/ibcratelimit/v1/tx.proto b/proto/provenance/ibcratelimit/v1/tx.proto
new file mode 100644
index 0000000000..e410d410f3
--- /dev/null
+++ b/proto/provenance/ibcratelimit/v1/tx.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+package provenance.ibcratelimit.v1;
+
+option go_package = "github.com/provenance-io/provenance/x/ibcratelimit";
+
+option java_package = "io.provenance.ibcratelimit.v1";
+option java_multiple_files = true;
+
+import "gogoproto/gogo.proto";
+import "cosmos_proto/cosmos.proto";
+import "cosmos/msg/v1/msg.proto";
+import "provenance/ibcratelimit/v1/params.proto";
+
+// Msg is the service for ibcratelimit module's tx endpoints.
+service Msg {
+  // GovUpdateParams is a governance proposal endpoint for updating the ibcratelimit module's params.
+ rpc GovUpdateParams(MsgGovUpdateParamsRequest) returns (MsgGovUpdateParamsResponse);
+}
+
+// MsgGovUpdateParamsRequest is a request message for the GovUpdateParams endpoint.
+message MsgGovUpdateParamsRequest {
+ option (cosmos.msg.v1.signer) = "authority";
+
+ // authority should be the governance module account address.
+ string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+
+ // params are the new param values to set
+ Params params = 2 [(gogoproto.nullable) = false];
+}
+
+// MsgGovUpdateParamsResponse is a response message for the GovUpdateParams endpoint.
+message MsgGovUpdateParamsResponse {}
diff --git a/testutil/contracts/rate-limiter/.cargo/config b/testutil/contracts/rate-limiter/.cargo/config
new file mode 100644
index 0000000000..d284662142
--- /dev/null
+++ b/testutil/contracts/rate-limiter/.cargo/config
@@ -0,0 +1,4 @@
+[alias]
+wasm = "build --release --target wasm32-unknown-unknown"
+test = "test --lib"
+schema = "run --example schema"
\ No newline at end of file
diff --git a/testutil/contracts/rate-limiter/.gitignore b/testutil/contracts/rate-limiter/.gitignore
new file mode 100644
index 0000000000..200de49238
--- /dev/null
+++ b/testutil/contracts/rate-limiter/.gitignore
@@ -0,0 +1,2 @@
+target
+schema
diff --git a/testutil/contracts/rate-limiter/Cargo.lock b/testutil/contracts/rate-limiter/Cargo.lock
new file mode 100644
index 0000000000..9aa3c445e4
--- /dev/null
+++ b/testutil/contracts/rate-limiter/Cargo.lock
@@ -0,0 +1,968 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ahash"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
+dependencies = [
+ "getrandom",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.75"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
+[[package]]
+name = "base64"
+version = "0.21.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2"
+
+[[package]]
+name = "base64ct"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
+
+[[package]]
+name = "block-buffer"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "bnum"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "128a44527fc0d6abf05f9eda748b9027536e12dff93f5acc8449f51583309350"
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "bytes"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "const-oid"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f"
+
+[[package]]
+name = "cosmwasm-crypto"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6fb22494cf7d23d0c348740e06e5c742070b2991fd41db77bba0bcfbae1a723"
+dependencies = [
+ "digest 0.10.7",
+ "ed25519-zebra",
+ "k256",
+ "rand_core 0.6.4",
+ "thiserror",
+]
+
+[[package]]
+name = "cosmwasm-derive"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e199424486ea97d6b211db6387fd72e26b4a439d40cc23140b2d8305728055b"
+dependencies = [
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "cosmwasm-schema"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fef683a9c1c4eabd6d31515719d0d2cc66952c4c87f7eb192bfc90384517dc34"
+dependencies = [
+ "cosmwasm-schema-derive",
+ "schemars",
+ "serde",
+ "serde_json",
+ "thiserror",
+]
+
+[[package]]
+name = "cosmwasm-schema-derive"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9567025acbb4c0c008178393eb53b3ac3c2e492c25949d3bf415b9cbe80772d8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "cosmwasm-std"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d89d680fb60439b7c5947b15f9c84b961b88d1f8a3b20c4bd178a3f87db8bae"
+dependencies = [
+ "base64",
+ "bnum",
+ "cosmwasm-crypto",
+ "cosmwasm-derive",
+ "derivative",
+ "forward_ref",
+ "hex",
+ "schemars",
+ "serde",
+ "serde-json-wasm 0.5.1",
+ "sha2 0.10.8",
+ "thiserror",
+]
+
+[[package]]
+name = "cosmwasm-storage"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54a1c574d30feffe4b8121e61e839c231a5ce21901221d2fb4d5c945968a4f00"
+dependencies = [
+ "cosmwasm-std",
+ "serde",
+]
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crypto-bigint"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124"
+dependencies = [
+ "generic-array",
+ "rand_core 0.6.4",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "curve25519-dalek"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61"
+dependencies = [
+ "byteorder",
+ "digest 0.9.0",
+ "rand_core 0.5.1",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "cw-multi-test"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d818f5323c80ed4890db7f89d65eda3f0261fe21878e628c27ea2d8de4b7ba4"
+dependencies = [
+ "anyhow",
+ "cosmwasm-std",
+ "cw-storage-plus",
+ "cw-utils",
+ "derivative",
+ "itertools 0.11.0",
+ "prost 0.12.1",
+ "schemars",
+ "serde",
+ "sha2 0.10.8",
+ "thiserror",
+]
+
+[[package]]
+name = "cw-storage-plus"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f0e92a069d62067f3472c62e30adedb4cab1754725c0f2a682b3128d2bf3c79"
+dependencies = [
+ "cosmwasm-std",
+ "schemars",
+ "serde",
+]
+
+[[package]]
+name = "cw-utils"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b9f351a4e4d81ef7c890e44d903f8c0bdcdc00f094fd3a181eaf70c0eec7a3a"
+dependencies = [
+ "cosmwasm-schema",
+ "cosmwasm-std",
+ "cw2",
+ "schemars",
+ "semver",
+ "serde",
+ "thiserror",
+]
+
+[[package]]
+name = "cw2"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9431d14f64f49e41c6ef5561ed11a5391c417d0cb16455dea8cdcb9037a8d197"
+dependencies = [
+ "cosmwasm-schema",
+ "cosmwasm-std",
+ "cw-storage-plus",
+ "schemars",
+ "serde",
+ "thiserror",
+]
+
+[[package]]
+name = "der"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
+dependencies = [
+ "const-oid",
+ "zeroize",
+]
+
+[[package]]
+name = "derivative"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "digest"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer 0.10.4",
+ "const-oid",
+ "crypto-common",
+ "subtle",
+]
+
+[[package]]
+name = "dyn-clone"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23d2f3407d9a573d666de4b5bdf10569d73ca9478087346697dcbae6244bfbcd"
+
+[[package]]
+name = "ecdsa"
+version = "0.16.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4"
+dependencies = [
+ "der",
+ "digest 0.10.7",
+ "elliptic-curve",
+ "rfc6979",
+ "signature",
+ "spki",
+]
+
+[[package]]
+name = "ed25519-zebra"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6"
+dependencies = [
+ "curve25519-dalek",
+ "hashbrown",
+ "hex",
+ "rand_core 0.6.4",
+ "serde",
+ "sha2 0.9.9",
+ "zeroize",
+]
+
+[[package]]
+name = "either"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
+
+[[package]]
+name = "elliptic-curve"
+version = "0.13.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914"
+dependencies = [
+ "base16ct",
+ "crypto-bigint",
+ "digest 0.10.7",
+ "ff",
+ "generic-array",
+ "group",
+ "pkcs8",
+ "rand_core 0.6.4",
+ "sec1",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "ff"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449"
+dependencies = [
+ "rand_core 0.6.4",
+ "subtle",
+]
+
+[[package]]
+name = "forward_ref"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8cbd1169bd7b4a0a20d92b9af7a7e0422888bd38a6f5ec29c1fd8c1558a272e"
+
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+ "zeroize",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "group"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
+dependencies = [
+ "ff",
+ "rand_core 0.6.4",
+ "subtle",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "hmac"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
+dependencies = [
+ "digest 0.10.7",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
+
+[[package]]
+name = "k256"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc"
+dependencies = [
+ "cfg-if",
+ "ecdsa",
+ "elliptic-curve",
+ "once_cell",
+ "sha2 0.10.8",
+ "signature",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.149"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
+
+[[package]]
+name = "num-traits"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
+
+[[package]]
+name = "opaque-debug"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
+
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+ "der",
+ "spki",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "prost"
+version = "0.11.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd"
+dependencies = [
+ "bytes",
+ "prost-derive 0.11.9",
+]
+
+[[package]]
+name = "prost"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d"
+dependencies = [
+ "bytes",
+ "prost-derive 0.12.1",
+]
+
+[[package]]
+name = "prost-derive"
+version = "0.11.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
+dependencies = [
+ "anyhow",
+ "itertools 0.10.5",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "prost-derive"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32"
+dependencies = [
+ "anyhow",
+ "itertools 0.11.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.38",
+]
+
+[[package]]
+name = "prost-types"
+version = "0.11.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13"
+dependencies = [
+ "prost 0.11.9",
+]
+
+[[package]]
+name = "provwasm-common"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "938fe0a8914598b5aa201221ce2d9c5388412e348ea312bcba36b5077376b96d"
+dependencies = [
+ "cosmwasm-std",
+]
+
+[[package]]
+name = "provwasm-mocks"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02175727ba6205cb5feac4724d3ab6f4e5833dddae517961bf820f6df4d3a6bd"
+dependencies = [
+ "cosmwasm-std",
+ "provwasm-common",
+ "provwasm-std",
+ "schemars",
+ "serde",
+]
+
+[[package]]
+name = "provwasm-proc-macro"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "792736fb76e0acb3118b22e5e55e456b22c67ac3ee51bfb1f7dfde09bd954560"
+dependencies = [
+ "itertools 0.10.5",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.38",
+]
+
+[[package]]
+name = "provwasm-std"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d216752a4c37d7bc39ee1d397f6500fa92cf1c191c60b501038aa98cafa07c69"
+dependencies = [
+ "base64",
+ "chrono",
+ "cosmwasm-schema",
+ "cosmwasm-std",
+ "prost 0.11.9",
+ "prost-types",
+ "provwasm-common",
+ "provwasm-proc-macro",
+ "schemars",
+ "serde",
+ "serde-cw-value",
+ "strum_macros",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rate-limiter"
+version = "0.1.0"
+dependencies = [
+ "cosmwasm-schema",
+ "cosmwasm-std",
+ "cosmwasm-storage",
+ "cw-multi-test",
+ "cw-storage-plus",
+ "cw2",
+ "hex",
+ "provwasm-mocks",
+ "provwasm-std",
+ "schemars",
+ "serde",
+ "serde-json-wasm 1.0.0",
+ "sha2 0.10.8",
+ "thiserror",
+]
+
+[[package]]
+name = "rfc6979"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2"
+dependencies = [
+ "hmac",
+ "subtle",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
+
+[[package]]
+name = "ryu"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
+
+[[package]]
+name = "schemars"
+version = "0.8.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f7b0ce13155372a76ee2e1c5ffba1fe61ede73fbea5630d61eee6fac4929c0c"
+dependencies = [
+ "dyn-clone",
+ "schemars_derive",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "schemars_derive"
+version = "0.8.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e85e2a16b12bdb763244c69ab79363d71db2b4b918a2def53f80b02e0574b13c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "serde_derive_internals",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "sec1"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc"
+dependencies = [
+ "base16ct",
+ "der",
+ "generic-array",
+ "pkcs8",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090"
+
+[[package]]
+name = "serde"
+version = "1.0.189"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde-cw-value"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75d32da6b8ed758b7d850b6c3c08f1d7df51a4df3cb201296e63e34a78e99d4"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde-json-wasm"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16a62a1fad1e1828b24acac8f2b468971dade7b8c3c2e672bcadefefb1f8c137"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde-json-wasm"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83c37d03f3b0f6b5f77c11af1e7c772de1c9af83e50bef7bb6069601900ba67b"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.189"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.38",
+]
+
+[[package]]
+name = "serde_derive_internals"
+version = "0.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.107"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "sha2"
+version = "0.9.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800"
+dependencies = [
+ "block-buffer 0.9.0",
+ "cfg-if",
+ "cpufeatures",
+ "digest 0.9.0",
+ "opaque-debug",
+]
+
+[[package]]
+name = "sha2"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest 0.10.7",
+]
+
+[[package]]
+name = "signature"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500"
+dependencies = [
+ "digest 0.10.7",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "spki"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+dependencies = [
+ "base64ct",
+ "der",
+]
+
+[[package]]
+name = "strum_macros"
+version = "0.24.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "subtle"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.49"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.49"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.38",
+]
+
+[[package]]
+name = "typenum"
+version = "1.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "zeroize"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
diff --git a/testutil/contracts/rate-limiter/Cargo.toml b/testutil/contracts/rate-limiter/Cargo.toml
new file mode 100644
index 0000000000..0426b14cbb
--- /dev/null
+++ b/testutil/contracts/rate-limiter/Cargo.toml
@@ -0,0 +1,61 @@
+[package]
+name = "rate-limiter"
+version = "0.1.0"
+authors = ["Nicolas Lara "]
+edition = "2021"
+
+exclude = [
+ # Those files are rust-optimizer artifacts. You might want to commit them for convenience but they should not be part of the source code publication.
+ "contract.wasm",
+ "hash.txt",
+]
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[features]
+# for more explicit tests, cargo test --features=backtraces
+backtraces = []
+
+[profile.release]
+codegen-units = 1
+debug = false
+debug-assertions = false
+incremental = false
+lto = true
+opt-level = 3
+overflow-checks = true
+panic = 'abort'
+rpath = false
+
+[package.metadata.scripts]
+optimize = """docker run --rm -v "$(pwd)":/code \
+ --mount type=volume,source="$(basename "$(pwd)")_cache",target=/code/target \
+ --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \
+ cosmwasm/rust-optimizer:0.14.0
+"""
+optimize-arm = """docker run --rm -v "$(pwd)":/code \
+ --mount type=volume,source="$(basename "$(pwd)")_cache",target=/code/target \
+ --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \
+ cosmwasm/rust-optimizer-arm64:0.14.0
+"""
+
+[dependencies]
+provwasm-std = { version = "2.0.0" }
+cosmwasm-schema = "1.2.5"
+cosmwasm-std = { version = "1.2.5", features = ["stargate", "cosmwasm_1_1"] }
+cosmwasm-storage = "1.2.5"
+cw-storage-plus = { version = "1.0.1", features = ["iterator"] }
+cw2 = "1.0.1"
+schemars = "0.8.10"
+serde = { version = "1.0.163", default-features = false, features = ["derive"] }
+thiserror = { version = "1.0.31" }
+sha2 = "0.10.6"
+hex = "0.4.3"
+
+[dev-dependencies]
+cw-multi-test = "0.17.0"
+provwasm-mocks = { version = "2.0.0" }
+serde-json-wasm = "1.0.0"
diff --git a/testutil/contracts/rate-limiter/Makefile b/testutil/contracts/rate-limiter/Makefile
new file mode 100644
index 0000000000..419cfc72d0
--- /dev/null
+++ b/testutil/contracts/rate-limiter/Makefile
@@ -0,0 +1,38 @@
+UNAME_M := $(shell uname -m)
+
+.PHONY: all
+all: fmt build test lint schema optimize
+
+.PHONY: fmt
+fmt:
+ @cargo fmt --all -- --check
+
+.PHONY: build
+build:
+ @cargo wasm
+
+.PHONY: test
+test:
+ @RUST_BACKTRACE=1 cargo test
+
+.PHONY: lint
+lint:
+ @cargo clippy -- -D warnings
+
+.PHONY: schema
+schema:
+ @cargo schema
+
+.PHONY: optimize
+optimize:
+ifeq ($(UNAME_M),arm64)
+ @docker run --rm -v $(CURDIR):/code \
+ --mount type=volume,source="ratelimit_cache",target=/code/target \
+ --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \
+ cosmwasm/rust-optimizer-arm64:0.14.0
+else
+ @docker run --rm -v $(CURDIR):/code \
+ --mount type=volume,source="ratelimit_cache",target=/code/target \
+ --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \
+ cosmwasm/rust-optimizer:0.14.0
+endif
\ No newline at end of file
diff --git a/testutil/contracts/rate-limiter/artifacts/checksums.txt b/testutil/contracts/rate-limiter/artifacts/checksums.txt
new file mode 100644
index 0000000000..9676e9e6d7
--- /dev/null
+++ b/testutil/contracts/rate-limiter/artifacts/checksums.txt
@@ -0,0 +1 @@
+0f88126e350653cbe13f1295bbeba80b0ff1bbe70233c4d60980275bc40187cf rate_limiter-aarch64.wasm
diff --git a/testutil/contracts/rate-limiter/artifacts/checksums_intermediate.txt b/testutil/contracts/rate-limiter/artifacts/checksums_intermediate.txt
new file mode 100644
index 0000000000..ab517fbe4f
--- /dev/null
+++ b/testutil/contracts/rate-limiter/artifacts/checksums_intermediate.txt
@@ -0,0 +1 @@
+0b2277d8c51fe5a5bd25e97d45b85ad2de77c3aa3a39fa6366ee8b8ac38c96e3 /target/wasm32-unknown-unknown/release/rate_limiter.wasm
diff --git a/testutil/contracts/rate-limiter/artifacts/rate_limiter-aarch64.wasm b/testutil/contracts/rate-limiter/artifacts/rate_limiter-aarch64.wasm
new file mode 100644
index 0000000000..b84afbf2ee
Binary files /dev/null and b/testutil/contracts/rate-limiter/artifacts/rate_limiter-aarch64.wasm differ
diff --git a/testutil/contracts/rate-limiter/artifacts/rate_limiter.wasm b/testutil/contracts/rate-limiter/artifacts/rate_limiter.wasm
new file mode 100644
index 0000000000..ad06f6bf91
Binary files /dev/null and b/testutil/contracts/rate-limiter/artifacts/rate_limiter.wasm differ
diff --git a/testutil/contracts/rate-limiter/examples/schema.rs b/testutil/contracts/rate-limiter/examples/schema.rs
new file mode 100644
index 0000000000..954edd462e
--- /dev/null
+++ b/testutil/contracts/rate-limiter/examples/schema.rs
@@ -0,0 +1,13 @@
+use cosmwasm_schema::write_api;
+
+use rate_limiter::msg::{ExecuteMsg, InstantiateMsg, MigrateMsg, QueryMsg, SudoMsg};
+
+fn main() {
+ write_api! {
+ instantiate: InstantiateMsg,
+ query: QueryMsg,
+ execute: ExecuteMsg,
+ sudo: SudoMsg,
+ migrate: MigrateMsg,
+ }
+}
diff --git a/testutil/contracts/rate-limiter/src/contract.rs b/testutil/contracts/rate-limiter/src/contract.rs
new file mode 100644
index 0000000000..30bae5b33e
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/contract.rs
@@ -0,0 +1,106 @@
+#[cfg(not(feature = "library"))]
+use cosmwasm_std::entry_point;
+use cosmwasm_std::{Binary, Deps, DepsMut, Env, MessageInfo, Response, StdResult};
+use cw2::set_contract_version;
+
+use crate::error::ContractError;
+use crate::msg::{ExecuteMsg, InstantiateMsg, MigrateMsg, QueryMsg, SudoMsg};
+use crate::state::{FlowType, GOVMODULE, IBCMODULE};
+use crate::{execute, query, sudo};
+
+// version info for migration info
+const CONTRACT_NAME: &str = "crates.io:rate-limiter";
+const CONTRACT_VERSION: &str = env!("CARGO_PKG_VERSION");
+
+#[cfg_attr(not(feature = "library"), entry_point)]
+pub fn instantiate(
+ deps: DepsMut,
+ env: Env,
+ _info: MessageInfo,
+ msg: InstantiateMsg,
+) -> Result {
+ set_contract_version(deps.storage, CONTRACT_NAME, CONTRACT_VERSION)?;
+ IBCMODULE.save(deps.storage, &msg.ibc_module)?;
+ GOVMODULE.save(deps.storage, &msg.gov_module)?;
+
+ execute::add_new_paths(deps, msg.paths, env.block.time)?;
+
+ Ok(Response::new()
+ .add_attribute("method", "instantiate")
+ .add_attribute("ibc_module", msg.ibc_module.to_string())
+ .add_attribute("gov_module", msg.gov_module.to_string()))
+}
+
+#[cfg_attr(not(feature = "library"), entry_point)]
+pub fn execute(
+ deps: DepsMut,
+ env: Env,
+ info: MessageInfo,
+ msg: ExecuteMsg,
+) -> Result {
+ match msg {
+ ExecuteMsg::AddPath {
+ channel_id,
+ denom,
+ quotas,
+ } => execute::try_add_path(deps, info.sender, channel_id, denom, quotas, env.block.time),
+ ExecuteMsg::RemovePath { channel_id, denom } => {
+ execute::try_remove_path(deps, info.sender, channel_id, denom)
+ }
+ ExecuteMsg::ResetPathQuota {
+ channel_id,
+ denom,
+ quota_id,
+ } => execute::try_reset_path_quota(
+ deps,
+ info.sender,
+ channel_id,
+ denom,
+ quota_id,
+ env.block.time,
+ ),
+ }
+}
+
+#[cfg_attr(not(feature = "library"), entry_point)]
+pub fn sudo(deps: DepsMut, env: Env, msg: SudoMsg) -> Result {
+ match msg {
+ SudoMsg::SendPacket {
+ packet,
+ #[cfg(test)]
+ channel_value_mock,
+ } => sudo::process_packet(
+ deps,
+ packet,
+ FlowType::Out,
+ env.block.time,
+ #[cfg(test)]
+ channel_value_mock,
+ ),
+ SudoMsg::RecvPacket {
+ packet,
+ #[cfg(test)]
+ channel_value_mock,
+ } => sudo::process_packet(
+ deps,
+ packet,
+ FlowType::In,
+ env.block.time,
+ #[cfg(test)]
+ channel_value_mock,
+ ),
+ SudoMsg::UndoSend { packet } => sudo::undo_send(deps, packet),
+ }
+}
+
+#[cfg_attr(not(feature = "library"), entry_point)]
+pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult {
+ match msg {
+ QueryMsg::GetQuotas { channel_id, denom } => query::get_quotas(deps, channel_id, denom),
+ }
+}
+
+#[cfg_attr(not(feature = "library"), entry_point)]
+pub fn migrate(_deps: DepsMut, _env: Env, _msg: MigrateMsg) -> Result {
+ unimplemented!()
+}
diff --git a/testutil/contracts/rate-limiter/src/contract_tests.rs b/testutil/contracts/rate-limiter/src/contract_tests.rs
new file mode 100644
index 0000000000..b51009c060
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/contract_tests.rs
@@ -0,0 +1,399 @@
+#![cfg(test)]
+
+use crate::packet::Packet;
+use crate::{contract::*, test_msg_recv, test_msg_send, ContractError};
+use cosmwasm_std::testing::{mock_dependencies, mock_env, mock_info};
+use cosmwasm_std::{from_binary, Addr, Attribute, Uint256};
+
+use crate::helpers::tests::verify_query_response;
+use crate::msg::{InstantiateMsg, PathMsg, QueryMsg, QuotaMsg, SudoMsg};
+use crate::state::tests::RESET_TIME_WEEKLY;
+use crate::state::{RateLimit, GOVMODULE, IBCMODULE, RATE_LIMIT_TRACKERS};
+
+const IBC_ADDR: &str = "IBC_MODULE";
+const GOV_ADDR: &str = "GOV_MODULE";
+
+#[test] // Tests we can instantiate the contract and that the owners are set correctly
+fn proper_instantiation() {
+ let mut deps = mock_dependencies();
+
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ // we can just call .unwrap() to assert this was a success
+ let res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
+ assert_eq!(0, res.messages.len());
+
+ // The ibc and gov modules are properly stored
+ assert_eq!(IBCMODULE.load(deps.as_ref().storage).unwrap(), IBC_ADDR);
+ assert_eq!(GOVMODULE.load(deps.as_ref().storage).unwrap(), GOV_ADDR);
+}
+
+#[test] // Tests that when a packet is transferred, the proper allowance is consumed
+fn consume_allowance() {
+ let mut deps = mock_dependencies();
+
+ let quota = QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 10, 10);
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![quota],
+ }],
+ };
+ let info = mock_info(GOV_ADDR, &vec![]);
+ let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
+
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom") ,
+ channel_value: 3_300_u32.into(),
+ funds: 300_u32.into()
+ );
+ let res = sudo(deps.as_mut(), mock_env(), msg).unwrap();
+
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "300");
+
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_300_u32.into(),
+ funds: 300_u32.into()
+ );
+ let err = sudo(deps.as_mut(), mock_env(), msg).unwrap_err();
+ assert!(matches!(err, ContractError::RateLimitExceded { .. }));
+}
+
+#[test] // Tests that the balance of send and receive is maintained (i.e.: receives are subtracted from the send allowance and sends from the receives)
+fn symetric_flows_dont_consume_allowance() {
+ let mut deps = mock_dependencies();
+
+ let quota = QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 10, 10);
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![quota],
+ }],
+ };
+ let info = mock_info(GOV_ADDR, &vec![]);
+ let _res = instantiate(deps.as_mut(), mock_env(), info.clone(), msg).unwrap();
+
+ let send_msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_300_u32.into(),
+ funds: 300_u32.into()
+ );
+ let recv_msg = test_msg_recv!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 300_u32.into()
+ );
+
+ let res = sudo(deps.as_mut(), mock_env(), send_msg.clone()).unwrap();
+ let Attribute { key, value } = &res.attributes[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "0");
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "300");
+
+ let res = sudo(deps.as_mut(), mock_env(), recv_msg.clone()).unwrap();
+ let Attribute { key, value } = &res.attributes[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "0");
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "0");
+
+ // We can still use the path. Even if we have sent more than the
+ // allowance through the path (900 > 3000*.1), the current "balance"
+ // of inflow vs outflow is still lower than the path's capacity/quota
+ let res = sudo(deps.as_mut(), mock_env(), recv_msg.clone()).unwrap();
+ let Attribute { key, value } = &res.attributes[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "300");
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "0");
+
+ let err = sudo(deps.as_mut(), mock_env(), recv_msg.clone()).unwrap_err();
+
+ assert!(matches!(err, ContractError::RateLimitExceded { .. }));
+}
+
+#[test] // Tests that we can have different quotas for send and receive. In this test we use 4% send and 1% receive
+fn asymetric_quotas() {
+ let mut deps = mock_dependencies();
+
+ let quota = QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 4, 1);
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![quota],
+ }],
+ };
+ let info = mock_info(GOV_ADDR, &vec![]);
+ let _res = instantiate(deps.as_mut(), mock_env(), info.clone(), msg).unwrap();
+
+ // Sending 2%
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_060_u32.into(),
+ funds: 60_u32.into()
+ );
+ let res = sudo(deps.as_mut(), mock_env(), msg).unwrap();
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "60");
+
+ // Sending 2% more. Allowed, as sending has a 4% allowance
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_060_u32.into(),
+ funds: 60_u32.into()
+ );
+
+ let res = sudo(deps.as_mut(), mock_env(), msg).unwrap();
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "120");
+
+ // Receiving 1% should still work. 4% *sent* through the path, but we can still receive.
+ let recv_msg = test_msg_recv!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 30_u32.into()
+ );
+ let res = sudo(deps.as_mut(), mock_env(), recv_msg).unwrap();
+ let Attribute { key, value } = &res.attributes[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "0");
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "90");
+
+ // Sending 2%. Should fail. In balance, we've sent 4% and received 1%, so only 1% left to send.
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_060_u32.into(),
+ funds: 60_u32.into()
+ );
+ let err = sudo(deps.as_mut(), mock_env(), msg.clone()).unwrap_err();
+ assert!(matches!(err, ContractError::RateLimitExceded { .. }));
+
+    // Sending 1%: Allowed, because sending has a 4% allowance. We've sent 4% already, but received 1%, so there's send capacity again
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_060_u32.into(),
+ funds: 30_u32.into()
+ );
+ let res = sudo(deps.as_mut(), mock_env(), msg.clone()).unwrap();
+ let Attribute { key, value } = &res.attributes[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "0");
+ let Attribute { key, value } = &res.attributes[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "120");
+}
+
+#[test] // Tests we can get the current state of the trackers
+fn query_state() {
+ let mut deps = mock_dependencies();
+
+ let quota = QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 10, 10);
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![quota],
+ }],
+ };
+ let info = mock_info(GOV_ADDR, &vec![]);
+ let env = mock_env();
+ let _res = instantiate(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ };
+
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+ let value: Vec = from_binary(&res).unwrap();
+ assert_eq!(value[0].quota.name, "weekly");
+ assert_eq!(value[0].quota.max_percentage_send, 10);
+ assert_eq!(value[0].quota.max_percentage_recv, 10);
+ assert_eq!(value[0].quota.duration, RESET_TIME_WEEKLY);
+ assert_eq!(value[0].flow.inflow, Uint256::from(0_u32));
+ assert_eq!(value[0].flow.outflow, Uint256::from(0_u32));
+ assert_eq!(
+ value[0].flow.period_end,
+ env.block.time.plus_seconds(RESET_TIME_WEEKLY)
+ );
+
+ let send_msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_300_u32.into(),
+ funds: 300_u32.into()
+ );
+ sudo(deps.as_mut(), mock_env(), send_msg.clone()).unwrap();
+
+ let recv_msg = test_msg_recv!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 30_u32.into()
+ );
+ sudo(deps.as_mut(), mock_env(), recv_msg.clone()).unwrap();
+
+ // Query
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+ let value: Vec = from_binary(&res).unwrap();
+ verify_query_response(
+ &value[0],
+ "weekly",
+ (10, 10),
+ RESET_TIME_WEEKLY,
+ 30_u32.into(),
+ 300_u32.into(),
+ env.block.time.plus_seconds(RESET_TIME_WEEKLY),
+ );
+}
+
+#[test] // Tests quota percentages are between [0,100]
+fn bad_quotas() {
+ let mut deps = mock_dependencies();
+
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "bad_quota".to_string(),
+ duration: 200,
+ send_recv: (5000, 101),
+ }],
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ instantiate(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ // If a quota is higher than 100%, we set it to 100%
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ };
+ let res = query(deps.as_ref(), env.clone(), query_msg).unwrap();
+ let value: Vec = from_binary(&res).unwrap();
+ verify_query_response(
+ &value[0],
+ "bad_quota",
+ (100, 100),
+ 200,
+ 0_u32.into(),
+ 0_u32.into(),
+ env.block.time.plus_seconds(200),
+ );
+}
+
+#[test] // Tests that undo reverts a packet send without affecting expiration or channel value
+fn undo_send() {
+ let mut deps = mock_dependencies();
+
+ let quota = QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 10, 10);
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths: vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![quota],
+ }],
+ };
+ let info = mock_info(GOV_ADDR, &vec![]);
+ let _res = instantiate(deps.as_mut(), mock_env(), info.clone(), msg).unwrap();
+
+ let send_msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_300_u32.into(),
+ funds: 300_u32.into()
+ );
+ let undo_msg = SudoMsg::UndoSend {
+ packet: Packet::mock(
+ format!("channel"),
+ format!("channel"),
+ format!("denom"),
+ 300_u32.into(),
+ ),
+ };
+
+ sudo(deps.as_mut(), mock_env(), send_msg.clone()).unwrap();
+
+ let trackers = RATE_LIMIT_TRACKERS
+ .load(&deps.storage, ("any".to_string(), "denom".to_string()))
+ .unwrap();
+ assert_eq!(
+ trackers.first().unwrap().flow.outflow,
+ Uint256::from(300_u32)
+ );
+ let period_end = trackers.first().unwrap().flow.period_end;
+ let channel_value = trackers.first().unwrap().quota.channel_value;
+
+ sudo(deps.as_mut(), mock_env(), undo_msg.clone()).unwrap();
+
+ let trackers = RATE_LIMIT_TRACKERS
+ .load(&deps.storage, ("any".to_string(), "denom".to_string()))
+ .unwrap();
+ assert_eq!(trackers.first().unwrap().flow.outflow, Uint256::from(0_u32));
+ assert_eq!(trackers.first().unwrap().flow.period_end, period_end);
+ assert_eq!(trackers.first().unwrap().quota.channel_value, channel_value);
+}
+
+#[test]
+fn test_basic_message() {
+ let json = r#"{"send_packet":{"packet":{"sequence":2,"source_port":"transfer","source_channel":"channel-0","destination_port":"transfer","destination_channel":"channel-0","data":{"denom":"stake","amount":"125000000000011250","sender":"osmo1dwtagd6xzl4eutwtyv6mewra627lkg3n3w26h6","receiver":"osmo1yvjkt8lnpxucjmspaj5ss4aa8562gx0a3rks8s"},"timeout_height":{"revision_height":100}}}}"#;
+ let _parsed: SudoMsg = serde_json_wasm::from_str(json).unwrap();
+ //println!("{parsed:?}");
+}
+
+#[test]
+fn test_testnet_message() {
+ let json = r#"{"send_packet":{"packet":{"sequence":4,"source_port":"transfer","source_channel":"channel-0","destination_port":"transfer","destination_channel":"channel-1491","data":{"denom":"uosmo","amount":"100","sender":"osmo1cyyzpxplxdzkeea7kwsydadg87357qnahakaks","receiver":"osmo1c584m4lq25h83yp6ag8hh4htjr92d954vklzja"},"timeout_height":{},"timeout_timestamp":1668024637477293371}}}"#;
+ let _parsed: SudoMsg = serde_json_wasm::from_str(json).unwrap();
+ //println!("{parsed:?}");
+}
+
+#[test]
+fn test_tokenfactory_message() {
+ let json = r#"{"send_packet":{"packet":{"sequence":4,"source_port":"transfer","source_channel":"channel-0","destination_port":"transfer","destination_channel":"channel-1491","data":{"denom":"transfer/channel-0/factory/osmo12smx2wdlyttvyzvzg54y2vnqwq2qjateuf7thj/czar","amount":"100000000000000000","sender":"osmo1cyyzpxplxdzkeea7kwsydadg87357qnahakaks","receiver":"osmo1c584m4lq25h83yp6ag8hh4htjr92d954vklzja"},"timeout_height":{},"timeout_timestamp":1668024476848430980}}}"#;
+ let _parsed: SudoMsg = serde_json_wasm::from_str(json).unwrap();
+ //println!("{parsed:?}");
+}
diff --git a/testutil/contracts/rate-limiter/src/error.rs b/testutil/contracts/rate-limiter/src/error.rs
new file mode 100644
index 0000000000..f5dcda9468
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/error.rs
@@ -0,0 +1,29 @@
+use cosmwasm_std::{StdError, Timestamp, Uint256};
+use thiserror::Error;
+
+#[derive(Error, Debug, PartialEq)]
+pub enum ContractError {
+ #[error("{0}")]
+ Std(#[from] StdError),
+
+ #[error("Unauthorized")]
+ Unauthorized {},
+
+ #[error("IBC Rate Limit exceeded for {channel}/{denom}. Tried to transfer {amount} which exceeds capacity on the '{quota_name}' quota ({used}/{max}). Try again after {reset:?}")]
+ RateLimitExceded {
+ channel: String,
+ denom: String,
+ amount: Uint256,
+ quota_name: String,
+ used: Uint256,
+ max: Uint256,
+ reset: Timestamp,
+ },
+
+ #[error("Quota {quota_id} not found for channel {channel_id}")]
+ QuotaNotFound {
+ quota_id: String,
+ channel_id: String,
+ denom: String,
+ },
+}
diff --git a/testutil/contracts/rate-limiter/src/execute.rs b/testutil/contracts/rate-limiter/src/execute.rs
new file mode 100644
index 0000000000..047a2179dd
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/execute.rs
@@ -0,0 +1,249 @@
+use crate::msg::{PathMsg, QuotaMsg};
+use crate::state::{Flow, Path, RateLimit, GOVMODULE, IBCMODULE, RATE_LIMIT_TRACKERS};
+use crate::ContractError;
+use cosmwasm_std::{Addr, DepsMut, Response, Timestamp};
+
+pub fn add_new_paths(
+ deps: DepsMut,
+ path_msgs: Vec,
+ now: Timestamp,
+) -> Result<(), ContractError> {
+ for path_msg in path_msgs {
+ let path = Path::new(path_msg.channel_id, path_msg.denom);
+
+ RATE_LIMIT_TRACKERS.save(
+ deps.storage,
+ path.into(),
+ &path_msg
+ .quotas
+ .iter()
+ .map(|q| RateLimit {
+ quota: q.into(),
+ flow: Flow::new(0_u128, 0_u128, now, q.duration),
+ })
+ .collect(),
+ )?
+ }
+ Ok(())
+}
+
+pub fn try_add_path(
+ deps: DepsMut,
+ sender: Addr,
+ channel_id: String,
+ denom: String,
+ quotas: Vec,
+ now: Timestamp,
+) -> Result {
+    // code nit: should we make a function for checking this authorization?
+ let ibc_module = IBCMODULE.load(deps.storage)?;
+ let gov_module = GOVMODULE.load(deps.storage)?;
+ if sender != ibc_module && sender != gov_module {
+ return Err(ContractError::Unauthorized {});
+ }
+ add_new_paths(deps, vec![PathMsg::new(&channel_id, &denom, quotas)], now)?;
+
+ Ok(Response::new()
+ .add_attribute("method", "try_add_channel")
+ .add_attribute("channel_id", channel_id)
+ .add_attribute("denom", denom))
+}
+
+pub fn try_remove_path(
+ deps: DepsMut,
+ sender: Addr,
+ channel_id: String,
+ denom: String,
+) -> Result {
+ let ibc_module = IBCMODULE.load(deps.storage)?;
+ let gov_module = GOVMODULE.load(deps.storage)?;
+ if sender != ibc_module && sender != gov_module {
+ return Err(ContractError::Unauthorized {});
+ }
+
+ let path = Path::new(&channel_id, &denom);
+ RATE_LIMIT_TRACKERS.remove(deps.storage, path.into());
+ Ok(Response::new()
+ .add_attribute("method", "try_remove_channel")
+ .add_attribute("denom", denom)
+ .add_attribute("channel_id", channel_id))
+}
+
+// Reset the specified quota_id for the given channel_id
+pub fn try_reset_path_quota(
+ deps: DepsMut,
+ sender: Addr,
+ channel_id: String,
+ denom: String,
+ quota_id: String,
+ now: Timestamp,
+) -> Result {
+ let gov_module = GOVMODULE.load(deps.storage)?;
+ if sender != gov_module {
+ return Err(ContractError::Unauthorized {});
+ }
+
+ let path = Path::new(&channel_id, &denom);
+ RATE_LIMIT_TRACKERS.update(deps.storage, path.into(), |maybe_rate_limit| {
+ match maybe_rate_limit {
+ None => Err(ContractError::QuotaNotFound {
+ quota_id,
+ channel_id: channel_id.clone(),
+ denom: denom.clone(),
+ }),
+ Some(mut limits) => {
+                // Q: What happens here if quota_id is not found? Seems like we return Ok?
+ limits.iter_mut().for_each(|limit| {
+ if limit.quota.name == quota_id.as_ref() {
+ limit.flow.expire(now, limit.quota.duration)
+ }
+ });
+ Ok(limits)
+ }
+ }
+ })?;
+
+ Ok(Response::new()
+ .add_attribute("method", "try_reset_channel")
+ .add_attribute("channel_id", channel_id))
+}
+
+#[cfg(test)]
+mod tests {
+ use cosmwasm_std::testing::{mock_dependencies, mock_env, mock_info};
+ use cosmwasm_std::{from_binary, Addr, StdError};
+
+ use crate::contract::{execute, query};
+ use crate::helpers::tests::verify_query_response;
+ use crate::msg::{ExecuteMsg, QueryMsg, QuotaMsg};
+ use crate::state::{RateLimit, GOVMODULE, IBCMODULE};
+
+ const IBC_ADDR: &str = "IBC_MODULE";
+ const GOV_ADDR: &str = "GOV_MODULE";
+
+ #[test] // Tests AddPath and RemovePath messages
+ fn management_add_and_remove_path() {
+ let mut deps = mock_dependencies();
+ IBCMODULE
+ .save(deps.as_mut().storage, &Addr::unchecked(IBC_ADDR))
+ .unwrap();
+ GOVMODULE
+ .save(deps.as_mut().storage, &Addr::unchecked(GOV_ADDR))
+ .unwrap();
+
+ let msg = ExecuteMsg::AddPath {
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "daily".to_string(),
+ duration: 1600,
+ send_recv: (3, 5),
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ let res = execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+ assert_eq!(0, res.messages.len());
+
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ };
+
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+
+ let value: Vec = from_binary(&res).unwrap();
+ verify_query_response(
+ &value[0],
+ "daily",
+ (3, 5),
+ 1600,
+ 0_u32.into(),
+ 0_u32.into(),
+ env.block.time.plus_seconds(1600),
+ );
+
+ assert_eq!(value.len(), 1);
+
+ // Add another path
+ let msg = ExecuteMsg::AddPath {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "daily".to_string(),
+ duration: 1600,
+ send_recv: (3, 5),
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ // remove the first one
+ let msg = ExecuteMsg::RemovePath {
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ };
+
+ let info = mock_info(IBC_ADDR, &vec![]);
+ let env = mock_env();
+ execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ // The channel is not there anymore
+ let err = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap_err();
+ assert!(matches!(err, StdError::NotFound { .. }));
+
+ // The second channel is still there
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ };
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+ let value: Vec = from_binary(&res).unwrap();
+ assert_eq!(value.len(), 1);
+ verify_query_response(
+ &value[0],
+ "daily",
+ (3, 5),
+ 1600,
+ 0_u32.into(),
+ 0_u32.into(),
+ env.block.time.plus_seconds(1600),
+ );
+
+        // Paths are overridden if they share a name and denom
+ let msg = ExecuteMsg::AddPath {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "different".to_string(),
+ duration: 5000,
+ send_recv: (50, 30),
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ };
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+ let value: Vec = from_binary(&res).unwrap();
+ assert_eq!(value.len(), 1);
+
+ verify_query_response(
+ &value[0],
+ "different",
+ (50, 30),
+ 5000,
+ 0_u32.into(),
+ 0_u32.into(),
+ env.block.time.plus_seconds(5000),
+ );
+ }
+}
diff --git a/testutil/contracts/rate-limiter/src/helpers.rs b/testutil/contracts/rate-limiter/src/helpers.rs
new file mode 100644
index 0000000000..530d3b6cf2
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/helpers.rs
@@ -0,0 +1,61 @@
+#![cfg(test)]
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+use cosmwasm_std::{to_binary, Addr, CosmosMsg, StdResult, WasmMsg};
+
+use crate::msg::ExecuteMsg;
+use crate::msg::SudoMsg;
+
+/// CwTemplateContract is a wrapper around Addr that provides a lot of helpers
+/// for working with this.
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct RateLimitingContract(pub Addr);
+
+impl RateLimitingContract {
+ pub fn addr(&self) -> Addr {
+ self.0.clone()
+ }
+
+ pub fn call>(&self, msg: T) -> StdResult {
+ let msg = to_binary(&msg.into())?;
+ Ok(WasmMsg::Execute {
+ contract_addr: self.addr().into(),
+ msg,
+ funds: vec![],
+ }
+ .into())
+ }
+
+ pub fn sudo>(&self, msg: T) -> cw_multi_test::SudoMsg {
+ let msg = to_binary(&msg.into()).unwrap();
+ cw_multi_test::SudoMsg::Wasm(cw_multi_test::WasmSudo {
+ contract_addr: self.addr().into(),
+ msg,
+ })
+ }
+}
+
+pub mod tests {
+ use cosmwasm_std::{Timestamp, Uint256};
+
+ use crate::state::RateLimit;
+
+ pub fn verify_query_response(
+ value: &RateLimit,
+ quota_name: &str,
+ send_recv: (u32, u32),
+ duration: u64,
+ inflow: Uint256,
+ outflow: Uint256,
+ period_end: Timestamp,
+ ) {
+ assert_eq!(value.quota.name, quota_name);
+ assert_eq!(value.quota.max_percentage_send, send_recv.0);
+ assert_eq!(value.quota.max_percentage_recv, send_recv.1);
+ assert_eq!(value.quota.duration, duration);
+ assert_eq!(value.flow.inflow, inflow);
+ assert_eq!(value.flow.outflow, outflow);
+ assert_eq!(value.flow.period_end, period_end);
+ }
+}
diff --git a/testutil/contracts/rate-limiter/src/integration_tests.rs b/testutil/contracts/rate-limiter/src/integration_tests.rs
new file mode 100644
index 0000000000..bd9befeb83
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/integration_tests.rs
@@ -0,0 +1,417 @@
+#![cfg(test)]
+use crate::{helpers::RateLimitingContract, msg::ExecuteMsg, test_msg_send, ContractError};
+use cosmwasm_std::{Addr, Coin, Empty, Timestamp, Uint128, Uint256};
+use cw_multi_test::{App, AppBuilder, Contract, ContractWrapper, Executor};
+
+use crate::{
+ msg::{InstantiateMsg, PathMsg, QuotaMsg},
+ state::tests::{RESET_TIME_DAILY, RESET_TIME_MONTHLY, RESET_TIME_WEEKLY},
+};
+
+pub fn contract_template() -> Box> {
+ let contract = ContractWrapper::new(
+ crate::contract::execute,
+ crate::contract::instantiate,
+ crate::contract::query,
+ )
+ .with_sudo(crate::contract::sudo);
+ Box::new(contract)
+}
+
+const USER: &str = "USER";
+const IBC_ADDR: &str = "IBC_MODULE";
+const GOV_ADDR: &str = "GOV_MODULE";
+const NATIVE_DENOM: &str = "nosmo";
+
+fn mock_app() -> App {
+ AppBuilder::new().build(|router, _, storage| {
+ router
+ .bank
+ .init_balance(
+ storage,
+ &Addr::unchecked(USER),
+ vec![Coin {
+ denom: NATIVE_DENOM.to_string(),
+ amount: Uint128::new(1_000),
+ }],
+ )
+ .unwrap();
+ })
+}
+
+// Instantiate the contract
+fn proper_instantiate(paths: Vec) -> (App, RateLimitingContract) {
+ let mut app = mock_app();
+ let cw_code_id = app.store_code(contract_template());
+
+ let msg = InstantiateMsg {
+ gov_module: Addr::unchecked(GOV_ADDR),
+ ibc_module: Addr::unchecked(IBC_ADDR),
+ paths,
+ };
+
+ let cw_rate_limit_contract_addr = app
+ .instantiate_contract(
+ cw_code_id,
+ Addr::unchecked(GOV_ADDR),
+ &msg,
+ &[],
+ "test",
+ None,
+ )
+ .unwrap();
+
+ let cw_rate_limit_contract = RateLimitingContract(cw_rate_limit_contract_addr);
+
+ (app, cw_rate_limit_contract)
+}
+
+use cosmwasm_std::Attribute;
+
+#[test] // Checks that the RateLimit flows are expired properly when time passes
+fn expiration() {
+ let quota = QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 10, 10);
+
+ let (mut app, cw_rate_limit_contract) = proper_instantiate(vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![quota],
+ }]);
+
+ // Using all the allowance
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 300_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ let res = app.sudo(cosmos_msg).unwrap();
+
+ let Attribute { key, value } = &res.custom_attrs(1)[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "0");
+ let Attribute { key, value } = &res.custom_attrs(1)[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "300");
+ let Attribute { key, value } = &res.custom_attrs(1)[5];
+ assert_eq!(key, "weekly_max_in");
+ assert_eq!(value, "300");
+ let Attribute { key, value } = &res.custom_attrs(1)[6];
+ assert_eq!(key, "weekly_max_out");
+ assert_eq!(value, "300");
+
+ // Another packet is rate limited
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 300_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ let err = app.sudo(cosmos_msg).unwrap_err();
+
+ assert_eq!(
+ err.downcast_ref::().unwrap(),
+ &ContractError::RateLimitExceded {
+ channel: "channel".to_string(),
+ denom: "denom".to_string(),
+ amount: Uint256::from_u128(300),
+ quota_name: "weekly".to_string(),
+ used: Uint256::from_u128(300),
+ max: Uint256::from_u128(300),
+ reset: Timestamp::from_nanos(1572402219879305533),
+ }
+ );
+
+ // ... Time passes
+ app.update_block(|b| {
+ b.height += 1000;
+ b.time = b.time.plus_seconds(RESET_TIME_WEEKLY + 1)
+ });
+
+ // Sending the packet should work now
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 300_u32.into()
+ );
+
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ let res = app.sudo(cosmos_msg).unwrap();
+
+ let Attribute { key, value } = &res.custom_attrs(1)[3];
+ assert_eq!(key, "weekly_used_in");
+ assert_eq!(value, "0");
+ let Attribute { key, value } = &res.custom_attrs(1)[4];
+ assert_eq!(key, "weekly_used_out");
+ assert_eq!(value, "300");
+ let Attribute { key, value } = &res.custom_attrs(1)[5];
+ assert_eq!(key, "weekly_max_in");
+ assert_eq!(value, "300");
+ let Attribute { key, value } = &res.custom_attrs(1)[6];
+ assert_eq!(key, "weekly_max_out");
+ assert_eq!(value, "300");
+}
+
+#[test] // Tests we can have different maximums for different quotas (daily, weekly, etc) and that they all are active at the same time
+fn multiple_quotas() {
+ let quotas = vec![
+ QuotaMsg::new("daily", RESET_TIME_DAILY, 1, 1),
+ QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 5, 5),
+ QuotaMsg::new("monthly", RESET_TIME_MONTHLY, 5, 5),
+ ];
+
+ let (mut app, cw_rate_limit_contract) = proper_instantiate(vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas,
+ }]);
+
+ // Sending 1% to use the daily allowance
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+
+ // Another packet is rate limited
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // ... One day passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_DAILY + 1)
+ });
+
+ // Sending the packet should work now
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+
+ // Do that for 4 more days
+ for _ in 1..4 {
+ // ... One day passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_DAILY + 1)
+ });
+
+ // Sending the packet should work now
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+ }
+
+ // ... One day passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_DAILY + 1)
+ });
+
+ // We now have exceeded the weekly limit! Even if the daily limit allows us, the weekly doesn't
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // ... One week passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_WEEKLY + 1)
+ });
+
+    // We still can't send because the weekly and monthly limits are the same
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // Waiting a week again, doesn't help!!
+ // ... One week passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_WEEKLY + 1)
+ });
+
+    // We still can't send because the monthly limit hasn't passed
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // Only after two more weeks we can send again
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds((RESET_TIME_WEEKLY * 2) + 1) // Two weeks
+ });
+
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 101_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+}
+
+#[test] // Tests that the channel value is based on the value at the beginning of the period
+fn channel_value_cached() {
+ let quotas = vec![
+ QuotaMsg::new("daily", RESET_TIME_DAILY, 2, 2),
+ QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 5, 5),
+ ];
+
+ let (mut app, cw_rate_limit_contract) = proper_instantiate(vec![PathMsg {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas,
+ }]);
+
+ // Sending 1% (half of the daily allowance)
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 100_u32.into(),
+ funds: 1_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+
+ // Sending 3% is now rate limited
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 100_u32.into(),
+ funds: 3_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // Even if the channel value increases, the percentage is calculated based on the value at period start
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 100000_u32.into(),
+ funds: 3_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // ... One day passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_DAILY + 1)
+ });
+
+ // New Channel Value world!
+
+ // Sending 1% of a new value (10_000) passes the daily check, cause it
+ // has expired, but not the weekly check (The value for last week is
+    // still 100, as only 1 day has passed)
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 10_000_u32.into(),
+ funds: 100_u32.into()
+ );
+
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+
+ // ... One week passes
+ app.update_block(|b| {
+ b.height += 10;
+ b.time = b.time.plus_seconds(RESET_TIME_WEEKLY + 1)
+ });
+
+ // Sending 1% of a new value should work and set the value for the day at 10_000
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 10_000_u32.into(),
+ funds: 100_u32.into()
+ );
+
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+
+    // If the value magically decreases, we can still send up to 100 more (1% of 10k)
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 1_u32.into(),
+ funds: 75_u32.into()
+ );
+
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap();
+}
+
+#[test] // Checks that RateLimits added after instantiation are respected
+fn add_paths_later() {
+ let (mut app, cw_rate_limit_contract) = proper_instantiate(vec![]);
+
+ // All sends are allowed
+ let msg = test_msg_send!(
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ channel_value: 3_000_u32.into(),
+ funds: 300_u32.into()
+ );
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg.clone());
+ let res = app.sudo(cosmos_msg).unwrap();
+
+ let Attribute { key, value } = &res.custom_attrs(1)[3];
+ assert_eq!(key, "quota");
+ assert_eq!(value, "none");
+
+ // Add a weekly limit of 1%
+ let management_msg = ExecuteMsg::AddPath {
+ channel_id: format!("any"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg::new("weekly", RESET_TIME_WEEKLY, 1, 1)],
+ };
+
+ let cosmos_msg = cw_rate_limit_contract.call(management_msg).unwrap();
+ app.execute(Addr::unchecked(GOV_ADDR), cosmos_msg).unwrap();
+
+ // Executing the same message again should fail, as it is now rate limited
+ let cosmos_msg = cw_rate_limit_contract.sudo(msg);
+ app.sudo(cosmos_msg).unwrap_err();
+}
diff --git a/testutil/contracts/rate-limiter/src/lib.rs b/testutil/contracts/rate-limiter/src/lib.rs
new file mode 100644
index 0000000000..6fcd1c32ce
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/lib.rs
@@ -0,0 +1,21 @@
+#![allow(clippy::result_large_err)]
+
+// Contract
+pub mod contract;
+mod error;
+pub mod msg;
+mod state;
+
+pub mod packet;
+
+// Functions
+mod execute;
+mod query;
+mod sudo;
+
+// Tests
+mod contract_tests;
+mod helpers;
+mod integration_tests;
+
+pub use crate::error::ContractError;
diff --git a/testutil/contracts/rate-limiter/src/management.rs b/testutil/contracts/rate-limiter/src/management.rs
new file mode 100644
index 0000000000..04dc47df80
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/management.rs
@@ -0,0 +1,250 @@
+use crate::msg::{PathMsg, QuotaMsg};
+use crate::state::{Flow, Path, RateLimit, GOVMODULE, IBCMODULE, RATE_LIMIT_TRACKERS};
+use crate::ContractError;
+use cosmwasm_std::{Addr, DepsMut, Response, Timestamp};
+
+pub fn add_new_paths(
+ deps: DepsMut,
+ path_msgs: Vec<PathMsg>,
+ now: Timestamp,
+) -> Result<(), ContractError> {
+ for path_msg in path_msgs {
+ let path = Path::new(path_msg.channel_id, path_msg.denom);
+
+ RATE_LIMIT_TRACKERS.save(
+ deps.storage,
+ path.into(),
+ &path_msg
+ .quotas
+ .iter()
+ .map(|q| RateLimit {
+ quota: q.into(),
+ flow: Flow::new(0_u128, 0_u128, now, q.duration),
+ })
+ .collect(),
+ )?
+ }
+ Ok(())
+}
+
+pub fn try_add_path(
+ deps: DepsMut,
+ sender: Addr,
+ channel_id: String,
+ denom: String,
+ quotas: Vec<QuotaMsg>,
+ now: Timestamp,
+) -> Result<Response, ContractError> {
+ // codenit: should we make a function for checking this authorization?
+ let ibc_module = IBCMODULE.load(deps.storage)?;
+ let gov_module = GOVMODULE.load(deps.storage)?;
+ if sender != ibc_module && sender != gov_module {
+ return Err(ContractError::Unauthorized {});
+ }
+ add_new_paths(deps, vec![PathMsg::new(&channel_id, &denom, quotas)], now)?;
+
+ Ok(Response::new()
+ .add_attribute("method", "try_add_channel")
+ .add_attribute("channel_id", channel_id)
+ .add_attribute("denom", denom))
+}
+
+pub fn try_remove_path(
+ deps: DepsMut,
+ sender: Addr,
+ channel_id: String,
+ denom: String,
+) -> Result<Response, ContractError> {
+ let ibc_module = IBCMODULE.load(deps.storage)?;
+ let gov_module = GOVMODULE.load(deps.storage)?;
+ if sender != ibc_module && sender != gov_module {
+ return Err(ContractError::Unauthorized {});
+ }
+
+ let path = Path::new(&channel_id, &denom);
+ RATE_LIMIT_TRACKERS.remove(deps.storage, path.into());
+ Ok(Response::new()
+ .add_attribute("method", "try_remove_channel")
+ .add_attribute("denom", denom)
+ .add_attribute("channel_id", channel_id))
+}
+
+// Reset the specified quota_id for the given channel_id
+pub fn try_reset_path_quota(
+ deps: DepsMut,
+ sender: Addr,
+ channel_id: String,
+ denom: String,
+ quota_id: String,
+ now: Timestamp,
+) -> Result<Response, ContractError> {
+ let gov_module = GOVMODULE.load(deps.storage)?;
+ if sender != gov_module {
+ return Err(ContractError::Unauthorized {});
+ }
+
+ let path = Path::new(&channel_id, &denom);
+ RATE_LIMIT_TRACKERS.update(deps.storage, path.into(), |maybe_rate_limit| {
+ match maybe_rate_limit {
+ None => Err(ContractError::QuotaNotFound {
+ quota_id,
+ channel_id: channel_id.clone(),
+ denom: denom.clone(),
+ }),
+ Some(mut limits) => {
+ // Q: What happens here if quota_id not found? seems like we return ok?
+ limits.iter_mut().for_each(|limit| {
+ if limit.quota.name == quota_id.as_ref() {
+ limit.flow.expire(now, limit.quota.duration)
+ }
+ });
+ Ok(limits)
+ }
+ }
+ })?;
+
+ Ok(Response::new()
+ .add_attribute("method", "try_reset_channel")
+ .add_attribute("channel_id", channel_id))
+}
+
+#[cfg(test)]
+mod tests {
+
+ use cosmwasm_std::testing::{mock_dependencies, mock_env, mock_info};
+ use cosmwasm_std::{from_binary, Addr, StdError};
+
+ use crate::contract::{execute, query};
+ use crate::helpers::tests::verify_query_response;
+ use crate::msg::{ExecuteMsg, QueryMsg, QuotaMsg};
+ use crate::state::{RateLimit, GOVMODULE, IBCMODULE};
+
+ const IBC_ADDR: &str = "IBC_MODULE";
+ const GOV_ADDR: &str = "GOV_MODULE";
+
+ #[test] // Tests AddPath and RemovePath messages
+ fn management_add_and_remove_path() {
+ let mut deps = mock_dependencies();
+ IBCMODULE
+ .save(deps.as_mut().storage, &Addr::unchecked(IBC_ADDR))
+ .unwrap();
+ GOVMODULE
+ .save(deps.as_mut().storage, &Addr::unchecked(GOV_ADDR))
+ .unwrap();
+
+ let msg = ExecuteMsg::AddPath {
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "daily".to_string(),
+ duration: 1600,
+ send_recv: (3, 5),
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ let res = execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+ assert_eq!(0, res.messages.len());
+
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ };
+
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+
+ let value: Vec<RateLimit> = from_binary(&res).unwrap();
+ verify_query_response(
+ &value[0],
+ "daily",
+ (3, 5),
+ 1600,
+ 0,
+ 0,
+ env.block.time.plus_seconds(1600),
+ );
+
+ assert_eq!(value.len(), 1);
+
+ // Add another path
+ let msg = ExecuteMsg::AddPath {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "daily".to_string(),
+ duration: 1600,
+ send_recv: (3, 5),
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ // remove the first one
+ let msg = ExecuteMsg::RemovePath {
+ channel_id: format!("channel"),
+ denom: format!("denom"),
+ };
+
+ let info = mock_info(IBC_ADDR, &vec![]);
+ let env = mock_env();
+ execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ // The channel is not there anymore
+ let err = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap_err();
+ assert!(matches!(err, StdError::NotFound { .. }));
+
+ // The second channel is still there
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ };
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+ let value: Vec<RateLimit> = from_binary(&res).unwrap();
+ assert_eq!(value.len(), 1);
+ verify_query_response(
+ &value[0],
+ "daily",
+ (3, 5),
+ 1600,
+ 0,
+ 0,
+ env.block.time.plus_seconds(1600),
+ );
+
+ // Paths are overridden if they share a name and denom
+ let msg = ExecuteMsg::AddPath {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ quotas: vec![QuotaMsg {
+ name: "different".to_string(),
+ duration: 5000,
+ send_recv: (50, 30),
+ }],
+ };
+ let info = mock_info(IBC_ADDR, &vec![]);
+
+ let env = mock_env();
+ execute(deps.as_mut(), env.clone(), info, msg).unwrap();
+
+ let query_msg = QueryMsg::GetQuotas {
+ channel_id: format!("channel2"),
+ denom: format!("denom"),
+ };
+ let res = query(deps.as_ref(), mock_env(), query_msg.clone()).unwrap();
+ let value: Vec<RateLimit> = from_binary(&res).unwrap();
+ assert_eq!(value.len(), 1);
+
+ verify_query_response(
+ &value[0],
+ "different",
+ (50, 30),
+ 5000,
+ 0,
+ 0,
+ env.block.time.plus_seconds(5000),
+ );
+ }
+}
diff --git a/testutil/contracts/rate-limiter/src/msg.rs b/testutil/contracts/rate-limiter/src/msg.rs
new file mode 100644
index 0000000000..57279c0ad6
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/msg.rs
@@ -0,0 +1,105 @@
+use cosmwasm_schema::{cw_serde, QueryResponses};
+use cosmwasm_std::Addr;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+#[cfg(test)]
+use cosmwasm_std::Uint256;
+
+use crate::packet::Packet;
+
+// PathMsg contains a channel_id and denom to represent a unique identifier within ibc-go, and a list of rate limit quotas
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct PathMsg {
+ pub channel_id: String,
+ pub denom: String,
+ pub quotas: Vec<QuotaMsg>,
+}
+
+impl PathMsg {
+ pub fn new(
+ channel: impl Into<String>,
+ denom: impl Into<String>,
+ quotas: Vec<QuotaMsg>,
+ ) -> Self {
+ PathMsg {
+ channel_id: channel.into(),
+ denom: denom.into(),
+ quotas,
+ }
+ }
+}
+
+// QuotaMsg represents a rate limiting Quota when sent as a wasm msg
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct QuotaMsg {
+ pub name: String,
+ pub duration: u64,
+ pub send_recv: (u32, u32),
+}
+
+impl QuotaMsg {
+ pub fn new(name: &str, seconds: u64, send_percentage: u32, recv_percentage: u32) -> Self {
+ QuotaMsg {
+ name: name.to_string(),
+ duration: seconds,
+ send_recv: (send_percentage, recv_percentage),
+ }
+ }
+}
+
+/// Initialize the contract with the address of the IBC module and any existing channels.
+/// Only the ibc module is allowed to execute actions on this contract
+#[cw_serde]
+pub struct InstantiateMsg {
+ pub gov_module: Addr,
+ pub ibc_module: Addr,
+ pub paths: Vec<PathMsg>,
+}
+
+/// The caller (IBC module) is responsible for correctly calculating the funds
+/// being sent through the channel
+#[cw_serde]
+pub enum ExecuteMsg {
+ AddPath {
+ channel_id: String,
+ denom: String,
+ quotas: Vec<QuotaMsg>,
+ },
+ RemovePath {
+ channel_id: String,
+ denom: String,
+ },
+ ResetPathQuota {
+ channel_id: String,
+ denom: String,
+ quota_id: String,
+ },
+}
+
+#[cw_serde]
+#[derive(QueryResponses)]
+pub enum QueryMsg {
+ #[returns(Vec<RateLimit>)]
+ GetQuotas { channel_id: String, denom: String },
+}
+
+#[cw_serde]
+pub enum SudoMsg {
+ SendPacket {
+ packet: Packet,
+ #[cfg(test)]
+ channel_value_mock: Option<Uint256>,
+ },
+ RecvPacket {
+ packet: Packet,
+ #[cfg(test)]
+ channel_value_mock: Option<Uint256>,
+ },
+ UndoSend {
+ packet: Packet,
+ },
+}
+
+#[cw_serde]
+pub enum MigrateMsg {}
diff --git a/testutil/contracts/rate-limiter/src/packet.rs b/testutil/contracts/rate-limiter/src/packet.rs
new file mode 100644
index 0000000000..b4fcd2c490
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/packet.rs
@@ -0,0 +1,443 @@
+use crate::state::FlowType;
+use cosmwasm_std::{Addr, Deps, StdError, Uint256};
+use provwasm_std::types::cosmos::bank::v1beta1::QuerySupplyOfRequest;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use sha2::{Digest, Sha256};
+
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct Height {
+ /// Previously known as "epoch"
+ revision_number: Option<u64>,
+
+ /// The height of a block
+ revision_height: Option<u64>,
+}
+
+// IBC transfer data
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct FungibleTokenData {
+ pub denom: String,
+ amount: Uint256,
+ sender: Addr,
+ receiver: Addr,
+}
+
+// An IBC packet
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct Packet {
+ pub sequence: u64,
+ pub source_port: String,
+ pub source_channel: String,
+ pub destination_port: String,
+ pub destination_channel: String,
+ pub data: FungibleTokenData,
+ pub timeout_height: Height,
+ pub timeout_timestamp: Option<u64>,
+}
+
+use std::str::FromStr; // Needed to parse the coin's String as Uint256
+
+fn hash_denom(denom: &str) -> String {
+ let mut hasher = Sha256::new();
+ hasher.update(denom.as_bytes());
+ let result = hasher.finalize();
+ let hash = hex::encode(result);
+ format!("ibc/{}", hash.to_uppercase())
+}
+
+impl Packet {
+ pub fn mock(
+ source_channel: String,
+ dest_channel: String,
+ denom: String,
+ funds: Uint256,
+ ) -> Packet {
+ Packet {
+ sequence: 0,
+ source_port: "transfer".to_string(),
+ source_channel,
+ destination_port: "transfer".to_string(),
+ destination_channel: dest_channel,
+ data: crate::packet::FungibleTokenData {
+ denom,
+ amount: funds,
+ sender: Addr::unchecked("sender"),
+ receiver: Addr::unchecked("receiver"),
+ },
+ timeout_height: crate::packet::Height {
+ revision_number: None,
+ revision_height: None,
+ },
+ timeout_timestamp: None,
+ }
+ }
+
+ pub fn channel_value(&self, deps: Deps, direction: &FlowType) -> Result<Uint256, StdError> {
+ let res = QuerySupplyOfRequest {
+ denom: self.local_denom(direction),
+ }
+ .query(&deps.querier)?;
+ Uint256::from_str(&res.amount.unwrap_or_default().amount)
+ }
+
+ pub fn get_funds(&self) -> Uint256 {
+ self.data.amount
+ }
+
+ fn local_channel(&self, direction: &FlowType) -> String {
+ // Pick the appropriate channel depending on whether this is a send or a recv
+ match direction {
+ FlowType::In => self.destination_channel.clone(),
+ FlowType::Out => self.source_channel.clone(),
+ }
+ }
+
+ fn receiver_chain_is_source(&self) -> bool {
+ self.data
+ .denom
+ .starts_with(&format!("transfer/{}", self.source_channel))
+ }
+
+ fn handle_denom_for_sends(&self) -> String {
+ if !self.data.denom.starts_with("transfer/") {
+ // For native tokens we just use what's on the packet
+ return self.data.denom.clone();
+ }
+ // For non-native tokens, we need to generate the IBCDenom
+ hash_denom(&self.data.denom)
+ }
+
+ fn handle_denom_for_recvs(&self) -> String {
+ if self.receiver_chain_is_source() {
+ // These are tokens that have been sent to the counterparty and are returning
+ let unprefixed = self
+ .data
+ .denom
+ .strip_prefix(&format!("transfer/{}/", self.source_channel))
+ .unwrap_or_default();
+ let split: Vec<&str> = unprefixed.split('/').collect();
+ if split[0] == unprefixed {
+ // This is a native token. Return the unprefixed token
+ unprefixed.to_string()
+ } else {
+ // This is a non-native that was sent to the counterparty.
+ // We need to hash it.
+ // The ibc-go implementation checks that the denom has been built correctly. We
+ // don't need to do that here because if it hasn't, the transfer module will catch it.
+ hash_denom(unprefixed)
+ }
+ } else {
+ // Tokens that come directly from the counterparty.
+ // Since the sender didn't prefix them, we need to do it here.
+ let prefixed = format!("transfer/{}/", self.destination_channel) + &self.data.denom;
+ hash_denom(&prefixed)
+ }
+ }
+
+ fn local_denom(&self, direction: &FlowType) -> String {
+ match direction {
+ FlowType::In => self.handle_denom_for_recvs(),
+ FlowType::Out => self.handle_denom_for_sends(),
+ }
+ }
+
+ pub fn path_data(&self, direction: &FlowType) -> (String, String) {
+ (self.local_channel(direction), self.local_denom(direction))
+ }
+}
+
+// Helpers
+
+// Create a new packet for testing
+#[cfg(test)]
+#[macro_export]
+macro_rules! test_msg_send {
+ (channel_id: $channel_id:expr, denom: $denom:expr, channel_value: $channel_value:expr, funds: $funds:expr) => {
+ $crate::msg::SudoMsg::SendPacket {
+ packet: $crate::packet::Packet::mock($channel_id, $channel_id, $denom, $funds),
+ channel_value_mock: Some($channel_value),
+ }
+ };
+}
+
+#[cfg(test)]
+#[macro_export]
+macro_rules! test_msg_recv {
+ (channel_id: $channel_id:expr, denom: $denom:expr, channel_value: $channel_value:expr, funds: $funds:expr) => {
+ $crate::msg::SudoMsg::RecvPacket {
+ packet: $crate::packet::Packet::mock(
+ $channel_id,
+ $channel_id,
+ format!("transfer/{}/{}", $channel_id, $denom),
+ $funds,
+ ),
+ channel_value_mock: Some($channel_value),
+ }
+ };
+}
+
+#[cfg(test)]
+pub mod tests {
+ use crate::msg::SudoMsg;
+
+ use super::*;
+
+ #[test]
+ fn send_native() {
+ let packet = Packet::mock(
+ format!("channel-17-local"),
+ format!("channel-42-counterparty"),
+ format!("uosmo"),
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::Out), "uosmo");
+ }
+
+ #[test]
+ fn send_non_native() {
+ // The transfer module "unhashes" the denom from
+ // ibc/09E4864A262249507925831FBAD69DAD08F66FAAA0640714E765912A0751289A
+ // to port/channel/denom before passing it along to the contract
+ let packet = Packet::mock(
+ format!("channel-17-local"),
+ format!("channel-42-counterparty"),
+ format!("transfer/channel-17-local/ujuno"),
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::Out),
+ "ibc/09E4864A262249507925831FBAD69DAD08F66FAAA0640714E765912A0751289A"
+ );
+ }
+
+ #[test]
+ fn receive_non_native() {
+ // The counterparty chain sends their own native token to us
+ let packet = Packet::mock(
+ format!("channel-42-counterparty"), // The counterparty's channel is the source here
+ format!("channel-17-local"), // Our channel is the dest channel
+ format!("ujuno"), // This is unwrapped. It is our job to wrap it
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::In),
+ "ibc/09E4864A262249507925831FBAD69DAD08F66FAAA0640714E765912A0751289A"
+ );
+ }
+
+ #[test]
+ fn receive_native() {
+ // The counterparty chain sends us back our native token that they had wrapped
+ let packet = Packet::mock(
+ format!("channel-42-counterparty"), // The counterparty's channel is the source here
+ format!("channel-17-local"), // Our channel is the dest channel
+ format!("transfer/channel-42-counterparty/uosmo"),
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::In), "uosmo");
+ }
+
+ // Let's assume we have two chains A and B (local and counterparty) connected in the following way:
+ //
+ // Chain A <---> channel-17-local <---> channel-42-counterparty <---> Chain B
+ //
+ // The following tests should pass
+ //
+
+ const WRAPPED_OSMO_ON_HUB_TRACE: &str = "transfer/channel-141/uosmo";
+ const WRAPPED_ATOM_ON_OSMOSIS_TRACE: &str = "transfer/channel-0/uatom";
+ const WRAPPED_ATOM_ON_OSMOSIS_HASH: &str =
+ "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2";
+ const WRAPPED_OSMO_ON_HUB_HASH: &str =
+ "ibc/14F9BC3E44B8A9C1BE1FB08980FAB87034C9905EF17CF2F5008FC085218811CC";
+
+ #[test]
+ fn sanity_check() {
+ // Examples using the official channels as of Nov 2022.
+
+ // uatom sent to osmosis
+ let packet = Packet::mock(
+ format!("channel-141"), // from: hub
+ format!("channel-0"), // to: osmosis
+ format!("uatom"),
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::In),
+ WRAPPED_ATOM_ON_OSMOSIS_HASH.clone()
+ );
+
+ // uatom on osmosis sent back to the hub
+ let packet = Packet::mock(
+ format!("channel-0"), // from: osmosis
+ format!("channel-141"), // to: hub
+ WRAPPED_ATOM_ON_OSMOSIS_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::In), "uatom");
+
+ // osmo sent to the hub
+ let packet = Packet::mock(
+ format!("channel-0"), // from: osmosis
+ format!("channel-141"), // to: hub
+ format!("uosmo"),
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::Out), "uosmo");
+
+ // osmo on the hub sent back to osmosis
+ // send
+ let packet = Packet::mock(
+ format!("channel-141"), // from: hub
+ format!("channel-0"), // to: osmosis
+ WRAPPED_OSMO_ON_HUB_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::Out), WRAPPED_OSMO_ON_HUB_HASH);
+
+ // receive
+ let packet = Packet::mock(
+ format!("channel-141"), // from: hub
+ format!("channel-0"), // to: osmosis
+ WRAPPED_OSMO_ON_HUB_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::In), "uosmo");
+
+ // Now let's pretend we're the hub.
+ // The following tests are from the perspective of the hub (i.e.: if this contract were deployed there)
+ //
+ // osmo sent to the hub
+ let packet = Packet::mock(
+ format!("channel-0"), // from: osmosis
+ format!("channel-141"), // to: hub
+ format!("uosmo"),
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::In), WRAPPED_OSMO_ON_HUB_HASH);
+
+ // uosmo on the hub sent back to the osmosis
+ let packet = Packet::mock(
+ format!("channel-141"), // from: hub
+ format!("channel-0"), // to: osmosis
+ WRAPPED_OSMO_ON_HUB_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::In), "uosmo");
+
+ // uatom sent to osmosis
+ let packet = Packet::mock(
+ format!("channel-141"), // from: hub
+ format!("channel-0"), // to: osmosis
+ format!("uatom"),
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::Out), "uatom");
+
+ // uatom on osmosis sent back to the hub
+ // send
+ let packet = Packet::mock(
+ format!("channel-0"), // from: osmosis
+ format!("channel-141"), // to: hub
+ WRAPPED_ATOM_ON_OSMOSIS_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::Out),
+ WRAPPED_ATOM_ON_OSMOSIS_HASH
+ );
+
+ // receive
+ let packet = Packet::mock(
+ format!("channel-0"), // from: osmosis
+ format!("channel-141"), // to: hub
+ WRAPPED_ATOM_ON_OSMOSIS_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(packet.local_denom(&FlowType::In), "uatom");
+ }
+
+ #[test]
+ fn sanity_double() {
+ // Now let's deal with double wrapping
+
+ let juno_wrapped_osmosis_wrapped_atom_hash =
+ "ibc/6CDD4663F2F09CD62285E2D45891FC149A3568E316CE3EBBE201A71A78A69388";
+
+ // Send uatom on stored on osmosis to juno
+ // send
+ let packet = Packet::mock(
+ format!("channel-42"), // from: osmosis
+ format!("channel-0"), // to: juno
+ WRAPPED_ATOM_ON_OSMOSIS_TRACE.to_string(), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::Out),
+ WRAPPED_ATOM_ON_OSMOSIS_HASH
+ );
+
+ // receive
+ let packet = Packet::mock(
+ format!("channel-42"), // from: osmosis
+ format!("channel-0"), // to: juno
+ WRAPPED_ATOM_ON_OSMOSIS_TRACE.to_string(),
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::In),
+ juno_wrapped_osmosis_wrapped_atom_hash
+ );
+
+ // Send back that multi-wrapped token to osmosis
+ // send
+ let packet = Packet::mock(
+ format!("channel-0"), // from: juno
+ format!("channel-42"), // to: osmosis
+ format!("{}{}", "transfer/channel-0/", WRAPPED_ATOM_ON_OSMOSIS_TRACE), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::Out),
+ juno_wrapped_osmosis_wrapped_atom_hash
+ );
+
+ // receive
+ let packet = Packet::mock(
+ format!("channel-0"), // from: juno
+ format!("channel-42"), // to: osmosis
+ format!("{}{}", "transfer/channel-0/", WRAPPED_ATOM_ON_OSMOSIS_TRACE), // unwrapped before reaching the contract
+ 0_u128.into(),
+ );
+ assert_eq!(
+ packet.local_denom(&FlowType::In),
+ WRAPPED_ATOM_ON_OSMOSIS_HASH
+ );
+ }
+
+ #[test]
+ fn tokenfactory_packet() {
+ let json = r#"{"send_packet":{"packet":{"sequence":4,"source_port":"transfer","source_channel":"channel-0","destination_port":"transfer","destination_channel":"channel-1491","data":{"denom":"transfer/channel-0/factory/osmo12smx2wdlyttvyzvzg54y2vnqwq2qjateuf7thj/czar","amount":"100000000000000000","sender":"osmo1cyyzpxplxdzkeea7kwsydadg87357qnahakaks","receiver":"osmo1c584m4lq25h83yp6ag8hh4htjr92d954vklzja"},"timeout_height":{},"timeout_timestamp":1668024476848430980}}}"#;
+ let parsed: SudoMsg = serde_json_wasm::from_str(json).unwrap();
+ //println!("{parsed:?}");
+
+ match parsed {
+ SudoMsg::SendPacket { packet, .. } => {
+ assert_eq!(
+ packet.local_denom(&FlowType::Out),
+ "ibc/07A1508F49D0753EDF95FA18CA38C0D6974867D793EB36F13A2AF1A5BB148B22"
+ );
+ }
+ _ => panic!("parsed into wrong variant"),
+ }
+ }
+
+ #[test]
+ fn packet_with_memo() {
+ // extra fields (like memo) get ignored.
+ let json = r#"{"recv_packet":{"packet":{"sequence":1,"source_port":"transfer","source_channel":"channel-0","destination_port":"transfer","destination_channel":"channel-0","data":{"denom":"stake","amount":"1","sender":"osmo177uaalkhra6wth6hc9hu79f72eq903kwcusx4r","receiver":"osmo1fj6yt4pwfea4865z763fvhwktlpe020ef93dlq","memo":"some info"},"timeout_height":{"revision_height":100}}}}"#;
+ let _parsed: SudoMsg = serde_json_wasm::from_str(json).unwrap();
+ //println!("{parsed:?}");
+ }
+}
diff --git a/testutil/contracts/rate-limiter/src/query.rs b/testutil/contracts/rate-limiter/src/query.rs
new file mode 100644
index 0000000000..6431a837d4
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/query.rs
@@ -0,0 +1,12 @@
+use cosmwasm_std::{to_binary, Binary, Deps, StdResult};
+
+use crate::state::{Path, RATE_LIMIT_TRACKERS};
+
+pub fn get_quotas(
+ deps: Deps,
+ channel_id: impl Into<String>,
+ denom: impl Into<String>,
+) -> StdResult<Binary> {
+ let path = Path::new(channel_id, denom);
+ to_binary(&RATE_LIMIT_TRACKERS.load(deps.storage, path.into())?)
+}
diff --git a/testutil/contracts/rate-limiter/src/state.rs b/testutil/contracts/rate-limiter/src/state.rs
new file mode 100644
index 0000000000..e699936d81
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/state.rs
@@ -0,0 +1,372 @@
+use cosmwasm_std::{Addr, Timestamp, Uint256};
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use std::cmp;
+
+use cw_storage_plus::{Item, Map};
+
+use crate::{msg::QuotaMsg, ContractError};
+
+/// This represents the key for our rate limiting tracker. A tuple of a denom and
+/// a channel. When interacting with storage, it's preferred to use this struct
+/// and call path.into() on it to convert it to the composite key of the
+/// RATE_LIMIT_TRACKERS map
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct Path {
+ pub denom: String,
+ pub channel: String,
+}
+
+impl Path {
+ pub fn new(channel: impl Into<String>, denom: impl Into<String>) -> Self {
+ Path {
+ channel: channel.into(),
+ denom: denom.into(),
+ }
+ }
+}
+
+impl From<Path> for (String, String) {
+ fn from(path: Path) -> (String, String) {
+ (path.channel, path.denom)
+ }
+}
+
+impl From<&Path> for (String, String) {
+ fn from(path: &Path) -> (String, String) {
+ (path.channel.to_owned(), path.denom.to_owned())
+ }
+}
+
+#[derive(Debug, Clone)]
+pub enum FlowType {
+ In,
+ Out,
+}
+
+/// A Flow represents the transfer of value for a denom through an IBC channel
+/// during a time window.
+///
+/// It tracks inflows (transfers into osmosis) and outflows (transfers out of
+/// osmosis).
+///
+/// The period_end represents the last point in time for which this Flow is
+/// tracking the value transfer.
+///
+/// Periods are discrete repeating windows. A period only starts when a contract
+/// call to update the Flow (SendPacket/RecvPacket) is made, and not right after
+/// the period ends. This means that if no calls happen after a period expires,
+/// the next period will begin at the time of the next call and be valid for the
+/// specified duration for the quota.
+///
+/// This is a design decision to avoid the period calculations and thus reduce gas consumption
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, Copy)]
+pub struct Flow {
+ pub inflow: Uint256,
+ pub outflow: Uint256,
+ pub period_end: Timestamp,
+}
+
+impl Flow {
+ pub fn new(
+ inflow: impl Into<Uint256>,
+ outflow: impl Into<Uint256>,
+ now: Timestamp,
+ duration: u64,
+ ) -> Self {
+ Self {
+ inflow: inflow.into(),
+ outflow: outflow.into(),
+ period_end: now.plus_seconds(duration),
+ }
+ }
+
+ /// The balance of a flow is how much absolute value for the denom has moved
+ /// through the channel before period_end. It returns a tuple of
+ /// (balance_in, balance_out) where balance_in in is how much has been
+ /// transferred into the flow, and balance_out is how much value transferred
+ /// out.
+ pub fn balance(&self) -> (Uint256, Uint256) {
+ (
+ self.inflow.saturating_sub(self.outflow),
+ self.outflow.saturating_sub(self.inflow),
+ )
+ }
+
+ /// checks if the flow, in the current state, has exceeded a max allowance
+ pub fn exceeds(&self, direction: &FlowType, max_inflow: Uint256, max_outflow: Uint256) -> bool {
+ let (balance_in, balance_out) = self.balance();
+ match direction {
+ FlowType::In => balance_in > max_inflow,
+ FlowType::Out => balance_out > max_outflow,
+ }
+ }
+
+ /// returns the balance in a direction. This is used for displaying cleaner errors
+ pub fn balance_on(&self, direction: &FlowType) -> Uint256 {
+ let (balance_in, balance_out) = self.balance();
+ match direction {
+ FlowType::In => balance_in,
+ FlowType::Out => balance_out,
+ }
+ }
+
+ /// If now is greater than the period_end, the Flow is considered expired.
+ pub fn is_expired(&self, now: Timestamp) -> bool {
+ self.period_end < now
+ }
+
+ // Mutating methods
+
+ /// Expire resets the Flow to start tracking the value transfer from the
+ /// moment this method is called.
+ pub fn expire(&mut self, now: Timestamp, duration: u64) {
+ self.inflow = Uint256::from(0_u32);
+ self.outflow = Uint256::from(0_u32);
+ self.period_end = now.plus_seconds(duration);
+ }
+
+ /// Updates the current flow incrementing it by a transfer of value.
+ pub fn add_flow(&mut self, direction: FlowType, value: Uint256) {
+ match direction {
+ FlowType::In => self.inflow = self.inflow.saturating_add(value),
+ FlowType::Out => self.outflow = self.outflow.saturating_add(value),
+ }
+ }
+
+ /// Updates the current flow reducing it by a transfer of value.
+ pub fn undo_flow(&mut self, direction: FlowType, value: Uint256) {
+ match direction {
+ FlowType::In => self.inflow = self.inflow.saturating_sub(value),
+ FlowType::Out => self.outflow = self.outflow.saturating_sub(value),
+ }
+ }
+
+ /// Applies a transfer. If the Flow is expired (now > period_end), it will
+ /// reset it before applying the transfer.
+ fn apply_transfer(
+ &mut self,
+ direction: &FlowType,
+ funds: Uint256,
+ now: Timestamp,
+ quota: &Quota,
+ ) -> bool {
+ let mut expired = false;
+ if self.is_expired(now) {
+ self.expire(now, quota.duration);
+ expired = true;
+ }
+ self.add_flow(direction.clone(), funds);
+ expired
+ }
+}
+
+/// A Quota is the percentage of the denom's total value that can be transferred
+/// through the channel in a given period of time (duration)
+///
+/// Percentages can be different for send and recv
+///
+/// The name of the quota is expected to be a human-readable representation of
+/// the duration (i.e.: "weekly", "daily", "every-six-months", ...)
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct Quota {
+ pub name: String,
+ pub max_percentage_send: u32,
+ pub max_percentage_recv: u32,
+ pub duration: u64,
+ pub channel_value: Option<Uint256>,
+}
+
+impl Quota {
+ /// Calculates the max capacity (absolute value in the same unit as
+ /// total_value) in each direction based on the total value of the denom in
+ /// the channel. The result tuple represents the max capacity when the
+ /// transfer is in directions: (FlowType::In, FlowType::Out)
+ pub fn capacity(&self) -> (Uint256, Uint256) {
+ match self.channel_value {
+ Some(total_value) => (
+ total_value * Uint256::from(self.max_percentage_recv) / Uint256::from(100_u32),
+ total_value * Uint256::from(self.max_percentage_send) / Uint256::from(100_u32),
+ ),
+ None => (0_u32.into(), 0_u32.into()), // This should never happen, but if the channel value is not set, we disallow any transfer
+ }
+ }
+
+ /// returns the capacity in a direction. This is used for displaying cleaner errors
+ pub fn capacity_on(&self, direction: &FlowType) -> Uint256 {
+ let (max_in, max_out) = self.capacity();
+ match direction {
+ FlowType::In => max_in,
+ FlowType::Out => max_out,
+ }
+ }
+}
+
+impl From<&QuotaMsg> for Quota {
+ fn from(msg: &QuotaMsg) -> Self {
+ let send_recv = (
+ cmp::min(msg.send_recv.0, 100),
+ cmp::min(msg.send_recv.1, 100),
+ );
+ Quota {
+ name: msg.name.clone(),
+ max_percentage_send: send_recv.0,
+ max_percentage_recv: send_recv.1,
+ duration: msg.duration,
+ channel_value: None,
+ }
+ }
+}
+
+/// RateLimit is the main structure tracked for each channel/denom pair. Its quota
+/// represents rate limit configuration, and the flow its
+/// current state (i.e.: how much value has been transfered in the current period)
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
+pub struct RateLimit {
+ pub quota: Quota,
+ pub flow: Flow,
+}
+
+// The channel value on send depends on the amount on escrow. The ibc transfer
+// module modifies the escrow amount by "funds" on sends before calling the
+// contract. This function takes that into account so that the channel value
+// that we track matches the channel value at the moment when the ibc
+// transaction started executing
+fn calculate_channel_value(
+ channel_value: Uint256,
+ denom: &str,
+ funds: Uint256,
+ direction: &FlowType,
+) -> Uint256 {
+ match direction {
+ FlowType::Out => {
+ if denom.contains("ibc") {
+ channel_value + funds // Non-Native tokens get removed from the supply on send. Add that amount back
+ } else {
+ // The commented-out code in the golang calculate channel value is what we want, but we're currently using the whole supply temporarily for efficiency. see rate_limit.go/CalculateChannelValue(..)
+ //channel_value - funds // Native tokens increase escrow amount on send. Remove that amount here
+ channel_value
+ }
+ }
+ FlowType::In => channel_value,
+ }
+}
+
+impl RateLimit {
+ /// Checks if a transfer is allowed and updates the data structures
+ /// accordingly.
+ ///
+ /// If the transfer is not allowed, it will return a RateLimitExceeded error.
+ ///
+ /// Otherwise it will return a RateLimitResponse with the updated data structures
+ pub fn allow_transfer(
+ &mut self,
+ path: &Path,
+ direction: &FlowType,
+ funds: Uint256,
+ channel_value: Uint256,
+ now: Timestamp,
+ ) -> Result {
+ // Flow used before this transaction is applied.
+ // This is used to make error messages more informative
+ let initial_flow = self.flow.balance_on(direction);
+
+ // Apply the transfer. From here on, we will update the flow with the new transfer
+ // and check if it exceeds the quota at the current time
+
+ let expired = self.flow.apply_transfer(direction, funds, now, &self.quota);
+ // Cache the channel value if it has never been set or it has expired.
+ if self.quota.channel_value.is_none() || expired {
+ self.quota.channel_value = Some(calculate_channel_value(
+ channel_value,
+ &path.denom,
+ funds,
+ direction,
+ ))
+ }
+
+ let (max_in, max_out) = self.quota.capacity();
+ // Return the effects of applying the transfer or an error.
+ match self.flow.exceeds(direction, max_in, max_out) {
+ true => Err(ContractError::RateLimitExceded {
+ channel: path.channel.to_string(),
+ denom: path.denom.to_string(),
+ amount: funds,
+ quota_name: self.quota.name.to_string(),
+ used: initial_flow,
+ max: self.quota.capacity_on(direction),
+ reset: self.flow.period_end,
+ }),
+ false => Ok(RateLimit {
+ quota: self.quota.clone(), // Cloning here because self.quota.name (String) does not allow us to implement Copy
+ flow: self.flow, // We can Copy flow, so this is slightly more efficient than cloning the whole RateLimit
+ }),
+ }
+ }
+}
+
+/// Only this address can manage the contract. This will likely be the
+/// governance module, but could be set to something else if needed
+pub const GOVMODULE: Item = Item::new("gov_module");
+/// Only this address can execute transfers. This will likely be the
+/// IBC transfer module, but could be set to something else if needed
+pub const IBCMODULE: Item = Item::new("ibc_module");
+
+/// RATE_LIMIT_TRACKERS is the main state for this contract. It maps a path (IBC
+/// Channel + denom) to a vector of `RateLimit`s.
+///
+/// The `RateLimit` struct contains the information about how much value of a
+/// denom has moved through the channel during the currently active time period
+/// (channel_flow.flow) and what percentage of the denom's value we are
+/// allowing to flow through that channel in a specific duration (quota)
+///
+/// For simplicity, the channel in the map keys refers to the "host" channel on
+/// the osmosis side. This means that on PacketSend it will refer to the source
+/// channel while on PacketRecv it refers to the destination channel.
+///
+/// It is the responsibility of the go module to pass the appropriate channel
+/// when sending the messages
+///
+/// The map key (String, String) represents (channel_id, denom). We use
+/// composite keys instead of a struct to avoid having to implement the
+/// PrimaryKey trait
+pub const RATE_LIMIT_TRACKERS: Map<(String, String), Vec> = Map::new("flow");
+
+#[cfg(test)]
+pub mod tests {
+ use super::*;
+
+ pub const RESET_TIME_DAILY: u64 = 60 * 60 * 24;
+ pub const RESET_TIME_WEEKLY: u64 = 60 * 60 * 24 * 7;
+ pub const RESET_TIME_MONTHLY: u64 = 60 * 60 * 24 * 30;
+
+ #[test]
+ fn flow() {
+ let epoch = Timestamp::from_seconds(0);
+ let mut flow = Flow::new(0_u32, 0_u32, epoch, RESET_TIME_WEEKLY);
+
+ assert!(!flow.is_expired(epoch));
+ assert!(!flow.is_expired(epoch.plus_seconds(RESET_TIME_DAILY)));
+ assert!(!flow.is_expired(epoch.plus_seconds(RESET_TIME_WEEKLY)));
+ assert!(flow.is_expired(epoch.plus_seconds(RESET_TIME_WEEKLY).plus_nanos(1)));
+
+ assert_eq!(flow.balance(), (0_u32.into(), 0_u32.into()));
+ flow.add_flow(FlowType::In, 5_u32.into());
+ assert_eq!(flow.balance(), (5_u32.into(), 0_u32.into()));
+ flow.add_flow(FlowType::Out, 2_u32.into());
+ assert_eq!(flow.balance(), (3_u32.into(), 0_u32.into()));
+ // Adding flow doesn't affect expiration
+ assert!(!flow.is_expired(epoch.plus_seconds(RESET_TIME_DAILY)));
+
+ flow.expire(epoch.plus_seconds(RESET_TIME_WEEKLY), RESET_TIME_WEEKLY);
+ assert_eq!(flow.balance(), (0_u32.into(), 0_u32.into()));
+ assert_eq!(flow.inflow, Uint256::from(0_u32));
+ assert_eq!(flow.outflow, Uint256::from(0_u32));
+ assert_eq!(flow.period_end, epoch.plus_seconds(RESET_TIME_WEEKLY * 2));
+
+ // Expiration has moved
+ assert!(!flow.is_expired(epoch.plus_seconds(RESET_TIME_WEEKLY).plus_nanos(1)));
+ assert!(!flow.is_expired(epoch.plus_seconds(RESET_TIME_WEEKLY * 2)));
+ assert!(flow.is_expired(epoch.plus_seconds(RESET_TIME_WEEKLY * 2).plus_nanos(1)));
+ }
+}
diff --git a/testutil/contracts/rate-limiter/src/sudo.rs b/testutil/contracts/rate-limiter/src/sudo.rs
new file mode 100644
index 0000000000..c42b5ea7e2
--- /dev/null
+++ b/testutil/contracts/rate-limiter/src/sudo.rs
@@ -0,0 +1,195 @@
+use cosmwasm_std::{DepsMut, Response, Timestamp, Uint256};
+
+use crate::{
+ packet::Packet,
+ state::{FlowType, Path, RateLimit, RATE_LIMIT_TRACKERS},
+ ContractError,
+};
+
+// This function will process a packet and extract the paths information, funds,
+// and channel value from it. This is will have to interact with the chain via grpc queries to properly
+// obtain this information.
+//
+// For backwards compatibility, we're temporarily letting the chain override the
+// denom and channel value, but these should go away in favour of the contract
+// extracting these from the packet
+pub fn process_packet(
+ deps: DepsMut,
+ packet: Packet,
+ direction: FlowType,
+ now: Timestamp,
+ #[cfg(test)] channel_value_mock: Option,
+) -> Result {
+ let (channel_id, denom) = packet.path_data(&direction);
+ #[allow(clippy::needless_borrow)]
+ let path = &Path::new(&channel_id, &denom);
+ let funds = packet.get_funds();
+
+ #[cfg(test)]
+ // When testing we override the channel value with the mock since we can't get it from the chain
+ let channel_value = match channel_value_mock {
+ Some(channel_value) => channel_value,
+ None => packet.channel_value(deps.as_ref(), &direction)?, // This should almost never be used, but left for completeness in case we want to send an empty channel_value from the test
+ };
+
+ #[cfg(not(test))]
+ let channel_value = packet.channel_value(deps.as_ref(), &direction)?;
+
+ try_transfer(deps, path, channel_value, funds, direction, now)
+}
+
+/// This function checks the rate limit and, if successful, stores the updated data about the value
+/// that has been transferred through the channel for a specific denom.
+/// If the period for a RateLimit has ended, the Flow information is reset.
+///
+/// The channel_value is the current value of the denom for the channel as
+/// calculated by the caller. This should be the total supply of a denom
+pub fn try_transfer(
+ deps: DepsMut,
+ path: &Path,
+ channel_value: Uint256,
+ funds: Uint256,
+ direction: FlowType,
+ now: Timestamp,
+) -> Result {
+ // Sudo call. Only go modules should be allowed to access this
+
+ // Fetch potential trackers for "any" channel of the required token
+ let any_path = Path::new("any", path.denom.clone());
+ let mut any_trackers = RATE_LIMIT_TRACKERS
+ .may_load(deps.storage, any_path.clone().into())?
+ .unwrap_or_default();
+ // Fetch trackers for the requested path
+ let mut trackers = RATE_LIMIT_TRACKERS
+ .may_load(deps.storage, path.into())?
+ .unwrap_or_default();
+
+ let not_configured = trackers.is_empty() && any_trackers.is_empty();
+
+ if not_configured {
+ // No Quota configured for the current path. Allowing all messages.
+ return Ok(Response::new()
+ .add_attribute("method", "try_transfer")
+ .add_attribute("channel_id", path.channel.to_string())
+ .add_attribute("denom", path.denom.to_string())
+ .add_attribute("quota", "none"));
+ }
+
+ // If any of the RateLimits fails, allow_transfer() will return
+ // ContractError::RateLimitExceded, which we'll propagate out
+ let results: Vec = trackers
+ .iter_mut()
+ .map(|limit| limit.allow_transfer(path, &direction, funds, channel_value, now))
+ .collect::>()?;
+
+ let any_results: Vec = any_trackers
+ .iter_mut()
+ .map(|limit| limit.allow_transfer(path, &direction, funds, channel_value, now))
+ .collect::>()?;
+
+ RATE_LIMIT_TRACKERS.save(deps.storage, path.into(), &results)?;
+ RATE_LIMIT_TRACKERS.save(deps.storage, any_path.into(), &any_results)?;
+
+ let response = Response::new()
+ .add_attribute("method", "try_transfer")
+ .add_attribute("channel_id", path.channel.to_string())
+ .add_attribute("denom", path.denom.to_string());
+
+ // Adds the attributes for each path to the response. In prod, the
+ // function add_rate_limit_attributes is a noop
+ let response: Result =
+ any_results.iter().fold(Ok(response), |acc, result| {
+ Ok(add_rate_limit_attributes(acc?, result))
+ });
+ results.iter().fold(Ok(response?), |acc, result| {
+ Ok(add_rate_limit_attributes(acc?, result))
+ })
+}
+
+// #[cfg(any(feature = "verbose_responses", test))]
+fn add_rate_limit_attributes(response: Response, result: &RateLimit) -> Response {
+ let (used_in, used_out) = result.flow.balance();
+ let (max_in, max_out) = result.quota.capacity();
+ // These attributes are only added during testing. That way we avoid
+ // calculating these again on prod.
+ response
+ .add_attribute(
+ format!("{}_used_in", result.quota.name),
+ used_in.to_string(),
+ )
+ .add_attribute(
+ format!("{}_used_out", result.quota.name),
+ used_out.to_string(),
+ )
+ .add_attribute(format!("{}_max_in", result.quota.name), max_in.to_string())
+ .add_attribute(
+ format!("{}_max_out", result.quota.name),
+ max_out.to_string(),
+ )
+ .add_attribute(
+ format!("{}_period_end", result.quota.name),
+ result.flow.period_end.to_string(),
+ )
+}
+
+// Leaving the attributes in until we can conditionally compile the contract
+// for the go tests in CI: https://github.com/mandrean/cw-optimizoor/issues/19
+//
+// #[cfg(not(any(feature = "verbose_responses", test)))]
+// fn add_rate_limit_attributes(response: Response, _result: &RateLimit) -> Response {
+// response
+// }
+
+// This function manually injects an inflow. This is used when reverting a
+// packet that failed ack or timed-out.
+pub fn undo_send(deps: DepsMut, packet: Packet) -> Result {
+ // Sudo call. Only go modules should be allowed to access this
+ let (channel_id, denom) = packet.path_data(&FlowType::Out); // Sends have direction out.
+ #[allow(clippy::needless_borrow)]
+ let path = &Path::new(&channel_id, &denom);
+ let any_path = Path::new("any", &denom);
+ let funds = packet.get_funds();
+
+ let mut any_trackers = RATE_LIMIT_TRACKERS
+ .may_load(deps.storage, any_path.clone().into())?
+ .unwrap_or_default();
+ let mut trackers = RATE_LIMIT_TRACKERS
+ .may_load(deps.storage, path.into())?
+ .unwrap_or_default();
+
+ let not_configured = trackers.is_empty() && any_trackers.is_empty();
+
+ if not_configured {
+ // No Quota configured for the current path. Allowing all messages.
+ return Ok(Response::new()
+ .add_attribute("method", "try_transfer")
+ .add_attribute("channel_id", path.channel.to_string())
+ .add_attribute("denom", path.denom.to_string())
+ .add_attribute("quota", "none"));
+ }
+
+ // We force update the flow to remove a failed send
+ let results: Vec = trackers
+ .iter_mut()
+ .map(|limit| {
+ limit.flow.undo_flow(FlowType::Out, funds);
+ limit.to_owned()
+ })
+ .collect();
+ let any_results: Vec = any_trackers
+ .iter_mut()
+ .map(|limit| {
+ limit.flow.undo_flow(FlowType::Out, funds);
+ limit.to_owned()
+ })
+ .collect();
+
+ RATE_LIMIT_TRACKERS.save(deps.storage, path.into(), &results)?;
+ RATE_LIMIT_TRACKERS.save(deps.storage, any_path.into(), &any_results)?;
+
+ Ok(Response::new()
+ .add_attribute("method", "undo_send")
+ .add_attribute("channel_id", path.channel.to_string())
+ .add_attribute("denom", path.denom.to_string())
+ .add_attribute("any_channel", (!any_trackers.is_empty()).to_string()))
+}
diff --git a/testutil/contracts/wasm.go b/testutil/contracts/wasm.go
index ae8a0b3816..619d124d32 100644
--- a/testutil/contracts/wasm.go
+++ b/testutil/contracts/wasm.go
@@ -18,6 +18,9 @@ var counterWasm []byte
//go:embed echo/artifacts/echo.wasm
var echoWasm []byte
+//go:embed rate-limiter/artifacts/rate_limiter.wasm
+var rateLimiterWasm []byte
+
// EchoWasm returns the echo contract wasm byte data
func EchoWasm() []byte {
return echoWasm
@@ -28,6 +31,11 @@ func CounterWasm() []byte {
return counterWasm
}
+// RateLimiterWasm returns the rate limiter contract wasm byte data
+func RateLimiterWasm() []byte {
+ return rateLimiterWasm
+}
+
func StoreContractCode(app *provenanceapp.App, ctx sdk.Context, wasmCode []byte) (uint64, error) {
govKeeper := wasmkeeper.NewGovPermissionKeeper(app.WasmKeeper)
creator := app.AccountKeeper.GetModuleAddress(govtypes.ModuleName)
@@ -48,3 +56,8 @@ func QueryContract(app *provenanceapp.App, ctx sdk.Context, contract sdk.AccAddr
state, err := app.WasmKeeper.QuerySmart(ctx, contract, key)
return string(state), err
}
+
+func PinContract(app *provenanceapp.App, ctx sdk.Context, codeID uint64) error {
+ err := app.ContractKeeper.PinCode(ctx, codeID)
+ return err
+}
diff --git a/testutil/ibc/testchain.go b/testutil/ibc/testchain.go
index e0d31fa29b..fff7a68c2c 100644
--- a/testutil/ibc/testchain.go
+++ b/testutil/ibc/testchain.go
@@ -17,6 +17,7 @@ import (
provenanceapp "github.com/provenance-io/provenance/app"
"github.com/provenance-io/provenance/testutil/contracts"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
)
type TestChain struct {
@@ -42,6 +43,13 @@ func (chain *TestChain) StoreContractEchoDirect(suite *suite.Suite) uint64 {
return codeID
}
+func (chain *TestChain) StoreContractRateLimiterDirect(suite *suite.Suite) uint64 {
+ codeID, err := contracts.StoreContractCode(chain.GetProvenanceApp(), chain.GetContext(), contracts.RateLimiterWasm())
+ suite.Require().NoError(err, "rate limiter contract direct code load failed")
+ println("loaded rate limiter contract with code id: ", codeID)
+ return codeID
+}
+
func (chain *TestChain) InstantiateContract(suite *suite.Suite, msg string, codeID uint64) sdk.AccAddress {
addr, err := contracts.InstantiateContract(chain.GetProvenanceApp(), chain.GetContext(), msg, codeID)
suite.Require().NoError(err, "contract instantiation failed", err)
@@ -49,6 +57,11 @@ func (chain *TestChain) InstantiateContract(suite *suite.Suite, msg string, code
return addr
}
+func (chain *TestChain) PinContract(suite *suite.Suite, codeID uint64) {
+ err := contracts.PinContract(chain.GetProvenanceApp(), chain.GetContext(), codeID)
+ suite.Require().NoError(err, "contract pin failed")
+}
+
func (chain *TestChain) QueryContract(suite *suite.Suite, contract sdk.AccAddress, key []byte) string {
state, err := contracts.QueryContract(chain.GetProvenanceApp(), chain.GetContext(), contract, key)
suite.Require().NoError(err, "contract query failed", err)
@@ -56,6 +69,15 @@ func (chain *TestChain) QueryContract(suite *suite.Suite, contract sdk.AccAddres
return state
}
+func (chain *TestChain) RegisterRateLimiterContract(suite *suite.Suite, addr []byte) {
+ addrStr, err := sdk.Bech32ifyAddressBytes("cosmos", addr)
+ suite.Require().NoError(err)
+ provenanceApp := chain.GetProvenanceApp()
+ provenanceApp.RateLimitingKeeper.SetParams(chain.GetContext(), ibcratelimit.Params{
+ ContractAddress: addrStr,
+ })
+}
+
// SendMsgsNoCheck is an alternative to ibctesting.TestChain.SendMsgs so that it doesn't check for errors. That should be handled by the caller
func (chain *TestChain) SendMsgsNoCheck(msgs ...sdk.Msg) (*sdk.Result, error) {
// ensure the chain has the latest time
diff --git a/x/ibcratelimit/README.md b/x/ibcratelimit/README.md
new file mode 100644
index 0000000000..5e0f473793
--- /dev/null
+++ b/x/ibcratelimit/README.md
@@ -0,0 +1,316 @@
+# ibcratelimit
+
+## Notice
+
+**This module was forked from https://github.com/osmosis-labs/osmosis/tree/main/x/ibc-rate-limit **
+
+_Unfortunately the original version could not be directly used due to extensive osmosis references, an incompatible Cosmos SDK version, and lack of support for IBC v6.x._
+
+
+# IBC Rate Limit
+
+The IBC Rate Limit module is responsible for adding a governance-configurable rate limit to IBC transfers.
+This is a safety control, intended to protect assets on osmosis in event of:
+
+* a bug/hack on osmosis
+* a bug/hack on the counter-party chain
+* a bug/hack in IBC itself
+
+This is done in exchange for a potential (one-way) bridge liveness tradeoff, in periods of high deposits or withdrawals.
+
+The architecture of this package is a minimal go package which implements an [IBC Middleware](https://github.com/cosmos/ibc-go/blob/f57170b1d4dd202a3c6c1c61dcf302b6a9546405/docs/ibc/middleware/develop.md) that wraps the [ICS20 transfer](https://ibc.cosmos.network/main/apps/transfer/overview.html) app, and calls into a cosmwasm contract.
+The cosmwasm contract then has all of the actual IBC rate limiting logic.
+The Cosmwasm code can be found in the [`contracts`](./contracts/) package, with bytecode findable in the [`bytecode`](./bytecode/) folder. The cosmwasm VM usage allows Osmosis chain governance to choose to change this safety control with no hard forks, via a parameter change proposal, a great mitigation for faster threat adaptivity.
+
+The module is currently in a state suitable for some initial governance-settable rate limits for high value bridged assets.
+It's not in its long term / end state for all channels by any means, but does act as a strong protection we
+can instantiate today for high value IBC connections.
+
+## Motivation
+
+The motivation of IBC-rate-limit comes from the empirical observations of blockchain bridge hacks that a rate limit would have massively reduced the stolen amount of assets in:
+
+- [Polynetwork Bridge Hack ($611 million)](https://rekt.news/polynetwork-rekt/)
+- [BNB Bridge Hack ($586 million)](https://rekt.news/bnb-bridge-rekt/)
+- [Wormhole Bridge Hack ($326 million)](https://rekt.news/wormhole-rekt/)
+- [Nomad Bridge Hack ($190 million)](https://rekt.news/nomad-rekt/)
+- [Harmony Bridge Hack ($100 million)](https://rekt.news/harmony-rekt/) - (Would require rate limit + monitoring)
+- [Dragonberry IBC bug](https://forum.cosmos.network/t/ibc-security-advisory-dragonberry/7702) (can't yet disclose amount at risk, but was saved due to being found first by altruistic Osmosis core developers)
+
+In the presence of a software bug on Osmosis, IBC itself, or on a counterparty chain, we would like to prevent the bridge from being fully depegged.
+This stems from the idea that a 30% asset depeg is ~infinitely better than a 100% depeg.
+It's _crazy_ that today these complex bridged assets can instantly go to 0 in event of bug.
+The goal of a rate limit is to raise an alert that something has potentially gone wrong, allowing validators and developers to have time to analyze, react, and protect larger portions of user funds.
+
+The thesis of this is that it is worthwhile to sacrifice liveness in the case of legitimate demand to send extreme amounts of funds, to prevent the terrible long-tail full fund risks.
+Rate limits aren't the end-all of safety controls, they're merely the simplest automated one. More should be explored and added onto IBC!
+
+## Rate limit types
+
+We express rate limits in time-based periods.
+This means, we set rate limits for (say) 6-hour, daily, and weekly intervals.
+The rate limit for a given time period stores the relevant amount of assets at the start of the rate limit.
+Rate limits are then defined on percentage terms of the asset.
+The time windows for rate limits are currently _not_ rolling, they have discrete start/end times.
+
+We allow setting separate rate limits for the inflow and outflow of assets.
+We do all of our rate limits based on the _net flow_ of assets on a channel pair. This prevents DOS issues, of someone repeatedly sending assets back and forth, to trigger rate limits and break liveness.
+
+We currently envision creating two kinds of rate limits:
+
+* Per denomination rate limits
+ - allows safety statements like "Only 30% of Stars on Osmosis can flow out in one day" or "The amount of Atom on Osmosis can at most double per day".
+* Per channel rate limits
+ - Limit the total inflow and outflow on a given IBC channel, based on "USDC" equivalent, using Osmosis as the price oracle.
+
+We currently only implement per denomination rate limits for non-native assets. We do not yet implement channel based rate limits.
+
+Currently these rate limits automatically "expire" at the end of the quota duration. TODO: Think of better designs here. E.g. can we have a constant number of subsequent quotas start filled? Or perhaps harmonically decreasing amounts of next few quotas pre-filled? Halted until DAO override seems not-great.
+
+## Instantiating rate limits
+
+Today all rate limit quotas must be set manually by governance.
+In the future, we should design towards some conservative rate limit to add as a safety-backstop automatically for channels.
+Ideas for how this could look:
+
+* One month after a channel has been created, automatically add in some USDC-based rate limit
+* One month after governance incentivizes an asset, add on a per-denomination rate limit.
+
+Definitely needs far more ideation and iteration!
+
+## Parameterizing the rate limit
+
+One element is we don't want any rate limit timespan that's too short, e.g. not enough time for humans to react to. So we wouldn't want a 1 hour rate limit, unless we think that if it's hit, it could be assessed within an hour.
+
+### Handling rate limit boundaries
+
+We want to be safe against the case where say we have a daily rate limit ending at a given time, and an adversary attempts to attack near the boundary window.
+We would not like them to be able to "double extract funds" by timing their extraction near a window boundary.
+
+Admittedly, not a lot of thought has been put into how to deal with this well.
+Right now we envision simply handling this by saying if you want a quota of duration D, instead include two quotas of duration D, but offset by `D/2` from each other.
+
+Ideally we can change windows to be more 'rolling' in the future, to avoid this overhead and more cleanly handle the problem. (Perhaps rolling ~1 hour at a time)
+
+### Inflow parameterization
+
+The "Inflow" side of a rate limit is essentially protection against unforeseen bug on a counterparty chain.
+This can be quite conservative (e.g. bridged amount doubling in one week). This covers a few cases:
+
+* Counter-party chain B having a token theft attack
+ - TODO: description of how this looks
+* Counter-party chain B runaway mint
+ - TODO: description of how this looks
+* IBC theft
+ - TODO: description of how this looks
+
+It does get more complex when the counterparty chain is itself a DEX, but this is still much more protection than nothing.
+
+### Outflow parameterization
+
+The "Outflow" side of a rate limit is protection against a bug on Osmosis OR IBC.
+This has potential for much more user-frustrating issues, if set too low.
+E.g. if there's some event that causes many people to suddenly withdraw many STARS or many USDC.
+
+So this parameterization has to contend with being a tradeoff of withdrawal liveness in high volatility periods vs being a crucial safety rail, in event of on-Osmosis bug.
+
+TODO: Better fill out
+
+### Example suggested parameterization
+
+## Code structure
+
+As mentioned at the beginning of the README, the go code is a relatively minimal ICS 20 wrapper, that dispatches relevant calls to a cosmwasm contract that implements the rate limiting functionality.
+
+### Go Middleware
+
+To achieve this, the middleware needs to implement the `porttypes.Middleware` interface and the
+`porttypes.ICS4Wrapper` interface. This allows the middleware to send and receive IBC messages by wrapping
+any IBC module, and be used as an ICS4 wrapper by a transfer module (for sending packets or writing acknowledgements).
+
+Of those interfaces, just the following methods have custom logic:
+
+* `ICS4Wrapper.SendPacket` forwards to contract, with intent of tracking of value sent via an ibc channel
+* `Middleware.OnRecvPacket` forwards to contract, with intent of tracking of value received via an ibc channel
+* `Middleware.OnAcknowledgementPacket` forwards to contract, with intent of undoing the tracking of a sent packet if the acknowledgment is not a success
+* `OnTimeoutPacket` forwards to contract, with intent of undoing the tracking of a sent packet if the packet times out (is not relayed)
+
+All other methods from those interfaces are passthroughs to the underlying implementations.
+
+#### Parameters
+
+The middleware uses the following parameters:
+
+| Key | Type |
+| --------------- | ------ |
+| ContractAddress | string |
+
+1. **ContractAddress** -
+ The contract address is the address of an instantiated version of the contract provided under `./contracts/`
+
+### Cosmwasm Contract Concepts
+
+Something to keep in mind with all of the code, is that we have to reason separately about every item in the following matrix:
+
+| Native Token | Non-Native Token |
+| -------------------- | ------------------------ |
+| Send Native Token | Send Non-Native Token |
+| Receive Native Token | Receive Non-Native Token |
+| Timeout Native Send | Timeout Non-native Send |
+
+(Error ACK can reuse the same code as timeout)
+
+TODO: Spend more time on sudo messages in the following description. We need to better describe how we map the quota concepts onto the code.
+Need to describe how we get the quota beginning balance, and that its different for sends and receives.
+Explain intricacies of tracking that a timeout and/or ErrorAck must appear from the same quota, else we ignore its update to the quotas.
+
+
+The tracking contract uses the following concepts
+
+1. **RateLimit** - tracks the value flow transferred and the quota for a path.
+2. **Path** - is a (denom, channel) pair.
+3. **Flow** - tracks the value that has moved through a path during the current time window.
+4. **Quota** - is the percentage of the denom's total value that can be transferred through the path in a given period of time (duration)
+
+#### Messages
+
+The contract specifies the following messages:
+
+##### Query
+
+* GetQuotas - Returns the quotas for a path
+
+##### Exec
+
+* AddPath - Adds a list of quotas for a path
+* RemovePath - Removes a path
+* ResetPathQuota - If a rate limit has been reached, the contract's governance address can reset the quota so that transfers are allowed again
+
+##### Sudo
+
+Sudo messages can only be executed by the chain.
+
+* SendPacket - Increments the amount used out of the send quota and checks that the send is allowed. If it isn't, it will return a RateLimitExceeded error
+* RecvPacket - Increments the amount used out of the receive quota and checks that the receive is allowed. If it isn't, it will return a RateLimitExceeded error
+* UndoSend - If a send has failed, the undo message is used to remove its cost from the send quota
+
+All of these messages receive the packet from the chain and extract the necessary information to process the packet and determine if it should be rate limited.
+
+### Necessary information
+
+To determine if a packet should be rate limited, we need:
+
+* Channel: The channel on the Osmosis side: `packet.SourceChannel` for sends, and `packet.DestinationChannel` for receives.
+* Denom: The denom of the token being transferred as known on the Osmosis side (more on that below)
+* Channel Value: The total value of the channel denominated in `Denom` (i.e.: channel-17 is worth 10k osmo).
+* Funds: the amount being transferred
+
+#### Notes on Channel
+The contract also supports quotas on a custom channel called "any" that is checked on every transfer. If either the
+transfer channel or the "any" channel have a quota that has been filled, the transaction will be rate limited.
+
+#### Notes on Denom
+We always use the denom as represented on Osmosis. For native assets that is the local denom, and for non-native
+assets it's the "ibc" prefix and the sha256 hash of the denom trace (`ibc/...`).
+
+##### Sends
+
+For native denoms, we can just use the denom in the packet. If the denom is invalid, it will fail somewhere else along the chain. Example result: `uosmo`
+
+For non-native denoms, the contract needs to hash the denom trace and append it to the `ibc/` prefix. The
+contract always receives the parsed denom (i.e.: `transfer/channel-32/uatom` instead of
+`ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2`). This is because of the order in which
+the middleware is called. When sending a non-native denom, the packet contains `transfer/source-channel/denom` as it
+is built on the `relay.SendTransfer()` in the transfer module and then passed to the middleware. Example result: `ibc/`
+
+##### Receives
+
+This behaves slightly different if the asset is an osmosis asset that was sent to the counterparty and is being
+returned to the chain, or if the asset is being received by the chain and originates on the counterparty. In ibc this
+is called being a "source" or a "sink" respectively.
+
+If the chain is a sink for the denom, we build the local denom by prefixing the port and the channel
+(`transfer/local-channel`) and hashing that denom. Example result: `ibc/`
+
+If the chain is the source for the denom, there are two possibilities:
+
+* The token is a native token, in which case we just remove the prefix added by the counterparty. Example result: `uosmo`
+* The token is a non-native token, in which case we remove the extra prefix and hash it. Example result `ibc/`
+
+#### Notes on Channel Value
+We have iterated on different strategies for calculating the channel value. Our preferred strategy is the following:
+* For non-native tokens (`ibc/...`), the channel value should be the supply of those tokens in Osmosis
+* For native tokens, the channel value should be the total amount of tokens in escrow across all ibc channels
+
+The latter ensures the limits are lower and represent the amount of native tokens that exist outside Osmosis. This is
+beneficial as we assume the majority of native tokens exist on the native chain and the amount of "normal" ibc transfers is
+proportional to the tokens that have left the chain.
+
+This strategy cannot be implemented at the moment because IBC does not track the amount of tokens in escrow across
+all channels ([github issue](https://github.com/cosmos/ibc-go/issues/2664)). Instead, we use the current supply on
+Osmosis for all denoms (i.e.: treat native and non-native tokens the same way). Once that ticket is fixed, we will
+update this strategy.
+
+##### Caching
+
+The channel value varies constantly. To have better predictability, and avoid issues of the value growing if there is
+a potential infinite mint bug, we cache the channel value at the beginning of the period for every quota.
+
+This means that if we have a daily quota of 1% of the osmo supply, and the channel value is 1M osmo at the beginning of
+the quota, no more than 100k osmo can be transferred during that day. If 10M osmo were to be minted or IBC'd in during that
+period, the quota will not increase until the period expires. Then it will be 1% of the new channel value (~11M)
+
+### Integration
+
+The rate limit middleware wraps the `transferIBCModule` and is added as the entry route for IBC transfers.
+
+The module is also provided to the underlying `transferIBCModule` as its `ICS4Wrapper`; previously, this would have
+pointed to a channel, which also implements the `ICS4Wrapper` interface.
+
+This integration can be seen in [osmosis/app/keepers/keepers.go](https://github.com/osmosis-labs/osmosis/blob/main/app/keepers/keepers.go)
+
+## Testing strategy
+
+
+A general testing strategy is as follows:
+
+* Setup two chains.
+* Send some tokens from A->B and some from B->A (so that there are IBC tokens to play with in both sides)
+* Add the rate limiter on A with low limits (i.e. 1% of supply)
+* Test Function for chains A' and B' and denom d
+ * Send some d tokens from A' to B' and get close to the limit.
+ * Do the same transfer making sure the amount is above the quota and verify it fails with the rate limit error
+ * Wait until the reset time has passed, and send again. The transfer should now succeed
+* Repeat the above test for the following combination of chains and tokens: `(A,B,a)`, `(B,A,a)`, `(A,B,b)`, `(B,A,b)`,
+ where `a` and `b` are native tokens to chains A and B respectively.
+
+For more comprehensive tests we can also:
+* Add a third chain C and make sure everything works properly for C tokens that have been transferred to A and to B
+* Test that the contracts gov address can reset rate limits if the quota has been hit
+* Test the queries for getting information about the state of the quotas
+* Test that rate limit symmetries hold (i.e.: sending a token through a rate-limited channel and then sending it back
+  reduces the rate limits by the same amount that it was increased during the first send)
+* Ensure that the channels between the test chains have different names (A->B="channel-0", B->A="channel-1", for example)
+
+## Known Future work
+
+Items that have been highlighted above:
+
+* Making automated rate limits get added for channels, instead of manual configuration only
+* Improving parameterization strategies / data analysis
+* Adding the USDC based rate limits
+* We need better strategies for how rate limits "expire".
+
+Not yet highlighted
+
+* Making monitoring tooling to know when approaching rate limiting and when they're hit
+* Making tooling to easily give us summaries we can use, to reason about "bug or not bug" in event of rate limit being hit
+* Enabling ways to pre-declare large transfers so as to not hit rate limits.
+ * Perhaps you can on-chain declare intent to send these assets with a large delay, that raises monitoring but bypasses rate limits?
+ * Maybe contract-based tooling to split up the transfer suffices?
+* Strategies to account for high volatility periods without hitting rate limits
+ * Can imagine "Hop network" style markets emerging
+  * Could imagine tying it into looking at AMM volatility, or off-chain oracles
+ * but these are both things we should be wary of security bugs in.
+ * Maybe [constraint based programming with tracking of provenance](https://youtu.be/HB5TrK7A4pI?t=2852) as a solution
+* Analyze changing denom-based rate limits, to just overall withdrawal amount for Osmosis
\ No newline at end of file
diff --git a/x/ibcratelimit/client/cli/cli_test.go b/x/ibcratelimit/client/cli/cli_test.go
new file mode 100644
index 0000000000..104b52b435
--- /dev/null
+++ b/x/ibcratelimit/client/cli/cli_test.go
@@ -0,0 +1,211 @@
+package cli_test
+
+import (
+ "fmt"
+ "testing"
+
+ tmcli "github.com/tendermint/tendermint/libs/cli"
+
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/crypto/hd"
+ "github.com/cosmos/cosmos-sdk/crypto/keyring"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli"
+ "github.com/cosmos/cosmos-sdk/testutil/network"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/provenance-io/provenance/internal/antewrapper"
+ "github.com/provenance-io/provenance/internal/pioconfig"
+ "github.com/provenance-io/provenance/testutil"
+ ibcratelimit "github.com/provenance-io/provenance/x/ibcratelimit"
+ ibcratelimitcli "github.com/provenance-io/provenance/x/ibcratelimit/client/cli"
+)
+
+type TestSuite struct {
+ suite.Suite
+
+ cfg network.Config
+ network *network.Network
+ keyring keyring.Keyring
+ keyringDir string
+
+ accountAddr sdk.AccAddress
+ accountKey *secp256k1.PrivKey
+ accountAddresses []sdk.AccAddress
+
+ ratelimiter string
+}
+
+func TestIntegrationTestSuite(t *testing.T) {
+ suite.Run(t, new(TestSuite))
+}
+
+func (s *TestSuite) SetupSuite() {
+ s.T().Log("setting up integration test suite")
+ pioconfig.SetProvenanceConfig("", 0)
+ s.accountKey = secp256k1.GenPrivKeyFromSecret([]byte("acc2"))
+ addr, err := sdk.AccAddressFromHexUnsafe(s.accountKey.PubKey().Address().String())
+ s.Require().NoError(err)
+ s.accountAddr = addr
+
+ s.cfg = testutil.DefaultTestNetworkConfig()
+ genesisState := s.cfg.GenesisState
+
+ s.cfg.NumValidators = 1
+ s.GenerateAccountsWithKeyrings(2)
+
+ var genBalances []banktypes.Balance
+ for i := range s.accountAddresses {
+ genBalances = append(genBalances, banktypes.Balance{Address: s.accountAddresses[i].String(), Coins: sdk.NewCoins(
+ sdk.NewCoin("nhash", sdk.NewInt(100000000)), sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(100000000)),
+ ).Sort()})
+ }
+ var bankGenState banktypes.GenesisState
+ bankGenState.Params = banktypes.DefaultParams()
+ bankGenState.Balances = genBalances
+ bankDataBz, err := s.cfg.Codec.MarshalJSON(&bankGenState)
+ s.Require().NoError(err, "should be able to marshal bank genesis state when setting up suite")
+ genesisState[banktypes.ModuleName] = bankDataBz
+
+ var authData authtypes.GenesisState
+ var genAccounts []authtypes.GenesisAccount
+ authData.Params = authtypes.DefaultParams()
+ genAccounts = append(genAccounts, authtypes.NewBaseAccount(s.accountAddresses[0], nil, 3, 0))
+ genAccounts = append(genAccounts, authtypes.NewBaseAccount(s.accountAddresses[1], nil, 4, 0))
+ accounts, err := authtypes.PackAccounts(genAccounts)
+ s.Require().NoError(err, "should be able to pack accounts for genesis state when setting up suite")
+ authData.Accounts = accounts
+ authDataBz, err := s.cfg.Codec.MarshalJSON(&authData)
+ s.Require().NoError(err, "should be able to marshal auth genesis state when setting up suite")
+ genesisState[authtypes.ModuleName] = authDataBz
+
+ s.ratelimiter = "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma"
+ ratelimitData := ibcratelimit.NewGenesisState(ibcratelimit.NewParams(s.ratelimiter))
+
+ ratelimitDataBz, err := s.cfg.Codec.MarshalJSON(ratelimitData)
+ s.Require().NoError(err, "should be able to marshal ibcratelimit genesis state when setting up suite")
+ genesisState[ibcratelimit.ModuleName] = ratelimitDataBz
+
+ s.cfg.GenesisState = genesisState
+
+ s.cfg.ChainID = antewrapper.SimAppChainID
+
+ s.network, err = network.New(s.T(), s.T().TempDir(), s.cfg)
+ s.Require().NoError(err, "network.New")
+
+ _, err = s.network.WaitForHeight(6)
+ s.Require().NoError(err, "WaitForHeight")
+}
+
+func (s *TestSuite) TearDownSuite() {
+ s.Require().NoError(s.network.WaitForNextBlock(), "WaitForNextBlock")
+ s.T().Log("tearing down integration test suite")
+ s.network.Cleanup()
+}
+
+func (s *TestSuite) GenerateAccountsWithKeyrings(number int) {
+ path := hd.CreateHDPath(118, 0, 0).String()
+ s.keyringDir = s.T().TempDir()
+ kr, err := keyring.New(s.T().Name(), "test", s.keyringDir, nil, s.cfg.Codec)
+ s.Require().NoError(err, "Keyring.New")
+ s.keyring = kr
+ for i := 0; i < number; i++ {
+ keyId := fmt.Sprintf("test_key%v", i)
+ info, _, err := kr.NewMnemonic(keyId, keyring.English, path, keyring.DefaultBIP39Passphrase, hd.Secp256k1)
+ s.Require().NoError(err, "Keyring.NewMneomonic")
+ addr, err := info.GetAddress()
+ if err != nil {
+ panic(err)
+ }
+ s.accountAddresses = append(s.accountAddresses, addr)
+ }
+}
+
+func (s *TestSuite) TestGetParams() {
+ testCases := []struct {
+ name string
+ expectErrMsg string
+ expectedCode uint32
+ expectedAddress string
+ }{
+ {
+ name: "success - query for params",
+ expectedAddress: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ expectedCode: 0,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ s.Run(tc.name, func() {
+ clientCtx := s.network.Validators[0].ClientCtx
+ out, err := clitestutil.ExecTestCLICmd(clientCtx, ibcratelimitcli.GetParamsCmd(), []string{fmt.Sprintf("--%s=json", tmcli.OutputFlag)})
+ if len(tc.expectErrMsg) > 0 {
+ s.EqualError(err, tc.expectErrMsg, "should have correct error message for invalid Params request")
+ } else {
+ var response ibcratelimit.Params
+ s.NoError(err, "should have no error message for valid Params request")
+ err = s.cfg.Codec.UnmarshalJSON(out.Bytes(), &response)
+ s.NoError(err, "should have no error message when unmarshalling response to Params request")
+ s.Equal(tc.expectedAddress, response.ContractAddress, "should have the correct ratelimit address")
+ }
+ })
+ }
+}
+
+func (s *TestSuite) TestParamsUpdate() {
+ testCases := []struct {
+ name string
+ args []string
+ expectErrMsg string
+ expectedCode uint32
+ signer string
+ }{
+ {
+ name: "success - address updated",
+ args: []string{"cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma"},
+ expectedCode: 0,
+ signer: s.accountAddresses[0].String(),
+ },
+ {
+ name: "failure - invalid number of args",
+ args: []string{"cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma", "invalid"},
+ expectErrMsg: "accepts 1 arg(s), received 2",
+ signer: s.accountAddresses[0].String(),
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ s.Run(tc.name, func() {
+
+ clientCtx := s.network.Validators[0].ClientCtx.WithKeyringDir(s.keyringDir).WithKeyring(s.keyring)
+
+ flags := []string{
+ fmt.Sprintf("--%s=%s", flags.FlagFrom, tc.signer),
+ fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
+ fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
+ fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
+ }
+ tc.args = append(tc.args, flags...)
+
+ out, err := clitestutil.ExecTestCLICmd(clientCtx, ibcratelimitcli.GetCmdParamsUpdate(), append(tc.args, []string{fmt.Sprintf("--%s=json", tmcli.OutputFlag)}...))
+ var response sdk.TxResponse
+ marshalErr := clientCtx.Codec.UnmarshalJSON(out.Bytes(), &response)
+ if len(tc.expectErrMsg) > 0 {
+ s.Assert().EqualError(err, tc.expectErrMsg, "should have correct error for invalid ParamsUpdate request")
+ s.Assert().Equal(tc.expectedCode, response.Code, "should have correct response code for invalid ParamsUpdate request")
+ } else {
+ s.Assert().NoError(err, "should have no error for valid ParamsUpdate request")
+ s.Assert().NoError(marshalErr, out.String(), "should have no marshal error for valid ParamsUpdate request")
+ s.Assert().Equal(tc.expectedCode, response.Code, "should have correct response code for valid ParamsUpdate request")
+ }
+ })
+ }
+}
diff --git a/x/ibcratelimit/client/cli/query.go b/x/ibcratelimit/client/cli/query.go
new file mode 100644
index 0000000000..accb4b2494
--- /dev/null
+++ b/x/ibcratelimit/client/cli/query.go
@@ -0,0 +1,59 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/version"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// GetQueryCmd returns the cli query commands for this module.
+func GetQueryCmd() *cobra.Command {
+ queryCmd := &cobra.Command{
+ Use: ibcratelimit.ModuleName,
+ Short: "Querying commands for the ibcratelimit module",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ queryCmd.AddCommand(
+ GetParamsCmd(),
+ )
+
+ return queryCmd
+}
+
+// GetParamsCmd returns the command handler for ibcratelimit parameter querying.
+func GetParamsCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "params",
+ Short: "Query the current ibcratelimit params",
+ Args: cobra.NoArgs,
+ Example: fmt.Sprintf(`$ %s query ibcratelimit params`, version.AppName),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+
+ queryClient := ibcratelimit.NewQueryClient(clientCtx)
+ res, err := queryClient.Params(context.Background(), &ibcratelimit.ParamsRequest{})
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(&res.Params)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/x/ibcratelimit/client/cli/tx.go b/x/ibcratelimit/client/cli/tx.go
new file mode 100644
index 0000000000..ce43f99f67
--- /dev/null
+++ b/x/ibcratelimit/client/cli/tx.go
@@ -0,0 +1,78 @@
+package cli
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdktx "github.com/cosmos/cosmos-sdk/types/tx"
+ "github.com/cosmos/cosmos-sdk/version"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// NewTxCmd is the top-level command for ibcratelimit CLI transactions.
+func NewTxCmd() *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: ibcratelimit.ModuleName,
+ Aliases: []string{"rl"},
+ Short: "Transaction commands for the ibcratelimit module",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ txCmd.AddCommand(
+ GetCmdParamsUpdate(),
+ )
+
+ return txCmd
+}
+
+// GetCmdParamsUpdate is a command to update the params of the module's rate limiter.
+func GetCmdParamsUpdate() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "update-params ",
+ Short: "Update the module's params",
+ Long: "Submit an update params via governance proposal along with an initial deposit.",
+ Args: cobra.ExactArgs(1),
+ Aliases: []string{"u"},
+ Example: fmt.Sprintf(`%[1]s tx ratelimitedibc update-params pb1skjwj5whet0lpe65qaq4rpq03hjxlwd9nf39lk --deposit 50000nhash`, version.AppName),
+ RunE: func(cmd *cobra.Command, args []string) (err error) {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+
+ authority := authtypes.NewModuleAddress(govtypes.ModuleName)
+
+ msg := ibcratelimit.NewMsgGovUpdateParamsRequest(
+ authority.String(),
+ args[0],
+ )
+
+ proposal, govErr := govcli.ReadGovPropFlags(clientCtx, cmd.Flags())
+ if govErr != nil {
+ return govErr
+ }
+ proposal.Messages, govErr = sdktx.SetMsgs([]sdk.Msg{msg})
+ if govErr != nil {
+ return govErr
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), proposal)
+ },
+ }
+
+ govcli.AddGovPropFlagsToCmd(cmd)
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/x/ibcratelimit/codec.go b/x/ibcratelimit/codec.go
new file mode 100644
index 0000000000..40572390f1
--- /dev/null
+++ b/x/ibcratelimit/codec.go
@@ -0,0 +1,20 @@
+package ibcratelimit
+
+import (
+ "github.com/gogo/protobuf/proto"
+
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+// RegisterInterfaces registers implementations for the tx messages
+func RegisterInterfaces(registry types.InterfaceRegistry) {
+ messages := make([]proto.Message, len(AllRequestMsgs))
+ for i, msg := range AllRequestMsgs {
+ messages[i] = msg
+ }
+ registry.RegisterImplementations((*sdk.Msg)(nil), messages...)
+
+ msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
diff --git a/x/ibcratelimit/errors.go b/x/ibcratelimit/errors.go
new file mode 100644
index 0000000000..9036fd8c4f
--- /dev/null
+++ b/x/ibcratelimit/errors.go
@@ -0,0 +1,11 @@
+package ibcratelimit
+
+import (
+ cerrs "cosmossdk.io/errors"
+)
+
+var (
+ ErrRateLimitExceeded = cerrs.Register(ModuleName, 2, "rate limit exceeded")
+ ErrBadMessage = cerrs.Register(ModuleName, 3, "bad message")
+ ErrContractError = cerrs.Register(ModuleName, 4, "contract error")
+)
diff --git a/x/ibcratelimit/event.pb.go b/x/ibcratelimit/event.pb.go
new file mode 100644
index 0000000000..16f79c7bc4
--- /dev/null
+++ b/x/ibcratelimit/event.pb.go
@@ -0,0 +1,772 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: provenance/ibcratelimit/v1/event.proto
+
+package ibcratelimit
+
+import (
+ fmt "fmt"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// EventAckRevertFailure is emitted when an Ack revert fails
+type EventAckRevertFailure struct {
+ // module is the name of the module that emitted it.
+ Module string `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
+ // packet is the packet received on acknowledgement.
+ Packet string `protobuf:"bytes,2,opt,name=packet,proto3" json:"packet,omitempty"`
+ // ack is the packet's inner acknowledgement message.
+ Ack string `protobuf:"bytes,3,opt,name=ack,proto3" json:"ack,omitempty"`
+}
+
+func (m *EventAckRevertFailure) Reset() { *m = EventAckRevertFailure{} }
+func (m *EventAckRevertFailure) String() string { return proto.CompactTextString(m) }
+func (*EventAckRevertFailure) ProtoMessage() {}
+func (*EventAckRevertFailure) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6b9bde81a4017b0d, []int{0}
+}
+func (m *EventAckRevertFailure) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EventAckRevertFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_EventAckRevertFailure.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *EventAckRevertFailure) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EventAckRevertFailure.Merge(m, src)
+}
+func (m *EventAckRevertFailure) XXX_Size() int {
+ return m.Size()
+}
+func (m *EventAckRevertFailure) XXX_DiscardUnknown() {
+ xxx_messageInfo_EventAckRevertFailure.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventAckRevertFailure proto.InternalMessageInfo
+
+func (m *EventAckRevertFailure) GetModule() string {
+ if m != nil {
+ return m.Module
+ }
+ return ""
+}
+
+func (m *EventAckRevertFailure) GetPacket() string {
+ if m != nil {
+ return m.Packet
+ }
+ return ""
+}
+
+func (m *EventAckRevertFailure) GetAck() string {
+ if m != nil {
+ return m.Ack
+ }
+ return ""
+}
+
+// EventTimeoutRevertFailure is emitted when a Timeout revert fails
+type EventTimeoutRevertFailure struct {
+ // module is the name of the module that emitted it.
+ Module string `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
+ // packet is the packet received on timeout.
+ Packet string `protobuf:"bytes,2,opt,name=packet,proto3" json:"packet,omitempty"`
+}
+
+func (m *EventTimeoutRevertFailure) Reset() { *m = EventTimeoutRevertFailure{} }
+func (m *EventTimeoutRevertFailure) String() string { return proto.CompactTextString(m) }
+func (*EventTimeoutRevertFailure) ProtoMessage() {}
+func (*EventTimeoutRevertFailure) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6b9bde81a4017b0d, []int{1}
+}
+func (m *EventTimeoutRevertFailure) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EventTimeoutRevertFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_EventTimeoutRevertFailure.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *EventTimeoutRevertFailure) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EventTimeoutRevertFailure.Merge(m, src)
+}
+func (m *EventTimeoutRevertFailure) XXX_Size() int {
+ return m.Size()
+}
+func (m *EventTimeoutRevertFailure) XXX_DiscardUnknown() {
+ xxx_messageInfo_EventTimeoutRevertFailure.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventTimeoutRevertFailure proto.InternalMessageInfo
+
+func (m *EventTimeoutRevertFailure) GetModule() string {
+ if m != nil {
+ return m.Module
+ }
+ return ""
+}
+
+func (m *EventTimeoutRevertFailure) GetPacket() string {
+ if m != nil {
+ return m.Packet
+ }
+ return ""
+}
+
+// EventParamsUpdated is an event emitted when the ibcratelimit module's params have been updated.
+type EventParamsUpdated struct {
+}
+
+func (m *EventParamsUpdated) Reset() { *m = EventParamsUpdated{} }
+func (m *EventParamsUpdated) String() string { return proto.CompactTextString(m) }
+func (*EventParamsUpdated) ProtoMessage() {}
+func (*EventParamsUpdated) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6b9bde81a4017b0d, []int{2}
+}
+func (m *EventParamsUpdated) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EventParamsUpdated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_EventParamsUpdated.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *EventParamsUpdated) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EventParamsUpdated.Merge(m, src)
+}
+func (m *EventParamsUpdated) XXX_Size() int {
+ return m.Size()
+}
+func (m *EventParamsUpdated) XXX_DiscardUnknown() {
+ xxx_messageInfo_EventParamsUpdated.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventParamsUpdated proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*EventAckRevertFailure)(nil), "provenance.ibcratelimit.v1.EventAckRevertFailure")
+ proto.RegisterType((*EventTimeoutRevertFailure)(nil), "provenance.ibcratelimit.v1.EventTimeoutRevertFailure")
+ proto.RegisterType((*EventParamsUpdated)(nil), "provenance.ibcratelimit.v1.EventParamsUpdated")
+}
+
+func init() {
+ proto.RegisterFile("provenance/ibcratelimit/v1/event.proto", fileDescriptor_6b9bde81a4017b0d)
+}
+
+var fileDescriptor_6b9bde81a4017b0d = []byte{
+ // 244 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2b, 0x28, 0xca, 0x2f,
+ 0x4b, 0xcd, 0x4b, 0xcc, 0x4b, 0x4e, 0xd5, 0xcf, 0x4c, 0x4a, 0x2e, 0x4a, 0x2c, 0x49, 0xcd, 0xc9,
+ 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca,
+ 0x2f, 0xc9, 0x17, 0x92, 0x42, 0xa8, 0xd3, 0x43, 0x56, 0xa7, 0x57, 0x66, 0xa8, 0x14, 0xc9, 0x25,
+ 0xea, 0x0a, 0x52, 0xea, 0x98, 0x9c, 0x1d, 0x94, 0x5a, 0x96, 0x5a, 0x54, 0xe2, 0x96, 0x98, 0x99,
+ 0x53, 0x5a, 0x94, 0x2a, 0x24, 0xc6, 0xc5, 0x96, 0x9b, 0x9f, 0x52, 0x9a, 0x93, 0x2a, 0xc1, 0xa8,
+ 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x81, 0xc4, 0x0b, 0x12, 0x93, 0xb3, 0x53, 0x4b, 0x24, 0x98,
+ 0x20, 0xe2, 0x10, 0x9e, 0x90, 0x00, 0x17, 0x73, 0x62, 0x72, 0xb6, 0x04, 0x33, 0x58, 0x10, 0xc4,
+ 0x54, 0xf2, 0xe6, 0x92, 0x04, 0x1b, 0x1d, 0x92, 0x99, 0x9b, 0x9a, 0x5f, 0x5a, 0x42, 0x91, 0xf1,
+ 0x4a, 0x22, 0x5c, 0x42, 0x60, 0xc3, 0x02, 0x12, 0x8b, 0x12, 0x73, 0x8b, 0x43, 0x0b, 0x52, 0x12,
+ 0x4b, 0x52, 0x53, 0x9c, 0x72, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
+ 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x81, 0x4b,
+ 0x36, 0x33, 0x5f, 0x0f, 0xb7, 0xb7, 0x03, 0x18, 0xa3, 0x8c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93,
+ 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0x0a, 0x75, 0x33, 0xf3, 0x91, 0x78, 0xfa, 0x15, 0x28, 0xe1,
+ 0x9a, 0xc4, 0x06, 0x0e, 0x4f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x94, 0x63, 0x5e,
+ 0x79, 0x01, 0x00, 0x00,
+}
+
+func (m *EventAckRevertFailure) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EventAckRevertFailure) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventAckRevertFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Ack) > 0 {
+ i -= len(m.Ack)
+ copy(dAtA[i:], m.Ack)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Ack)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Packet) > 0 {
+ i -= len(m.Packet)
+ copy(dAtA[i:], m.Packet)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Packet)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Module) > 0 {
+ i -= len(m.Module)
+ copy(dAtA[i:], m.Module)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Module)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EventTimeoutRevertFailure) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EventTimeoutRevertFailure) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventTimeoutRevertFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Packet) > 0 {
+ i -= len(m.Packet)
+ copy(dAtA[i:], m.Packet)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Packet)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Module) > 0 {
+ i -= len(m.Module)
+ copy(dAtA[i:], m.Module)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Module)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EventParamsUpdated) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EventParamsUpdated) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventParamsUpdated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintEvent(dAtA []byte, offset int, v uint64) int {
+ offset -= sovEvent(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *EventAckRevertFailure) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Module)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Packet)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Ack)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ return n
+}
+
+func (m *EventTimeoutRevertFailure) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Module)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Packet)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ return n
+}
+
+func (m *EventParamsUpdated) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovEvent(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozEvent(x uint64) (n int) {
+ return sovEvent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *EventAckRevertFailure) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EventAckRevertFailure: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EventAckRevertFailure: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Module", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Module = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Packet = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ack", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ack = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EventTimeoutRevertFailure) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EventTimeoutRevertFailure: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EventTimeoutRevertFailure: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Module", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Module = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Packet = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EventParamsUpdated) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EventParamsUpdated: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EventParamsUpdated: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipEvent(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthEvent
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupEvent
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthEvent
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthEvent = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEvent = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupEvent = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/x/ibcratelimit/events.go b/x/ibcratelimit/events.go
new file mode 100644
index 0000000000..a599de0e25
--- /dev/null
+++ b/x/ibcratelimit/events.go
@@ -0,0 +1,23 @@
+package ibcratelimit
+
+// NewEventAckRevertFailure returns a new EventAckRevertFailure.
+func NewEventAckRevertFailure(module, packet, ack string) *EventAckRevertFailure {
+ return &EventAckRevertFailure{
+ Module: module,
+ Packet: packet,
+ Ack: ack,
+ }
+}
+
+// NewEventTimeoutRevertFailure returns a new EventTimeoutRevertFailure.
+func NewEventTimeoutRevertFailure(module, packet string) *EventTimeoutRevertFailure {
+ return &EventTimeoutRevertFailure{
+ Module: module,
+ Packet: packet,
+ }
+}
+
+// NewEventParamsUpdated returns a new EventParamsUpdated.
+func NewEventParamsUpdated() *EventParamsUpdated {
+ return &EventParamsUpdated{}
+}
diff --git a/x/ibcratelimit/events_test.go b/x/ibcratelimit/events_test.go
new file mode 100644
index 0000000000..568e569082
--- /dev/null
+++ b/x/ibcratelimit/events_test.go
@@ -0,0 +1,33 @@
+package ibcratelimit_test
+
+import (
+ "testing"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewEventAckRevertFailure(t *testing.T) {
+ expected := &ibcratelimit.EventAckRevertFailure{
+ Module: "module",
+ Packet: "packet",
+ Ack: "ack",
+ }
+ event := ibcratelimit.NewEventAckRevertFailure(expected.Module, expected.Packet, expected.Ack)
+ assert.Equal(t, expected, event, "should create the correct event type")
+}
+
+func TestNewEventTimeoutRevertFailure(t *testing.T) {
+ expected := &ibcratelimit.EventTimeoutRevertFailure{
+ Module: "module",
+ Packet: "packet",
+ }
+ event := ibcratelimit.NewEventTimeoutRevertFailure(expected.Module, expected.Packet)
+ assert.Equal(t, expected, event, "should create the correct event type")
+}
+
+func TestNewEventParamsUpdated(t *testing.T) {
+ expected := &ibcratelimit.EventParamsUpdated{}
+ event := ibcratelimit.NewEventParamsUpdated()
+ assert.Equal(t, expected, event, "should create the correct event type")
+}
diff --git a/x/ibcratelimit/expected_keepers.go b/x/ibcratelimit/expected_keepers.go
new file mode 100644
index 0000000000..62d57b68d9
--- /dev/null
+++ b/x/ibcratelimit/expected_keepers.go
@@ -0,0 +1,7 @@
+package ibcratelimit
+
+import sdk "github.com/cosmos/cosmos-sdk/types"
+
+type PermissionedKeeper interface {
+ Sudo(ctx sdk.Context, contractAddress sdk.AccAddress, msg []byte) ([]byte, error)
+}
diff --git a/x/ibcratelimit/genesis.go b/x/ibcratelimit/genesis.go
new file mode 100644
index 0000000000..a01f1f0389
--- /dev/null
+++ b/x/ibcratelimit/genesis.go
@@ -0,0 +1,21 @@
+package ibcratelimit
+
+// DefaultGenesis creates a default GenesisState object.
+func DefaultGenesis() *GenesisState {
+ return &GenesisState{
+ Params: DefaultParams(),
+ }
+}
+
+// Validate performs basic genesis state validation returning an error upon any
+// failure.
+func (gs GenesisState) Validate() error {
+ return gs.Params.Validate()
+}
+
+// NewGenesisState returns a new instance of GenesisState object
+func NewGenesisState(params Params) *GenesisState {
+ return &GenesisState{
+ Params: params,
+ }
+}
diff --git a/x/ibcratelimit/genesis.pb.go b/x/ibcratelimit/genesis.pb.go
new file mode 100644
index 0000000000..ac0523b951
--- /dev/null
+++ b/x/ibcratelimit/genesis.pb.go
@@ -0,0 +1,324 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: provenance/ibcratelimit/v1/genesis.proto
+
+package ibcratelimit
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibcratelimit module's genesis state.
+type GenesisState struct {
+ // params are all the parameters of the module.
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8046e03397972f41, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "provenance.ibcratelimit.v1.GenesisState")
+}
+
+func init() {
+ proto.RegisterFile("provenance/ibcratelimit/v1/genesis.proto", fileDescriptor_8046e03397972f41)
+}
+
+var fileDescriptor_8046e03397972f41 = []byte{
+ // 208 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x28, 0x28, 0xca, 0x2f,
+ 0x4b, 0xcd, 0x4b, 0xcc, 0x4b, 0x4e, 0xd5, 0xcf, 0x4c, 0x4a, 0x2e, 0x4a, 0x2c, 0x49, 0xcd, 0xc9,
+ 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b,
+ 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x42, 0xa8, 0xd4, 0x43, 0x56, 0xa9, 0x57, 0x66, 0x28, 0x25,
+ 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xa6, 0x0f, 0x62, 0x41, 0x74, 0x48, 0xa9, 0xe3, 0x31, 0xbb,
+ 0x20, 0xb1, 0x28, 0x31, 0x17, 0x6a, 0xb4, 0x52, 0x00, 0x17, 0x8f, 0x3b, 0xc4, 0xae, 0xe0, 0x92,
+ 0xc4, 0x92, 0x54, 0x21, 0x07, 0x2e, 0x36, 0x88, 0xbc, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb7, 0x91,
+ 0x92, 0x1e, 0x6e, 0xbb, 0xf5, 0x02, 0xc0, 0x2a, 0x9d, 0x58, 0x4e, 0xdc, 0x93, 0x67, 0x08, 0x82,
+ 0xea, 0x73, 0xca, 0x3d, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18,
+ 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x06, 0x2e, 0xd9, 0xcc,
+ 0x7c, 0x3c, 0xa6, 0x05, 0x30, 0x46, 0x19, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7,
+ 0xe7, 0xea, 0x23, 0x14, 0xea, 0x66, 0xe6, 0x23, 0xf1, 0xf4, 0x2b, 0x50, 0x3c, 0x94, 0xc4, 0x06,
+ 0xf6, 0x87, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x58, 0x77, 0xf4, 0x5d, 0x4e, 0x01, 0x00, 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Params.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/x/ibcratelimit/genesis_test.go b/x/ibcratelimit/genesis_test.go
new file mode 100644
index 0000000000..d7337058f4
--- /dev/null
+++ b/x/ibcratelimit/genesis_test.go
@@ -0,0 +1,74 @@
+package ibcratelimit_test
+
+import (
+ "testing"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDefaultGenesis(t *testing.T) {
+ expected := ibcratelimit.NewGenesisState(ibcratelimit.NewParams(""))
+ genesis := ibcratelimit.DefaultGenesis()
+ assert.Equal(t, expected, genesis)
+}
+
+func TestGenesisValidate(t *testing.T) {
+ testCases := []struct {
+ name string
+ addr string
+ err string
+ }{
+ {
+ name: "success - valid address",
+ addr: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ },
+ {
+ name: "success - empty address",
+ addr: "",
+ },
+ {
+ name: "failure - invalid address format",
+ addr: "cosmos1234",
+ err: "decoding bech32 failed: invalid separator index 6",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ genesis := ibcratelimit.NewGenesisState(ibcratelimit.NewParams(tc.addr))
+ err := genesis.Validate()
+
+ if len(tc.err) > 0 {
+ assert.EqualError(t, err, tc.err, "should have the correct error")
+ } else {
+ assert.NoError(t, err, "should not throw an error")
+ }
+ })
+ }
+}
+
+func TestNewGenesisState(t *testing.T) {
+ tests := []struct {
+ name string
+ addr string
+ expected ibcratelimit.GenesisState
+ }{
+ {
+ name: "success - empty contract address can be used",
+ expected: ibcratelimit.GenesisState{Params: ibcratelimit.NewParams("")},
+ },
+ {
+ name: "success - params are correctly set.",
+ expected: ibcratelimit.GenesisState{Params: ibcratelimit.NewParams("cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd")},
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ genesis := *ibcratelimit.NewGenesisState(ibcratelimit.NewParams(tc.expected.Params.ContractAddress))
+ assert.Equal(t, tc.expected, genesis)
+ })
+ }
+}
diff --git a/x/ibcratelimit/keeper/genesis.go b/x/ibcratelimit/keeper/genesis.go
new file mode 100644
index 0000000000..0ed23894ae
--- /dev/null
+++ b/x/ibcratelimit/keeper/genesis.go
@@ -0,0 +1,27 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// ExportGenesis returns a GenesisState for a given context.
+func (k Keeper) ExportGenesis(ctx sdk.Context) *ibcratelimit.GenesisState {
+ params, err := k.GetParams(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ return &ibcratelimit.GenesisState{
+ Params: params,
+ }
+}
+
+// InitGenesis new ibcratelimit genesis
+func (k Keeper) InitGenesis(ctx sdk.Context, data *ibcratelimit.GenesisState) {
+ if err := data.Validate(); err != nil {
+ panic(err)
+ }
+ k.SetParams(ctx, data.Params)
+}
diff --git a/x/ibcratelimit/keeper/genesis_test.go b/x/ibcratelimit/keeper/genesis_test.go
new file mode 100644
index 0000000000..a0982b73b1
--- /dev/null
+++ b/x/ibcratelimit/keeper/genesis_test.go
@@ -0,0 +1,19 @@
+package keeper_test
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+func (s *TestSuite) TestInitExportGenesis() {
+ testAddress := sdk.AccAddress([]byte("addr1_______________")).String()
+ k := s.app.RateLimitingKeeper
+
+ initialGenesis := ibcratelimit.NewGenesisState(ibcratelimit.NewParams(testAddress))
+
+ k.InitGenesis(s.ctx, initialGenesis)
+ s.Assert().Equal(testAddress, k.GetContractAddress(s.ctx))
+ exportedGenesis := k.ExportGenesis(s.ctx)
+ s.Assert().Equal(initialGenesis, exportedGenesis)
+}
diff --git a/x/ibcratelimit/keeper/grpc_query.go b/x/ibcratelimit/keeper/grpc_query.go
new file mode 100644
index 0000000000..0c86c99f5b
--- /dev/null
+++ b/x/ibcratelimit/keeper/grpc_query.go
@@ -0,0 +1,23 @@
+package keeper
+
+import (
+ "context"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+var _ ibcratelimit.QueryServer = Keeper{}
+
+// Params returns the params used by the module
+func (k Keeper) Params(ctx context.Context, _ *ibcratelimit.ParamsRequest) (*ibcratelimit.ParamsResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+ params, err := k.GetParams(sdkCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ibcratelimit.ParamsResponse{Params: params}, nil
+}
diff --git a/x/ibcratelimit/keeper/grpc_query_test.go b/x/ibcratelimit/keeper/grpc_query_test.go
new file mode 100644
index 0000000000..fcec066a2b
--- /dev/null
+++ b/x/ibcratelimit/keeper/grpc_query_test.go
@@ -0,0 +1,44 @@
+package keeper_test
+
+import (
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+func (s *TestSuite) TestQueryParams() {
+ tests := []struct {
+ name string
+ contract string
+ expected ibcratelimit.ParamsResponse
+ }{
+ {
+ name: "success - params have not been set",
+ },
+ {
+ name: "success - params have been set",
+ contract: "randomaddress",
+ expected: ibcratelimit.ParamsResponse{
+ Params: ibcratelimit.Params{
+ ContractAddress: "randomaddress",
+ },
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ if len(tc.contract) > 0 {
+ s.app.RateLimitingKeeper.SetParams(s.ctx, ibcratelimit.NewParams(tc.contract))
+ }
+
+ request := ibcratelimit.ParamsRequest{}
+ response, err := s.queryClient.Params(s.ctx, &request)
+
+ s.Assert().NoError(err, "should not throw an error")
+ s.Assert().Equal(tc.expected, *response, "should return correct response")
+
+ if len(tc.contract) > 0 {
+ s.app.RateLimitingKeeper.SetParams(s.ctx, ibcratelimit.DefaultParams())
+ }
+ })
+ }
+}
diff --git a/x/ibcratelimit/keeper/keeper.go b/x/ibcratelimit/keeper/keeper.go
new file mode 100644
index 0000000000..c0df185a76
--- /dev/null
+++ b/x/ibcratelimit/keeper/keeper.go
@@ -0,0 +1,92 @@
+package keeper
+
+import (
+ "github.com/gogo/protobuf/proto"
+
+ "github.com/tendermint/tendermint/libs/log"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// Keeper for the ibcratelimit module
+type Keeper struct {
+ storeKey storetypes.StoreKey
+ cdc codec.BinaryCodec
+ PermissionedKeeper ibcratelimit.PermissionedKeeper
+ authority string
+}
+
+// NewKeeper Creates a new Keeper for the module.
+func NewKeeper(
+ cdc codec.BinaryCodec,
+ key storetypes.StoreKey,
+ permissionedKeeper ibcratelimit.PermissionedKeeper,
+) Keeper {
+ return Keeper{
+ storeKey: key,
+ cdc: cdc,
+ PermissionedKeeper: permissionedKeeper,
+ authority: authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ }
+}
+
+// Logger Creates a new logger for the module.
+func (k Keeper) Logger(ctx sdk.Context) log.Logger {
+ return ctx.Logger().With("module", "x/"+ibcratelimit.ModuleName)
+}
+
+// GetParams Gets the params for the module.
+func (k Keeper) GetParams(ctx sdk.Context) (params ibcratelimit.Params, err error) {
+ store := ctx.KVStore(k.storeKey)
+ key := ibcratelimit.ParamsKey
+ bz := store.Get(key)
+ if len(bz) == 0 {
+ return ibcratelimit.Params{}, nil
+ }
+ err = k.cdc.Unmarshal(bz, &params)
+ return params, err
+}
+
+// SetParams Sets the params for the module.
+func (k Keeper) SetParams(ctx sdk.Context, params ibcratelimit.Params) {
+ store := ctx.KVStore(k.storeKey)
+ bz := k.cdc.MustMarshal(&params)
+ store.Set(ibcratelimit.ParamsKey, bz)
+}
+
+// GetContractAddress Gets the current value of the module's contract address.
+func (k Keeper) GetContractAddress(ctx sdk.Context) (contract string) {
+ params, _ := k.GetParams(ctx)
+ return params.ContractAddress
+}
+
+// IsContractConfigured Checks if the contract has been configured for the module.
+func (k Keeper) IsContractConfigured(ctx sdk.Context) bool {
+ params, err := k.GetParams(ctx)
+ if err != nil {
+ return false
+ }
+ return params.ContractAddress != ""
+}
+
+// ValidateAuthority returns an error if the provided address is not the authority.
+func (k Keeper) ValidateAuthority(addr string) error {
+ if k.authority != addr {
+ return govtypes.ErrInvalidSigner.Wrapf("expected %q got %q", k.authority, addr)
+ }
+ return nil
+}
+
+// emitEvent emits the provided event and writes any error to the error log.
+func (k Keeper) emitEvent(ctx sdk.Context, event proto.Message) {
+ err := ctx.EventManager().EmitTypedEvent(event)
+ if err != nil {
+ k.Logger(ctx).Error("error emitting event %#v: %v", event, err)
+ }
+}
diff --git a/x/ibcratelimit/keeper/keeper_test.go b/x/ibcratelimit/keeper/keeper_test.go
new file mode 100644
index 0000000000..2cd9e6888e
--- /dev/null
+++ b/x/ibcratelimit/keeper/keeper_test.go
@@ -0,0 +1,115 @@
+package keeper_test
+
+import (
+ "testing"
+
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/provenance-io/provenance/app"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/provenance-io/provenance/x/ibcratelimit/keeper"
+ "github.com/stretchr/testify/suite"
+)
+
+type TestSuite struct {
+ suite.Suite
+
+ app *app.App
+ ctx sdk.Context
+
+ queryClient ibcratelimit.QueryClient
+ msgServer ibcratelimit.MsgServer
+}
+
+func (s *TestSuite) SetupTest() {
+ s.app = app.Setup(s.T())
+ s.ctx = s.app.BaseApp.NewContext(false, tmproto.Header{})
+ s.ctx = s.ctx.WithBlockHeight(0)
+
+ s.msgServer = keeper.NewMsgServer(*s.app.RateLimitingKeeper)
+ queryHelper := baseapp.NewQueryServerTestHelper(s.ctx, s.app.InterfaceRegistry())
+ ibcratelimit.RegisterQueryServer(queryHelper, s.app.RateLimitingKeeper)
+ s.queryClient = ibcratelimit.NewQueryClient(queryHelper)
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(TestSuite))
+}
+
+func (s *TestSuite) TestGetSetParams() {
+ tests := []struct {
+ name string
+ contract string
+ }{
+ {
+ name: "success - get empty params",
+ },
+ {
+ name: "success - set and get new params",
+ contract: "contractaddress",
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ params := ibcratelimit.NewParams(tc.contract)
+ s.app.RateLimitingKeeper.SetParams(s.ctx, params)
+ newParams, err := s.app.RateLimitingKeeper.GetParams(s.ctx)
+ s.Assert().NoError(err)
+ s.Assert().Equal(params, newParams, "should have expected params")
+ })
+ }
+}
+
+func (s *TestSuite) TestGetContractAddress() {
+ tests := []struct {
+ name string
+ contract string
+ }{
+ {
+ name: "success - get empty contract",
+ },
+ {
+ name: "success - set and get new contract address",
+ contract: "contractaddress",
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ params := ibcratelimit.NewParams(tc.contract)
+ s.app.RateLimitingKeeper.SetParams(s.ctx, params)
+ contract := s.app.RateLimitingKeeper.GetContractAddress(s.ctx)
+ s.Assert().Equal(tc.contract, contract, "should have expected contract")
+ })
+ }
+}
+
+func (s *TestSuite) TestIsContractConfigured() {
+ tests := []struct {
+ name string
+ contract string
+ expected bool
+ }{
+ {
+ name: "success - get empty contract",
+ expected: false,
+ },
+ {
+ name: "success - set and get new contract address",
+ contract: "contractaddress",
+ expected: true,
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ params := ibcratelimit.NewParams(tc.contract)
+ s.app.RateLimitingKeeper.SetParams(s.ctx, params)
+ configured := s.app.RateLimitingKeeper.IsContractConfigured(s.ctx)
+ s.Assert().Equal(tc.expected, configured, "should have expected configured output")
+ })
+ }
+}
diff --git a/x/ibcratelimit/keeper/mocks_test.go b/x/ibcratelimit/keeper/mocks_test.go
new file mode 100644
index 0000000000..0c9569f5ea
--- /dev/null
+++ b/x/ibcratelimit/keeper/mocks_test.go
@@ -0,0 +1,119 @@
+package keeper_test
+
+import (
+ "encoding/json"
+ "strings"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ transfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/v6/modules/core/exported"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// MockPacket is a test struct that implements the PacketI interface.
+type MockPacket struct {
+ data []byte
+ validHeight bool
+}
+
+// NewMockPacket creates a new MockPacket.
+func NewMockPacket(data []byte, validHeight bool) *MockPacket {
+ return &MockPacket{
+ data: data,
+ validHeight: validHeight,
+ }
+}
+
+// GetSequence implements the PacketI interface and always returns 1.
+func (m MockPacket) GetSequence() uint64 {
+ return 1
+}
+
+// GetTimeoutHeight implements the PacketI interface and can return a valid or invalid height.
+func (m MockPacket) GetTimeoutHeight() exported.Height {
+ if !m.validHeight {
+ return nil
+ }
+ return clienttypes.Height{
+ RevisionNumber: 5,
+ RevisionHeight: 5,
+ }
+}
+
+// GetTimeoutTimestamp implements the PacketI interface and always returns 1.
+func (m MockPacket) GetTimeoutTimestamp() uint64 {
+ return 1
+}
+
+// GetSourcePort implements the PacketI interface and always returns "src-port".
+func (m MockPacket) GetSourcePort() string {
+ return "src-port"
+}
+
+// GetSourceChannel implements the PacketI interface and always returns "src-channel".
+func (m MockPacket) GetSourceChannel() string {
+ return "src-channel"
+}
+
+// GetDestPort implements the PacketI interface and always returns "dest-port".
+func (m MockPacket) GetDestPort() string {
+ return "dest-port"
+}
+
+// GetDestChannel implements the PacketI interface and always returns "dest-channel".
+func (m MockPacket) GetDestChannel() string {
+ return "dest-channel"
+}
+
+// GetData implements the PacketI interface and always returns provided data.
+func (m MockPacket) GetData() []byte {
+ return m.data
+}
+
+// ValidateBasic implements the PacketI interface and always returns nil.
+func (m MockPacket) ValidateBasic() error {
+ return nil
+}
+
+// NewMockFungiblePacketData creates a new FungibleTokenPacketData for testing.
+func NewMockFungiblePacketData(invalidReceiver bool) transfertypes.FungibleTokenPacketData {
+ data := transfertypes.NewFungibleTokenPacketData(
+ "denom",
+ "500",
+ "sender",
+ "receiver",
+ "memo",
+ )
+ if invalidReceiver {
+ data.Receiver = strings.Repeat("a", 4096)
+ }
+ return data
+}
+
+// NewMockSerializedPacketData creates a new serialized FungibleTokenPacketData for testing.
+func NewMockSerializedPacketData() []byte {
+ data := NewMockFungiblePacketData(false)
+ bytes, _ := json.Marshal(data)
+ return bytes
+}
+
+// MockPermissionedKeeper is a test struct that implements the PermissionedKeeper interface.
+type MockPermissionedKeeper struct {
+ valid bool
+}
+
+// NewMockPermissionedKeeper creates a new MockPermissionedKeeper.
+func NewMockPermissionedKeeper(valid bool) *MockPermissionedKeeper {
+ return &MockPermissionedKeeper{
+ valid: valid,
+ }
+}
+
+// Sudo implements the PermissionedKeeper interface and provides a basic error or success message.
+func (m *MockPermissionedKeeper) Sudo(ctx sdk.Context, contractAddress sdk.AccAddress, msg []byte) ([]byte, error) {
+ if !m.valid {
+ return nil, ibcratelimit.ErrRateLimitExceeded
+ }
+ return []byte("success"), nil
+}
diff --git a/x/ibcratelimit/keeper/msg_server.go b/x/ibcratelimit/keeper/msg_server.go
new file mode 100644
index 0000000000..cc2679bbbf
--- /dev/null
+++ b/x/ibcratelimit/keeper/msg_server.go
@@ -0,0 +1,35 @@
+package keeper
+
+import (
+ "context"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// MsgServer is an alias for a Keeper that implements the ibcratelimit.MsgServer interface.
+type MsgServer struct {
+ Keeper
+}
+
+func NewMsgServer(k Keeper) ibcratelimit.MsgServer {
+ return MsgServer{
+ Keeper: k,
+ }
+}
+
+var _ ibcratelimit.MsgServer = MsgServer{}
+
+// GovUpdateParams is a governance proposal endpoint for updating the ibcratelimit module's params.
+func (k MsgServer) GovUpdateParams(goCtx context.Context, msg *ibcratelimit.MsgGovUpdateParamsRequest) (*ibcratelimit.MsgGovUpdateParamsResponse, error) {
+ if err := k.ValidateAuthority(msg.Authority); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ k.SetParams(ctx, msg.Params)
+ k.emitEvent(ctx, ibcratelimit.NewEventParamsUpdated())
+
+ return &ibcratelimit.MsgGovUpdateParamsResponse{}, nil
+}
diff --git a/x/ibcratelimit/keeper/msg_server_test.go b/x/ibcratelimit/keeper/msg_server_test.go
new file mode 100644
index 0000000000..9a4769f3dc
--- /dev/null
+++ b/x/ibcratelimit/keeper/msg_server_test.go
@@ -0,0 +1,68 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/gogo/protobuf/proto"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+func (s *TestSuite) TestGovUpdateParams() {
+ authority := s.app.OracleKeeper.GetAuthority()
+
+ tests := []struct {
+ name string
+ req *ibcratelimit.MsgGovUpdateParamsRequest
+ res *ibcratelimit.MsgGovUpdateParamsResponse
+ event *sdk.Event
+ err string
+ }{
+ {
+ name: "failure - authority does not match module authority",
+ req: &ibcratelimit.MsgGovUpdateParamsRequest{
+ Params: ibcratelimit.NewParams("cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma"),
+ Authority: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ },
+ res: nil,
+ err: fmt.Sprintf("expected \"%s\" got \"cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma\": expected gov account as only signer for proposal message", authority),
+ },
+ {
+ name: "success - rate limiter is updated",
+ req: &ibcratelimit.MsgGovUpdateParamsRequest{
+ Params: ibcratelimit.NewParams("cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma"),
+ Authority: authority,
+ },
+ res: &ibcratelimit.MsgGovUpdateParamsResponse{},
+ event: typedEventToEvent(ibcratelimit.NewEventParamsUpdated()),
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ res, err := s.msgServer.GovUpdateParams(s.ctx, tc.req)
+ events := s.ctx.EventManager().Events()
+ numEvents := len(events)
+
+ if tc.event != nil {
+ s.Assert().Equal(1, numEvents, "should emit the correct number of events")
+ s.Assert().Equal(*tc.event, events[0], "should emit the correct event")
+ } else {
+ s.Assert().Empty(events, "should not emit events")
+ }
+
+ if len(tc.err) > 0 {
+ s.Assert().Nil(res, "should have nil response")
+ s.Assert().EqualError(err, tc.err, "should have correct error")
+ } else {
+ s.Assert().NoError(err, "should not have error")
+ s.Assert().Equal(tc.res, res, "should have the correct response")
+ }
+ })
+ }
+}
+
+func typedEventToEvent(tev proto.Message) *sdk.Event {
+ event, _ := sdk.TypedEventToEvent(tev)
+ return &event
+}
diff --git a/x/ibcratelimit/keeper/rate_limit.go b/x/ibcratelimit/keeper/rate_limit.go
new file mode 100644
index 0000000000..14f2ab448f
--- /dev/null
+++ b/x/ibcratelimit/keeper/rate_limit.go
@@ -0,0 +1,103 @@
+package keeper
+
+import (
+ "encoding/json"
+
+ errorsmod "cosmossdk.io/errors"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/ibc-go/v6/modules/core/exported"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// CheckAndUpdateRateLimits Updates the rate limiter and checks if rate limit has been exceeded.
+func (k Keeper) CheckAndUpdateRateLimits(ctx sdk.Context, msgType string, packet exported.PacketI) error {
+ contract := k.GetContractAddress(ctx)
+
+ contractAddr, err := sdk.AccAddressFromBech32(contract)
+ if err != nil {
+ return errorsmod.Wrap(ibcratelimit.ErrContractError, err.Error())
+ }
+
+ sendPacketMsg, err := k.buildWasmExecMsg(msgType, packet)
+ if err != nil {
+ return errorsmod.Wrap(ibcratelimit.ErrContractError, err.Error())
+ }
+
+ _, err = k.PermissionedKeeper.Sudo(ctx, contractAddr, sendPacketMsg)
+ if err != nil {
+ return errorsmod.Wrap(ibcratelimit.ErrRateLimitExceeded, err.Error())
+ }
+
+ return nil
+}
+
+// UndoSendRateLimit Undoes the changes made to the rate limiter.
+func (k Keeper) UndoSendRateLimit(ctx sdk.Context, contract string, packet exported.PacketI) error {
+ contractAddr, err := sdk.AccAddressFromBech32(contract)
+ if err != nil {
+ return err
+ }
+
+ unwrapped, err := ibcratelimit.UnwrapPacket(packet)
+ if err != nil {
+ return err
+ }
+
+ msg := ibcratelimit.UndoSendMsg{UndoSend: ibcratelimit.UndoPacketMsg{Packet: unwrapped}}
+ asJSON, err := json.Marshal(msg)
+ if err != nil {
+ return err
+ }
+
+ _, err = k.PermissionedKeeper.Sudo(ctx, contractAddr, asJSON)
+ if err != nil {
+ return errorsmod.Wrap(ibcratelimit.ErrContractError, err.Error())
+ }
+
+ return nil
+}
+
+// buildWasmExecMsg Constructs a Wasm Execute Message from a packet and type.
+func (k Keeper) buildWasmExecMsg(msgType string, packet exported.PacketI) ([]byte, error) {
+ unwrapped, err := ibcratelimit.UnwrapPacket(packet)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ var asJSON []byte
+ switch {
+ case msgType == ibcratelimit.MsgSendPacket:
+ msg := ibcratelimit.SendPacketMsg{SendPacket: ibcratelimit.PacketMsg{
+ Packet: unwrapped,
+ }}
+ asJSON, err = json.Marshal(msg)
+ case msgType == ibcratelimit.MsgRecvPacket:
+ msg := ibcratelimit.RecvPacketMsg{RecvPacket: ibcratelimit.PacketMsg{
+ Packet: unwrapped,
+ }}
+ asJSON, err = json.Marshal(msg)
+ default:
+ return []byte{}, ibcratelimit.ErrBadMessage
+ }
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return asJSON, nil
+}
+
+// RevertSentPacket Notifies the contract that a sent packet wasn't properly received.
+func (k Keeper) RevertSentPacket(
+ ctx sdk.Context,
+ packet exported.PacketI,
+) error {
+ if !k.IsContractConfigured(ctx) {
+ return nil
+ }
+
+ contract := k.GetContractAddress(ctx)
+ return k.UndoSendRateLimit(ctx, contract, packet)
+}
diff --git a/x/ibcratelimit/keeper/rate_limit_test.go b/x/ibcratelimit/keeper/rate_limit_test.go
new file mode 100644
index 0000000000..f72fa4b0b5
--- /dev/null
+++ b/x/ibcratelimit/keeper/rate_limit_test.go
@@ -0,0 +1,209 @@
+package keeper_test
+
+import (
+ "github.com/cosmos/ibc-go/v6/modules/core/exported"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+func (s *TestSuite) TestCheckAndUpdateRateLimits() {
+ tests := []struct {
+ name string
+ contract string
+ msgType string
+ packet exported.PacketI
+ err string
+ mockKeeper *MockPermissionedKeeper
+ }{
+ {
+ name: "success - rate limit checked and updated on send",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: ibcratelimit.MsgSendPacket,
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(true),
+ },
+ {
+ name: "success - rate limit checked and updated on recv",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: ibcratelimit.MsgRecvPacket,
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(true),
+ },
+ {
+ name: "failure - an invalid contract throws error",
+ contract: "",
+ msgType: ibcratelimit.MsgSendPacket,
+ packet: nil,
+ err: "empty address string is not allowed: contract error",
+ },
+ {
+ name: "failure - throws error on bad packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: ibcratelimit.MsgSendPacket,
+ packet: NewMockPacket(NewMockSerializedPacketData(), false),
+ err: "bad message: contract error",
+ },
+ {
+ name: "failure - throws error on invalid message type",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: "bad message type",
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ err: "bad message: contract error",
+ },
+ {
+ name: "failure - throws error on bad packet data",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: ibcratelimit.MsgSendPacket,
+ packet: NewMockPacket([]byte("badpacketdata"), true),
+ err: "invalid character 'b' looking for beginning of value: contract error",
+ },
+ {
+ name: "failure - throws error on nil packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: ibcratelimit.MsgRecvPacket,
+ packet: nil,
+ mockKeeper: NewMockPermissionedKeeper(true),
+ err: "bad message: contract error",
+ },
+ {
+ name: "failure - throws error on bad contract operation",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ msgType: ibcratelimit.MsgSendPacket,
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(false),
+ err: "rate limit exceeded: rate limit exceeded",
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ permissionedKeeper := s.app.RateLimitingKeeper.PermissionedKeeper
+ s.app.RateLimitingKeeper.SetParams(s.ctx, ibcratelimit.NewParams(tc.contract))
+ if tc.mockKeeper != nil {
+ s.app.RateLimitingKeeper.PermissionedKeeper = tc.mockKeeper
+ }
+ err := s.app.RateLimitingKeeper.CheckAndUpdateRateLimits(s.ctx, tc.msgType, tc.packet)
+ if len(tc.err) > 0 {
+ s.Assert().EqualError(err, tc.err, "should return the correct error")
+ } else {
+ s.Assert().NoError(err)
+ }
+
+ s.app.RateLimitingKeeper.PermissionedKeeper = permissionedKeeper
+ })
+ }
+}
+
+func (s *TestSuite) TestUndoSendRateLimit() {
+ tests := []struct {
+ name string
+ contract string
+ packet exported.PacketI
+ err string
+ mockKeeper *MockPermissionedKeeper
+ }{
+ {
+ name: "success - undo rate limit",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(true),
+ },
+ {
+ name: "failure - an invalid contract throws error",
+ contract: "",
+ packet: NewMockPacket(NewMockSerializedPacketData(), false),
+ err: "empty address string is not allowed",
+ },
+ {
+ name: "failure - throws error on bad packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: NewMockPacket(NewMockSerializedPacketData(), false),
+ err: "bad message",
+ },
+ {
+ name: "failure - throws error on nil packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: NewMockPacket(NewMockSerializedPacketData(), false),
+ err: "bad message",
+ },
+ {
+ name: "failure - throws error on bad contract operation",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(false),
+ err: "rate limit exceeded: contract error",
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ permissionedKeeper := s.app.RateLimitingKeeper.PermissionedKeeper
+ if tc.mockKeeper != nil {
+ s.app.RateLimitingKeeper.PermissionedKeeper = tc.mockKeeper
+ }
+
+ err := s.app.RateLimitingKeeper.UndoSendRateLimit(s.ctx, tc.contract, tc.packet)
+ if len(tc.err) > 0 {
+ s.Assert().EqualError(err, tc.err, "should return the correct error")
+ } else {
+ s.Assert().NoError(err)
+ }
+
+ s.app.RateLimitingKeeper.PermissionedKeeper = permissionedKeeper
+ })
+ }
+}
+
+func (s *TestSuite) TestRevertSentPacket() {
+ tests := []struct {
+ name string
+ contract string
+ packet exported.PacketI
+ mockKeeper *MockPermissionedKeeper
+ err string
+ }{
+ {
+ name: "success - reverts a sent packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(true),
+ },
+ {
+ name: "failure - can handle unconfigured contract",
+ contract: "",
+ packet: NewMockPacket(NewMockSerializedPacketData(), true),
+ mockKeeper: NewMockPermissionedKeeper(true),
+ },
+ {
+ name: "failure - can handle nil packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: nil,
+ mockKeeper: NewMockPermissionedKeeper(true),
+ err: "bad message",
+ },
+ {
+ name: "failure - throws error on bad packet",
+ contract: "cosmos1w6t0l7z0yerj49ehnqwqaayxqpe3u7e23edgma",
+ packet: NewMockPacket(NewMockSerializedPacketData(), false),
+ err: "bad message",
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ permissionedKeeper := s.app.RateLimitingKeeper.PermissionedKeeper
+ s.app.RateLimitingKeeper.SetParams(s.ctx, ibcratelimit.NewParams(tc.contract))
+ if tc.mockKeeper != nil {
+ s.app.RateLimitingKeeper.PermissionedKeeper = tc.mockKeeper
+ }
+
+ err := s.app.RateLimitingKeeper.RevertSentPacket(s.ctx, tc.packet)
+ if len(tc.err) > 0 {
+ s.Assert().EqualError(err, tc.err, "should return the correct error")
+ } else {
+ s.Assert().NoError(err)
+ }
+
+ s.app.RateLimitingKeeper.PermissionedKeeper = permissionedKeeper
+ })
+ }
+}
diff --git a/x/ibcratelimit/keys.go b/x/ibcratelimit/keys.go
new file mode 100644
index 0000000000..1845708497
--- /dev/null
+++ b/x/ibcratelimit/keys.go
@@ -0,0 +1,14 @@
+package ibcratelimit
+
+const (
+ // ModuleName defines the module name
+ ModuleName = "ratelimitedibc"
+
+ // StoreKey defines the primary module store key
+ StoreKey = ModuleName
+)
+
+var (
+ // ParamsKey is the key to obtain the module's params.
+ ParamsKey = []byte{0x01}
+)
diff --git a/x/ibcratelimit/mocks_test.go b/x/ibcratelimit/mocks_test.go
new file mode 100644
index 0000000000..93a3c39416
--- /dev/null
+++ b/x/ibcratelimit/mocks_test.go
@@ -0,0 +1,89 @@
+package ibcratelimit_test
+
+import (
+ "strings"
+
+ transfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/v6/modules/core/exported"
+)
+
+// MockPacket is a test struct that implements the PacketI interface.
+type MockPacket struct {
+ data []byte
+ validHeight bool
+}
+
+// NewMockPacket creates a new MockPacket.
+func NewMockPacket(data []byte, validHeight bool) *MockPacket {
+ return &MockPacket{
+ data: data,
+ validHeight: validHeight,
+ }
+}
+
+// GetSequence implements the PacketI interface and always returns 1.
+func (m MockPacket) GetSequence() uint64 {
+ return 1
+}
+
+// GetTimeoutHeight implements the PacketI interface and can return a valid or invalid height.
+func (m MockPacket) GetTimeoutHeight() exported.Height {
+ if !m.validHeight {
+ return nil
+ }
+ return clienttypes.Height{
+ RevisionNumber: 5,
+ RevisionHeight: 5,
+ }
+}
+
+// GetTimeoutTimestamp implements the PacketI interface and always returns 1.
+func (m MockPacket) GetTimeoutTimestamp() uint64 {
+ return 1
+}
+
+// GetSourcePort implements the PacketI interface and always returns "src-port".
+func (m MockPacket) GetSourcePort() string {
+ return "src-port"
+}
+
+// GetSourceChannel implements the PacketI interface and always returns "src-channel".
+func (m MockPacket) GetSourceChannel() string {
+ return "src-channel"
+}
+
+// GetDestPort implements the PacketI interface and always returns "dest-port".
+func (m MockPacket) GetDestPort() string {
+ return "dest-port"
+}
+
+// GetDestChannel implements the PacketI interface and always returns "dest-channel".
+func (m MockPacket) GetDestChannel() string {
+ return "dest-channel"
+}
+
+// GetData implements the PacketI interface and always returns provided data.
+func (m MockPacket) GetData() []byte {
+ return m.data
+}
+
+// ValidateBasic implements the PacketI interface and always returns nil.
+func (m MockPacket) ValidateBasic() error {
+ return nil
+}
+
+// NewMockFungiblePacketData creates a new FungibleTokenPacketData for testing.
+func NewMockFungiblePacketData(invalidReceiver bool) transfertypes.FungibleTokenPacketData {
+ data := transfertypes.NewFungibleTokenPacketData(
+ "denom",
+ "500",
+ "sender",
+ "receiver",
+ "memo",
+ )
+ if invalidReceiver {
+ data.Receiver = strings.Repeat("a", 4096)
+ }
+ return data
+}
diff --git a/x/ibcratelimit/module/ibc_middleware.go b/x/ibcratelimit/module/ibc_middleware.go
new file mode 100644
index 0000000000..d67e5dc18a
--- /dev/null
+++ b/x/ibcratelimit/module/ibc_middleware.go
@@ -0,0 +1,253 @@
+package module
+
+import (
+ "encoding/json"
+
+ errorsmod "cosmossdk.io/errors"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ clienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/v6/modules/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/v6/modules/core/05-port/types"
+ "github.com/cosmos/ibc-go/v6/modules/core/exported"
+
+ "github.com/provenance-io/provenance/internal/ibc"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/provenance-io/provenance/x/ibcratelimit/keeper"
+)
+
+var (
+ _ porttypes.Middleware = &IBCMiddleware{}
+)
+
+// IBCMiddleware is the middleware used by the module.
+type IBCMiddleware struct {
+ app porttypes.IBCModule
+ keeper *keeper.Keeper
+ channel porttypes.ICS4Wrapper
+}
+
+// NewIBCMiddleware Creates a new IBCMiddleware.
+func NewIBCMiddleware(app porttypes.IBCModule,
+ channel porttypes.ICS4Wrapper,
+ keeper *keeper.Keeper) IBCMiddleware {
+ return IBCMiddleware{
+ app: app,
+ keeper: keeper,
+ channel: channel,
+ }
+}
+
+// WithIBCModule Updates the Middleware's baseapp and returns it.
+func (im *IBCMiddleware) WithIBCModule(app porttypes.IBCModule) *IBCMiddleware {
+ im.app = app
+ return im
+}
+
+// OnChanOpenInit implements the IBCModule interface
+func (im *IBCMiddleware) OnChanOpenInit(ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID string,
+ channelID string,
+ channelCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version string,
+) (string, error) {
+ return im.app.OnChanOpenInit(
+ ctx,
+ order,
+ connectionHops,
+ portID,
+ channelID,
+ channelCap,
+ counterparty,
+ version,
+ )
+}
+
+// OnChanOpenTry implements the IBCModule interface
+func (im *IBCMiddleware) OnChanOpenTry(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID,
+ channelID string,
+ channelCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ counterpartyVersion string,
+) (string, error) {
+ return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion)
+}
+
+// OnChanOpenAck implements the IBCModule interface
+func (im *IBCMiddleware) OnChanOpenAck(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ counterpartyChannelID string,
+ counterpartyVersion string,
+) error {
+ // Here we can add initial limits when a new channel is open. For now, they can be added manually on the contract
+ return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion)
+}
+
+// OnChanOpenConfirm implements the IBCModule interface
+func (im *IBCMiddleware) OnChanOpenConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // Here we can add initial limits when a new channel is open. For now, they can be added manually on the contract
+ return im.app.OnChanOpenConfirm(ctx, portID, channelID)
+}
+
+// OnChanCloseInit implements the IBCModule interface
+func (im *IBCMiddleware) OnChanCloseInit(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // Here we can remove the limits when a new channel is closed. For now, they can remove them manually on the contract
+ return im.app.OnChanCloseInit(ctx, portID, channelID)
+}
+
+// OnChanCloseConfirm implements the IBCModule interface
+func (im *IBCMiddleware) OnChanCloseConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // Here we can remove the limits when a new channel is closed. For now, they can remove them manually on the contract
+ return im.app.OnChanCloseConfirm(ctx, portID, channelID)
+}
+
+// OnRecvPacket implements the IBCModule interface
+func (im *IBCMiddleware) OnRecvPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ relayer sdk.AccAddress,
+) exported.Acknowledgement {
+ if err := ibcratelimit.ValidateReceiverAddress(packet); err != nil {
+ return ibc.NewEmitErrorAcknowledgement(ctx, ibcratelimit.ErrBadMessage, err.Error())
+ }
+
+ if !im.keeper.IsContractConfigured(ctx) {
+ // The contract has not been configured. Continue as usual
+ return im.app.OnRecvPacket(ctx, packet, relayer)
+ }
+
+ err := im.keeper.CheckAndUpdateRateLimits(ctx, "recv_packet", packet)
+ if err != nil {
+ return ibc.NewEmitErrorAcknowledgement(ctx, err)
+ }
+
+ // if this returns an Acknowledgement that isn't successful, all state changes are discarded
+ return im.app.OnRecvPacket(ctx, packet, relayer)
+}
+
+// OnAcknowledgementPacket implements the IBCModule interface
+func (im *IBCMiddleware) OnAcknowledgementPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ acknowledgement []byte,
+ relayer sdk.AccAddress,
+) error {
+ var ack channeltypes.Acknowledgement
+ if err := json.Unmarshal(acknowledgement, &ack); err != nil {
+ return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %v", err)
+ }
+
+ if ibc.IsAckError(acknowledgement) {
+ err := im.keeper.RevertSentPacket(ctx, packet) // If there is an error here we should still handle the ack
+ if err != nil {
+ eventError := ctx.EventManager().EmitTypedEvent(ibcratelimit.NewEventAckRevertFailure(
+ ibcratelimit.ModuleName,
+ string(packet.GetData()),
+ string(acknowledgement),
+ ))
+ if eventError != nil {
+ ctx.Logger().Error("unable to emit AckRevertFailure event", "err", eventError)
+ }
+ }
+ }
+
+ return im.app.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer)
+}
+
+// OnTimeoutPacket implements the IBCModule interface
+func (im *IBCMiddleware) OnTimeoutPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ relayer sdk.AccAddress,
+) error {
+ err := im.keeper.RevertSentPacket(ctx, packet) // If there is an error here we should still handle the timeout
+ if err != nil {
+ eventError := ctx.EventManager().EmitTypedEvent(ibcratelimit.NewEventTimeoutRevertFailure(
+ ibcratelimit.ModuleName,
+ string(packet.GetData()),
+ ))
+ if eventError != nil {
+ ctx.Logger().Error("unable to emit TimeoutRevertFailure event", "err", eventError)
+ }
+ }
+ return im.app.OnTimeoutPacket(ctx, packet, relayer)
+}
+
+// SendPacket implements the ICS4 interface and is called when sending packets.
+// This method retrieves the contract from the middleware's parameters and checks if the limits have been exceeded for
+// the current transfer, in which case it returns an error preventing the IBC send from taking place.
+// If the contract param is not configured, or the contract doesn't have a configuration for the (channel+denom) being
+// used, transfers are not prevented and handled by the wrapped IBC app
+func (im *IBCMiddleware) SendPacket(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ sourcePort string,
+ sourceChannel string,
+ timeoutHeight clienttypes.Height,
+ timeoutTimestamp uint64,
+ data []byte,
+) (sequence uint64, err error) {
+ if !im.keeper.IsContractConfigured(ctx) {
+ // The contract has not been configured. Continue as usual
+ return im.channel.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data)
+ }
+
+ // We need the full packet so the contract can process it. If it can't be cast to a channeltypes.Packet, this
+ // should fail. The only reason that would happen is if another middleware is modifying the packet, though. In
+ // that case we can modify the middleware order or change this cast so we have all the data we need.
+ packet := channeltypes.NewPacket(
+ data,
+ sequence,
+ sourcePort,
+ sourceChannel,
+ "",
+ "",
+ timeoutHeight,
+ timeoutTimestamp,
+ )
+
+ err = im.keeper.CheckAndUpdateRateLimits(ctx, "send_packet", packet)
+ if err != nil {
+ return 0, errorsmod.Wrap(err, "rate limit SendPacket failed to authorize transfer")
+ }
+
+ return im.channel.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data)
+}
+
+// WriteAcknowledgement implements the ICS4 Wrapper interface
+func (im *IBCMiddleware) WriteAcknowledgement(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+ ack exported.Acknowledgement,
+) error {
+ return im.channel.WriteAcknowledgement(ctx, chanCap, packet, ack)
+}
+
+// GetAppVersion Obtains the version of the ICS4 Wrapper.
+func (im *IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) {
+ return im.channel.GetAppVersion(ctx, portID, channelID)
+}
diff --git a/x/ibcratelimit/module/ibc_middleware_test.go b/x/ibcratelimit/module/ibc_middleware_test.go
new file mode 100644
index 0000000000..2bf5464d98
--- /dev/null
+++ b/x/ibcratelimit/module/ibc_middleware_test.go
@@ -0,0 +1,616 @@
+package module_test
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ "golang.org/x/exp/slices"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ transfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
+ ibctesting "github.com/cosmos/ibc-go/v6/testing"
+ "github.com/stretchr/testify/suite"
+
+ sdkmath "cosmossdk.io/math"
+ sdksim "github.com/cosmos/cosmos-sdk/simapp"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+
+ "github.com/provenance-io/provenance/app"
+ "github.com/provenance-io/provenance/internal/pioconfig"
+ testutil "github.com/provenance-io/provenance/testutil/ibc"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+
+ "github.com/tendermint/tendermint/libs/log"
+ dbm "github.com/tendermint/tm-db"
+)
+
+type MiddlewareTestSuite struct {
+ suite.Suite
+
+ *app.App
+ Ctx sdk.Context
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *testutil.TestChain
+ chainB *testutil.TestChain
+ path *ibctesting.Path
+}
+
+// Setup
+func TestMiddlewareTestSuite(t *testing.T) {
+ suite.Run(t, new(MiddlewareTestSuite))
+}
+
+func SetupSimApp() (ibctesting.TestingApp, map[string]json.RawMessage) {
+ pioconfig.SetProvenanceConfig(sdk.DefaultBondDenom, 0)
+ db := dbm.NewMemDB()
+ encCdc := app.MakeEncodingConfig()
+ provenanceApp := app.New(log.NewNopLogger(), db, nil, true, map[int64]bool{}, app.DefaultNodeHome, 5, encCdc, sdksim.EmptyAppOptions{})
+ genesis := app.NewDefaultGenesisState(encCdc.Marshaler)
+ return provenanceApp, genesis
+}
+
+func (suite *MiddlewareTestSuite) SetupTest() {
+ SkipIfWSL(suite.T())
+ ibctesting.DefaultTestingAppInit = SetupSimApp
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+
+ suite.chainA = &testutil.TestChain{
+ TestChain: suite.coordinator.GetChain(ibctesting.GetChainID(1)),
+ }
+ suite.chainB = &testutil.TestChain{
+ TestChain: suite.coordinator.GetChain(ibctesting.GetChainID(2)),
+ }
+ suite.path = NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(suite.path)
+
+ params := suite.chainA.GetProvenanceApp().MintKeeper.GetParams(suite.chainA.GetContext())
+ params.InflationMax = sdk.NewDec(0)
+ params.InflationRateChange = sdk.NewDec(1)
+ params.InflationMin = sdk.NewDec(0)
+ suite.chainA.GetProvenanceApp().MintKeeper.SetParams(suite.chainA.GetContext(), params)
+ suite.chainB.GetProvenanceApp().MintKeeper.SetParams(suite.chainB.GetContext(), params)
+}
+
+// MessageFromAToB sends a message from chain A to chain B.
+func (suite *MiddlewareTestSuite) MessageFromAToB(denom string, amount sdkmath.Int) sdk.Msg {
+ coin := sdk.NewCoin(denom, amount)
+ port := suite.path.EndpointA.ChannelConfig.PortID
+ channel := suite.path.EndpointA.ChannelID
+ accountFrom := suite.chainA.SenderAccount.GetAddress().String()
+ accountTo := suite.chainB.SenderAccount.GetAddress().String()
+ timeoutHeight := clienttypes.NewHeight(0, 100)
+ memo := ""
+ return transfertypes.NewMsgTransfer(
+ port,
+ channel,
+ coin,
+ accountFrom,
+ accountTo,
+ timeoutHeight,
+ 0,
+ memo,
+ )
+}
+
+// MessageFromBToA sends a message from chain B to chain A.
+func (suite *MiddlewareTestSuite) MessageFromBToA(denom string, amount sdkmath.Int) sdk.Msg {
+ coin := sdk.NewCoin(denom, amount)
+ port := suite.path.EndpointB.ChannelConfig.PortID
+ channel := suite.path.EndpointB.ChannelID
+ accountFrom := suite.chainB.SenderAccount.GetAddress().String()
+ accountTo := suite.chainA.SenderAccount.GetAddress().String()
+ timeoutHeight := clienttypes.NewHeight(0, 100)
+ memo := ""
+ return transfertypes.NewMsgTransfer(
+ port,
+ channel,
+ coin,
+ accountFrom,
+ accountTo,
+ timeoutHeight,
+ 0,
+ memo,
+ )
+}
+
+// Tests that a receiver address longer than 4096 is not accepted
+func (suite *MiddlewareTestSuite) TestInvalidReceiver() {
+ msg := transfertypes.NewMsgTransfer(
+ suite.path.EndpointB.ChannelConfig.PortID,
+ suite.path.EndpointB.ChannelID,
+ sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(1)),
+ suite.chainB.SenderAccount.GetAddress().String(),
+ strings.Repeat("x", 4097),
+ clienttypes.NewHeight(0, 100),
+ 0,
+ "",
+ )
+ _, ack, _ := suite.FullSendBToA(msg)
+ suite.Assert().Contains(ack, "error",
+ "acknowledgment is not an error")
+ suite.Assert().Contains(ack, fmt.Sprintf("ABCI code: %d", ibcratelimit.ErrBadMessage.ABCICode()),
+ "acknowledgment error is not of the right type")
+}
+
+// FullSendBToA does the entire logic from sending a message from chain B to chain A.
+func (suite *MiddlewareTestSuite) FullSendBToA(msg sdk.Msg) (*sdk.Result, string, error) {
+ sendResult, err := suite.chainB.SendMsgsNoCheck(msg)
+ suite.Assert().NoError(err)
+
+ packet, err := ibctesting.ParsePacketFromEvents(sendResult.GetEvents())
+ suite.Assert().NoError(err)
+
+ err = suite.path.EndpointA.UpdateClient()
+ suite.Assert().NoError(err)
+
+ res, err := suite.path.EndpointA.RecvPacketWithResult(packet)
+ suite.Assert().NoError(err)
+
+ ack, err := ibctesting.ParseAckFromEvents(res.GetEvents())
+ suite.Assert().NoError(err)
+
+ err = suite.path.EndpointA.UpdateClient()
+ suite.Assert().NoError(err)
+ err = suite.path.EndpointB.UpdateClient()
+ suite.Assert().NoError(err)
+
+ return sendResult, string(ack), err
+}
+
+// FullSendAToB does the entire logic from sending a message from chain A to chain B.
+func (suite *MiddlewareTestSuite) FullSendAToB(msg sdk.Msg) (*sdk.Result, string, error) {
+ sendResult, err := suite.chainA.SendMsgsNoCheck(msg)
+ if err != nil {
+ return nil, "", err
+ }
+
+ packet, err := ibctesting.ParsePacketFromEvents(sendResult.GetEvents())
+ if err != nil {
+ return nil, "", err
+ }
+
+ err = suite.path.EndpointB.UpdateClient()
+ if err != nil {
+ return nil, "", err
+ }
+
+ res, err := suite.path.EndpointB.RecvPacketWithResult(packet)
+ if err != nil {
+ return nil, "", err
+ }
+
+ ack, err := ibctesting.ParseAckFromEvents(res.GetEvents())
+ if err != nil {
+ return nil, "", err
+ }
+
+ err = suite.path.EndpointA.UpdateClient()
+ if err != nil {
+ return nil, "", err
+ }
+ err = suite.path.EndpointB.UpdateClient()
+ if err != nil {
+ return nil, "", err
+ }
+
+ return sendResult, string(ack), nil
+}
+
+// AssertReceive checks that a receive on A from B was successful.
+func (suite *MiddlewareTestSuite) AssertReceive(success bool, msg sdk.Msg) (string, error) {
+ _, ack, err := suite.FullSendBToA(msg)
+ if success {
+ suite.Assert().NoError(err)
+ suite.Assert().NotContains(ack, "error",
+ "acknowledgment is an error")
+ } else {
+ suite.Assert().Contains(ack, "error",
+ "acknowledgment is not an error")
+ suite.Assert().Contains(ack, fmt.Sprintf("ABCI code: %d", ibcratelimit.ErrRateLimitExceeded.ABCICode()),
+ "acknowledgment error is not of the right type")
+ }
+ return ack, err
+}
+
+// AssertSend checks that a send from A to B was successful.
+func (suite *MiddlewareTestSuite) AssertSend(success bool, msg sdk.Msg) (*sdk.Result, error) {
+ r, _, err := suite.FullSendAToB(msg)
+ if success {
+ suite.Assert().NoError(err, "IBC send failed. Expected success. %s", err)
+ } else {
+ suite.Assert().Error(err, "IBC send succeeded. Expected failure")
+ suite.ErrorContains(err, ibcratelimit.ErrRateLimitExceeded.Error(), "Bad error type")
+ }
+ return r, err
+}
+
+// BuildChannelQuota creates a quota message.
+func (suite *MiddlewareTestSuite) BuildChannelQuota(name, channel, denom string, duration, send_percentage, recv_percentage uint32) string {
+ return fmt.Sprintf(`
+ {"channel_id": "%s", "denom": "%s", "quotas": [{"name":"%s", "duration": %d, "send_recv":[%d, %d]}] }
+ `, channel, denom, name, duration, send_percentage, recv_percentage)
+}
+
+// Tests
+
+// Test that Sending IBC messages works when the middleware isn't configured
+func (suite *MiddlewareTestSuite) TestSendTransferNoContract() {
+ one := sdkmath.NewInt(1)
+ _, err := suite.AssertSend(true, suite.MessageFromAToB(sdk.DefaultBondDenom, one))
+ suite.Assert().NoError(err)
+}
+
+// Test that Receiving IBC messages works when the middleware isn't configured
+func (suite *MiddlewareTestSuite) TestReceiveTransferNoContract() {
+ one := sdkmath.NewInt(1)
+ _, err := suite.AssertReceive(true, suite.MessageFromBToA(sdk.DefaultBondDenom, one))
+ suite.Assert().NoError(err)
+}
+
+// initializeEscrow sets up the escrow on the chain.
+func (suite *MiddlewareTestSuite) initializeEscrow() (totalEscrow, expectedSed sdk.Int) {
+ provenanceApp := suite.chainA.GetProvenanceApp()
+ supply := provenanceApp.BankKeeper.GetSupply(suite.chainA.GetContext(), sdk.DefaultBondDenom)
+
+ // Move some funds from chainA to chainB so that there is something in escrow
+ // Each user has 10% of the supply, so we send most of the funds from one user to chainA
+ transferAmount := supply.Amount.QuoRaw(20)
+
+ // When sending, the amount we're sending goes into escrow before we enter the middleware and thus
+ // it's used as part of the channel value in the rate limiting contract
+ // To account for that, we subtract the amount we'll send first (2.5% of transferAmount) here
+ sendAmount := transferAmount.QuoRaw(40)
+
+ // Send from A to B
+ _, _, err := suite.FullSendAToB(suite.MessageFromAToB(sdk.DefaultBondDenom, transferAmount.Sub(sendAmount)))
+ suite.Assert().NoError(err)
+	// Send from B to A
+ _, _, err = suite.FullSendBToA(suite.MessageFromBToA(sdk.DefaultBondDenom, transferAmount.Sub(sendAmount)))
+ suite.Assert().NoError(err)
+
+ return transferAmount, sendAmount
+}
+
+func (suite *MiddlewareTestSuite) fullSendTest(native bool) map[string]string {
+ quotaPercentage := 5
+ suite.initializeEscrow()
+ // Get the denom and amount to send
+ denom := sdk.DefaultBondDenom
+ channel := "channel-0"
+ if !native {
+ denomTrace := transfertypes.ParseDenomTrace(transfertypes.GetPrefixedDenom("transfer", "channel-0", denom))
+ fmt.Println(denomTrace)
+ denom = denomTrace.IBCDenom()
+ }
+
+ provenanceApp := suite.chainA.GetProvenanceApp()
+
+ // This is the first one. Inside the tests. It works as expected.
+ channelValue := CalculateChannelValue(suite.chainA.GetContext(), denom, provenanceApp.BankKeeper)
+
+	// The amount to be sent is 2.5% (quota is 5%)
+ quota := channelValue.QuoRaw(int64(100 / quotaPercentage))
+ sendAmount := quota.QuoRaw(2)
+
+ fmt.Printf("Testing send rate limiting for denom=%s, channelValue=%s, quota=%s, sendAmount=%s\n", denom, channelValue, quota, sendAmount)
+
+ // Setup contract
+ suite.chainA.StoreContractRateLimiterDirect(&suite.Suite)
+ quotas := suite.BuildChannelQuota("weekly", channel, denom, 604800, 5, 5)
+ fmt.Println(quotas)
+ initMsg := CreateRateLimiterInitMessage(suite.chainA, quotas)
+ addr := suite.chainA.InstantiateContract(&suite.Suite, initMsg, 1)
+ suite.chainA.RegisterRateLimiterContract(&suite.Suite, addr)
+
+ // send 2.5% (quota is 5%)
+ fmt.Printf("Sending %s from A to B. Represented in chain A as wrapped? %v\n", denom, !native)
+ _, err := suite.AssertSend(true, suite.MessageFromAToB(denom, sendAmount))
+ suite.Assert().NoError(err)
+
+ // send 2.5% (quota is 5%)
+ fmt.Println("trying to send ", sendAmount)
+ r, _ := suite.AssertSend(true, suite.MessageFromAToB(denom, sendAmount))
+
+ // Calculate remaining allowance in the quota
+ attrs := ExtractAttributes(FindEvent(r.GetEvents(), "wasm"))
+
+ used, ok := sdkmath.NewIntFromString(attrs["weekly_used_out"])
+ suite.Assert().True(ok)
+
+ suite.Assert().Equal(used, sendAmount.MulRaw(2))
+
+ // Sending above the quota should fail. We use 2 instead of 1 here to avoid rounding issues
+ _, err = suite.AssertSend(false, suite.MessageFromAToB(denom, sdkmath.NewInt(2)))
+ suite.Assert().Error(err)
+ return attrs
+}
+
+// Test rate limiting on sends
+func (suite *MiddlewareTestSuite) TestSendTransferWithRateLimitingNative() {
+ // Sends denom=stake from A->B. Rate limit receives "stake" in the packet. Nothing to do in the contract
+ suite.fullSendTest(true)
+}
+
+// Test rate limiting on sends of non-native (IBC-wrapped) denoms
+func (suite *MiddlewareTestSuite) TestSendTransferWithRateLimitingNonNative() {
+ // Sends denom=ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878 from A->B.
+ // Rate limit receives "transfer/channel-0/stake" in the packet (because transfer.relay.SendTransfer is called before the middleware)
+ // and should hash it before calculating the value
+ suite.fullSendTest(false)
+}
+
+// Test rate limits are reset when the specified time period has passed
+func (suite *MiddlewareTestSuite) TestSendTransferReset() {
+ // Same test as above, but the quotas get reset after time passes
+ attrs := suite.fullSendTest(true)
+ parts := strings.Split(attrs["weekly_period_end"], ".") // Splitting timestamp into secs and nanos
+ secs, err := strconv.ParseInt(parts[0], 10, 64)
+ suite.Assert().NoError(err)
+ nanos, err := strconv.ParseInt(parts[1], 10, 64)
+ suite.Assert().NoError(err)
+ resetTime := time.Unix(secs, nanos)
+
+ // Move chainA forward one block
+ suite.chainA.NextBlock()
+ err = suite.chainA.SenderAccount.SetSequence(suite.chainA.SenderAccount.GetSequence() + 1)
+ suite.Assert().NoError(err)
+
+ // Reset time + one second
+ oneSecAfterReset := resetTime.Add(time.Second)
+ suite.coordinator.IncrementTimeBy(oneSecAfterReset.Sub(suite.coordinator.CurrentTime))
+
+ // Sending should succeed again
+ _, err = suite.AssertSend(true, suite.MessageFromAToB(sdk.DefaultBondDenom, sdkmath.NewInt(1)))
+ suite.Assert().NoError(err)
+}
+
+// Test rate limiting on receives
+func (suite *MiddlewareTestSuite) fullRecvTest(native bool) {
+ quotaPercentage := 4
+ suite.initializeEscrow()
+ // Get the denom and amount to send
+ sendDenom := sdk.DefaultBondDenom
+ localDenom := sdk.DefaultBondDenom
+ channel := "channel-0"
+ if native {
+ denomTrace := transfertypes.ParseDenomTrace(transfertypes.GetPrefixedDenom("transfer", "channel-0", localDenom))
+ localDenom = denomTrace.IBCDenom()
+ } else {
+ denomTrace := transfertypes.ParseDenomTrace(transfertypes.GetPrefixedDenom("transfer", "channel-0", sendDenom))
+ sendDenom = denomTrace.IBCDenom()
+ }
+
+ provenanceApp := suite.chainA.GetProvenanceApp()
+
+ channelValue := CalculateChannelValue(suite.chainA.GetContext(), localDenom, provenanceApp.BankKeeper)
+
+ // The amount to be sent is 2% (quota is 4%)
+ quota := channelValue.QuoRaw(int64(100 / quotaPercentage))
+ sendAmount := quota.QuoRaw(2)
+
+ fmt.Printf("Testing recv rate limiting for denom=%s, channelValue=%s, quota=%s, sendAmount=%s\n", localDenom, channelValue, quota, sendAmount)
+
+ // Setup contract
+ suite.chainA.StoreContractRateLimiterDirect(&suite.Suite)
+ quotas := suite.BuildChannelQuota("weekly", channel, localDenom, 604800, 4, 4)
+ initMsg := CreateRateLimiterInitMessage(suite.chainA, quotas)
+ addr := suite.chainA.InstantiateContract(&suite.Suite, initMsg, 1)
+ suite.chainA.RegisterRateLimiterContract(&suite.Suite, addr)
+
+ // receive 2% (quota is 4%)
+ fmt.Printf("Sending %s from B to A. Represented in chain A as wrapped? %v\n", sendDenom, native)
+ _, err := suite.AssertReceive(true, suite.MessageFromBToA(sendDenom, sendAmount))
+ suite.Assert().NoError(err)
+
+ // receive 2% (quota is 4%)
+ _, err = suite.AssertReceive(true, suite.MessageFromBToA(sendDenom, sendAmount))
+ suite.Assert().NoError(err)
+
+ // Sending above the quota should fail. We send 2 instead of 1 to account for rounding errors
+ _, err = suite.AssertReceive(false, suite.MessageFromBToA(sendDenom, sdkmath.NewInt(2)))
+ suite.Assert().NoError(err)
+}
+
+func (suite *MiddlewareTestSuite) TestRecvTransferWithRateLimitingNative() {
+ // Sends denom=stake from B->A.
+ // Rate limit receives "stake" in the packet and should wrap it before calculating the value
+ // types.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) should return false => Wrap the token
+ suite.fullRecvTest(true)
+}
+
+func (suite *MiddlewareTestSuite) TestRecvTransferWithRateLimitingNonNative() {
+ // Sends denom=ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878 from B->A.
+ // Rate limit receives "transfer/channel-0/stake" in the packet and should turn it into "stake"
+ // types.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) should return true => unprefix. If unprefixed is not local, hash.
+ suite.fullRecvTest(false)
+}
+
+// Test no rate limiting occurs when the contract is set, but no quotas are configured for the path
+func (suite *MiddlewareTestSuite) TestSendTransferNoQuota() {
+ // Setup contract
+ suite.chainA.StoreContractRateLimiterDirect(&suite.Suite)
+ initMsg := CreateRateLimiterInitMessage(suite.chainA, "")
+ addr := suite.chainA.InstantiateContract(&suite.Suite, initMsg, 1)
+ suite.chainA.RegisterRateLimiterContract(&suite.Suite, addr)
+
+ // send 1 token.
+ // If the contract doesn't have a quota for the current channel, all transfers are allowed
+ _, err := suite.AssertSend(true, suite.MessageFromAToB(sdk.DefaultBondDenom, sdkmath.NewInt(1)))
+ suite.Assert().NoError(err)
+}
+
+// Test rate limits are reverted if a "send" fails
+func (suite *MiddlewareTestSuite) TestFailedSendTransfer() {
+ suite.initializeEscrow()
+ // Setup contract
+ suite.chainA.StoreContractRateLimiterDirect(&suite.Suite)
+ quotas := suite.BuildChannelQuota("weekly", "channel-0", sdk.DefaultBondDenom, 604800, 1, 1)
+ initMsg := CreateRateLimiterInitMessage(suite.chainA, quotas)
+ addr := suite.chainA.InstantiateContract(&suite.Suite, initMsg, 1)
+ suite.chainA.RegisterRateLimiterContract(&suite.Suite, addr)
+
+ // Get the escrowed amount
+ provenanceApp := suite.chainA.GetProvenanceApp()
+ // ToDo: This is what we eventually want here, but using the full supply temporarily for performance reasons. See calculateChannelValue
+ // escrowAddress := transfertypes.GetEscrowAddress("transfer", "channel-0")
+ // escrowed := provenanceApp.BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, sdk.DefaultBondDenom)
+ escrowed := provenanceApp.BankKeeper.GetSupply(suite.chainA.GetContext(), sdk.DefaultBondDenom)
+ quota := escrowed.Amount.QuoRaw(100) // 1% of the escrowed amount
+
+ // Use the whole quota
+ coins := sdk.NewCoin(sdk.DefaultBondDenom, quota)
+ port := suite.path.EndpointA.ChannelConfig.PortID
+ channel := suite.path.EndpointA.ChannelID
+ accountFrom := suite.chainA.SenderAccount.GetAddress().String()
+ timeoutHeight := clienttypes.NewHeight(0, 100)
+ memo := ""
+ msg := transfertypes.NewMsgTransfer(port, channel, coins, accountFrom, "INVALID", timeoutHeight, 0, memo)
+
+ // Sending the message manually because AssertSend updates both clients. We need to update the clients manually
+ // for this test so that the failure to receive on chain B happens after the second packet is sent from chain A.
+ // That way we validate that chain A is blocking as expected, but the flow is reverted after the receive failure is
+ // acknowledged on chain A
+ res, err := suite.chainA.SendMsgsNoCheck(msg)
+ suite.Assert().NoError(err)
+
+ // Sending again fails as the quota is filled
+ _, err = suite.AssertSend(false, suite.MessageFromAToB(sdk.DefaultBondDenom, quota))
+ suite.Assert().Error(err)
+
+ // Move forward one block
+ suite.chainA.NextBlock()
+ err = suite.chainA.SenderAccount.SetSequence(suite.chainA.SenderAccount.GetSequence() + 1)
+ suite.Assert().NoError(err)
+ suite.chainA.Coordinator.IncrementTime()
+
+ // Update both clients
+ err = suite.path.EndpointA.UpdateClient()
+ suite.Assert().NoError(err)
+ err = suite.path.EndpointB.UpdateClient()
+ suite.Assert().NoError(err)
+
+ // Execute the acknowledgement from chain B in chain A
+
+ // extract the sent packet
+ packet, err := ibctesting.ParsePacketFromEvents(res.GetEvents())
+ suite.Assert().NoError(err)
+
+ // recv in chain b
+ res, err = suite.path.EndpointB.RecvPacketWithResult(packet)
+ suite.Assert().NoError(err)
+
+ // get the ack from the chain b's response
+ ack, err := ibctesting.ParseAckFromEvents(res.GetEvents())
+ suite.Assert().NoError(err)
+
+ // manually relay it to chain a
+ err = suite.path.EndpointA.AcknowledgePacket(packet, ack)
+ suite.Assert().NoError(err)
+
+ // We should be able to send again because the packet that exceeded the quota failed and has been reverted
+ _, err = suite.AssertSend(true, suite.MessageFromAToB(sdk.DefaultBondDenom, sdkmath.NewInt(1)))
+ suite.Assert().NoError(err)
+}
+
+func (suite *MiddlewareTestSuite) TestUnsetRateLimitingContract() {
+ // Setup contract
+ suite.chainA.StoreContractRateLimiterDirect(&suite.Suite)
+ msg := CreateRateLimiterInitMessage(suite.chainA, "")
+ addr := suite.chainA.InstantiateContract(&suite.Suite, msg, 1)
+ suite.chainA.RegisterRateLimiterContract(&suite.Suite, addr)
+
+ // Unset the contract param
+ suite.chainA.RegisterRateLimiterContract(&suite.Suite, []byte(""))
+ contractAddress := suite.chainA.GetProvenanceApp().RateLimitingKeeper.GetContractAddress(suite.chainA.GetContext())
+ suite.Assert().Equal("", contractAddress, "should unregister contract")
+
+}
+
+// FindEvent finds an event with a matching name.
+func FindEvent(events []sdk.Event, name string) sdk.Event {
+ index := slices.IndexFunc(events, func(e sdk.Event) bool { return e.Type == name })
+ if index == -1 {
+ return sdk.Event{}
+ }
+ return events[index]
+}
+
+// ExtractAttributes returns the event's attributes in a map.
+func ExtractAttributes(event sdk.Event) map[string]string {
+ attrs := make(map[string]string)
+ if event.Attributes == nil {
+ return attrs
+ }
+ for _, a := range event.Attributes {
+ attrs[string(a.Key)] = string(a.Value)
+ }
+ return attrs
+}
+
+// CreateRateLimiterInitMessage creates a contract init message for the rate limiter using the supplied quotas.
+func CreateRateLimiterInitMessage(chain *testutil.TestChain, quotas string) string {
+ provenanceApp := chain.GetProvenanceApp()
+ transferModule := provenanceApp.AccountKeeper.GetModuleAddress(transfertypes.ModuleName)
+ govModule := provenanceApp.AccountKeeper.GetModuleAddress(govtypes.ModuleName)
+
+ initMsg := fmt.Sprintf(`{
+ "gov_module": "%s",
+ "ibc_module":"%s",
+ "paths": [%s]
+ }`,
+ govModule, transferModule, quotas)
+ return initMsg
+}
+
+// CalculateChannelValue returns the total number of denom on a channel.
+func CalculateChannelValue(ctx sdk.Context, denom string, bankKeeper bankkeeper.Keeper) sdkmath.Int {
+ return bankKeeper.GetSupply(ctx, denom).Amount
+
+ // ToDo: The commented-out code below is what we want to happen, but we're temporarily
+ // using the whole supply for efficiency until there's a solution for
+ // https://github.com/cosmos/ibc-go/issues/2664
+
+ // For non-native (ibc) tokens, return the supply of the token on this chain
+ //if strings.HasPrefix(denom, "ibc/") {
+ // return bankKeeper.GetSupplyWithOffset(ctx, denom).Amount
+ //}
+ //
+ // For native tokens, obtain the balance held in escrow for all potential channels
+ //channels := channelKeeper.GetAllChannels(ctx)
+ //balance := osmomath.NewInt(0)
+ //for _, channel := range channels {
+ // escrowAddress := transfertypes.GetEscrowAddress("transfer", channel.ChannelId)
+ // balance = balance.Add(bankKeeper.GetBalance(ctx, escrowAddress, denom).Amount)
+ //
+ //}
+ //return balance
+}
+
+// NewTransferPath creates a new ibc transfer path for testing.
+func NewTransferPath(chainA, chainB *testutil.TestChain) *ibctesting.Path {
+ path := ibctesting.NewPath(chainA.TestChain, chainB.TestChain)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort
+ path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort
+ path.EndpointA.ChannelConfig.Version = transfertypes.Version
+ path.EndpointB.ChannelConfig.Version = transfertypes.Version
+ return path
+}
+
+// SkipIfWSL skips the test if it is being run on WSL.
+func SkipIfWSL(t *testing.T) {
+ t.Helper()
+ skip := os.Getenv("SKIP_WASM_WSL_TESTS")
+ if skip == "true" {
+ t.Skip("Skipping Wasm tests")
+ }
+}
diff --git a/x/ibcratelimit/module/module.go b/x/ibcratelimit/module/module.go
new file mode 100644
index 0000000000..50f84fb4ca
--- /dev/null
+++ b/x/ibcratelimit/module/module.go
@@ -0,0 +1,190 @@
+package module
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+
+ "github.com/gorilla/mux"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ ibcratelimitcli "github.com/provenance-io/provenance/x/ibcratelimit/client/cli"
+ "github.com/provenance-io/provenance/x/ibcratelimit/keeper"
+ "github.com/provenance-io/provenance/x/ibcratelimit/simulation"
+)
+
+var (
+ _ module.AppModule = AppModule{}
+ _ module.AppModuleBasic = AppModuleBasic{}
+ _ module.AppModuleSimulation = AppModule{}
+)
+
+// AppModuleBasic defines the basic application module used by the ibcratelimit module.
+type AppModuleBasic struct {
+ cdc codec.Codec
+}
+
+// Name returns the ibcratelimit module's name.
+func (AppModuleBasic) Name() string { return ibcratelimit.ModuleName }
+
+// RegisterLegacyAminoCodec registers the ibcratelimit module's types for the given codec.
+func (AppModuleBasic) RegisterLegacyAminoCodec(_ *codec.LegacyAmino) {
+}
+
+// RegisterInterfaces registers interfaces and implementations of the ibcratelimit module.
+func (AppModuleBasic) RegisterInterfaces(cdc codectypes.InterfaceRegistry) {
+ ibcratelimit.RegisterInterfaces(cdc)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the ibcratelimit
+// module.
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
+ return cdc.MustMarshalJSON(ibcratelimit.DefaultGenesis())
+}
+
+// ValidateGenesis performs genesis state validation for the ibcratelimit module.
+func (b AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error {
+ var genState ibcratelimit.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &genState); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", ibcratelimit.ModuleName, err)
+ }
+ return genState.Validate()
+}
+
+// RegisterRESTRoutes registers the REST routes for the ibcratelimit module.
+// Deprecated: RegisterRESTRoutes is deprecated.
+func (b AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) {
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the ibcratelimit module.
+func (b AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) {
+ if err := ibcratelimit.RegisterQueryHandlerClient(context.Background(), mux, ibcratelimit.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+ }
+}
+
+// GetQueryCmd returns the cli query commands for the ibcratelimit module
+func (b AppModuleBasic) GetQueryCmd() *cobra.Command {
+ return ibcratelimitcli.GetQueryCmd()
+}
+
+// GetTxCmd returns the transaction commands for the ibcratelimit module
+func (b AppModuleBasic) GetTxCmd() *cobra.Command {
+ return ibcratelimitcli.NewTxCmd()
+}
+
+// AppModule implements the sdk.AppModule interface
+type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ accountKeeper authkeeper.AccountKeeper
+ bankKeeper bankkeeper.Keeper
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, accountKeeper authkeeper.AccountKeeper, bankKeeper bankkeeper.Keeper) AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{cdc: cdc},
+ keeper: keeper,
+ accountKeeper: accountKeeper,
+ bankKeeper: bankKeeper,
+ }
+}
+
+// GenerateGenesisState creates a randomized GenState of the ibcratelimit module.
+func (am AppModule) GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// ProposalContents returns content functions used to simulate governance proposals.
+func (am AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent {
+ return nil
+}
+
+// RandomizedParams returns randomized module parameters for param change proposals.
+func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange {
+ return nil
+}
+
+// RegisterStoreDecoder registers a func to decode each module's defined types from their corresponding store key
+func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) {
+ sdr[ibcratelimit.StoreKey] = simulation.NewDecodeStore(am.cdc)
+}
+
+// WeightedOperations returns simulation operations (i.e msgs) with their respective weight
+func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ simState.AppParams, simState.Cdc, am.keeper, am.accountKeeper, am.bankKeeper,
+ )
+}
+
+// Name returns the ibcratelimit module's name.
+func (AppModule) Name() string {
+ return ibcratelimit.ModuleName
+}
+
+// RegisterInvariants does nothing, there are no invariants to enforce
+func (AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {}
+
+// Deprecated: Route returns the message routing key for the ibcratelimit module.
+func (am AppModule) Route() sdk.Route {
+ return sdk.Route{}
+}
+
+// QuerierRoute returns the route we respond to for abci queries
+func (AppModule) QuerierRoute() string { return "" }
+
+// LegacyQuerierHandler returns the ibcratelimit module sdk.Querier.
+func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier {
+ return nil
+}
+
+// InitGenesis performs the ibcratelimit module's genesis initialization. It returns
+// no validator updates.
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate {
+ var genState ibcratelimit.GenesisState
+ // Initialize global index to index in genesis state
+ cdc.MustUnmarshalJSON(gs, &genState)
+ am.keeper.InitGenesis(ctx, &genState)
+
+ return []abci.ValidatorUpdate{}
+}
+
+// ExportGenesis returns the ibcratelimit module's exported genesis state as raw JSON bytes.
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
+ genState := am.keeper.ExportGenesis(ctx)
+ return cdc.MustMarshalJSON(genState)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule) ConsensusVersion() uint64 { return 1 }
+
+// BeginBlock executes all ABCI BeginBlock logic respective to the ibcratelimit module.
+func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {}
+
+// EndBlock executes all ABCI EndBlock logic respective to the ibcratelimit module. It
+// returns no validator updates.
+func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate {
+ return []abci.ValidatorUpdate{}
+}
+
+// RegisterServices registers a GRPC query service to respond to the
+// module-specific GRPC queries.
+func (am AppModule) RegisterServices(cfg module.Configurator) {
+ ibcratelimit.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ ibcratelimit.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServer(am.keeper))
+}
diff --git a/x/ibcratelimit/msgs.go b/x/ibcratelimit/msgs.go
new file mode 100644
index 0000000000..c84104cc75
--- /dev/null
+++ b/x/ibcratelimit/msgs.go
@@ -0,0 +1,33 @@
+package ibcratelimit
+
+import (
+ fmt "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var AllRequestMsgs = []sdk.Msg{
+ (*MsgGovUpdateParamsRequest)(nil),
+}
+
+// NewMsgGovUpdateParamsRequest creates a new GovUpdateParams message.
+func NewMsgGovUpdateParamsRequest(authority, ratelimiter string) *MsgGovUpdateParamsRequest {
+ return &MsgGovUpdateParamsRequest{
+ Authority: authority,
+ Params: NewParams(ratelimiter),
+ }
+}
+
+// ValidateBasic runs stateless validation checks on the message.
+func (m MsgGovUpdateParamsRequest) ValidateBasic() error {
+ if _, err := sdk.AccAddressFromBech32(m.Authority); err != nil {
+ return fmt.Errorf("invalid authority: %w", err)
+ }
+ return m.Params.Validate()
+}
+
+// GetSigners indicates that the message must have been signed by the address provided.
+func (m MsgGovUpdateParamsRequest) GetSigners() []sdk.AccAddress {
+ addr := sdk.MustAccAddressFromBech32(m.Authority)
+ return []sdk.AccAddress{addr}
+}
diff --git a/x/ibcratelimit/msgs_test.go b/x/ibcratelimit/msgs_test.go
new file mode 100644
index 0000000000..e7dd362e66
--- /dev/null
+++ b/x/ibcratelimit/msgs_test.go
@@ -0,0 +1,105 @@
+package ibcratelimit_test
+
+import (
+ "testing"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/provenance-io/provenance/testutil/assertions"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewMsgGovUpdateParamsRequest(t *testing.T) {
+ expected := &ibcratelimit.MsgGovUpdateParamsRequest{
+ Authority: "authority",
+ Params: ibcratelimit.NewParams("contract"),
+ }
+ event := ibcratelimit.NewMsgGovUpdateParamsRequest(expected.Authority, expected.Params.ContractAddress)
+ assert.Equal(t, expected, event, "should create the correct with correct content")
+}
+
+func TestNewMsgGovUpdateParamsValidateBasic(t *testing.T) {
+ tests := []struct {
+ name string
+ authority string
+ contract string
+ err string
+ }{
+ {
+ name: "success - valid message",
+ authority: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ contract: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ },
+ {
+ name: "success - empty contract",
+ authority: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ contract: "",
+ },
+ {
+ name: "failure - invalid authority",
+ authority: "authority",
+ contract: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ err: "invalid authority: decoding bech32 failed: invalid separator index -1",
+ },
+ {
+ name: "failure - invalid contract",
+ authority: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ contract: "contract",
+ err: "decoding bech32 failed: invalid separator index -1",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ msg := ibcratelimit.NewMsgGovUpdateParamsRequest(tc.authority, tc.contract)
+ err := msg.ValidateBasic()
+
+ if len(tc.err) > 0 {
+ assert.EqualError(t, err, tc.err, "should return correct error")
+ } else {
+ assert.NoError(t, err, "should not throw an error")
+ }
+ })
+ }
+}
+
+func TestMsgGovUpdateParamsRequestGetSigners(t *testing.T) {
+ tests := []struct {
+ name string
+ authority string
+ err string
+ }{
+ {
+ name: "success - valid signer",
+ authority: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ },
+ {
+ name: "failure - missing signer",
+ authority: "",
+ err: "empty address string is not allowed",
+ },
+ {
+ name: "failure - invalid signer",
+ authority: "authority",
+ err: "decoding bech32 failed: invalid separator index -1",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ msg := ibcratelimit.NewMsgGovUpdateParamsRequest(tc.authority, "contract")
+
+ if len(tc.err) > 0 {
+ assertions.RequirePanicEquals(t, func() {
+ msg.GetSigners()
+ }, tc.err, "should panic with correct message")
+ } else {
+ signers := make([]sdk.AccAddress, 1)
+ assert.NotPanics(t, func() {
+ signers = msg.GetSigners()
+ }, "should not panic")
+ assert.Equal(t, signers[0].String(), tc.authority)
+ }
+ })
+ }
+}
diff --git a/x/ibcratelimit/packet.go b/x/ibcratelimit/packet.go
new file mode 100644
index 0000000000..d9ac1a279d
--- /dev/null
+++ b/x/ibcratelimit/packet.go
@@ -0,0 +1,94 @@
+package ibcratelimit
+
+import (
+ "encoding/json"
+
+ errorsmod "cosmossdk.io/errors"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ transfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/v6/modules/core/exported"
+)
+
+const (
+ // MsgSendPacket is the operation used for tracking a sent packet.
+ MsgSendPacket = "send_packet"
+ // MsgRecvPacket is the operation used for tracking a received packet.
+ MsgRecvPacket = "recv_packet"
+)
+
+// UndoSendMsg is an ibcratelimit contract message meant to undo tracked sends.
+type UndoSendMsg struct {
+ UndoSend UndoPacketMsg `json:"undo_send"`
+}
+
+// UndoPacketMsg is an operation done by the UndoSendMsg.
+type UndoPacketMsg struct {
+ Packet UnwrappedPacket `json:"packet"`
+}
+
+// SendPacketMsg is an ibcratelimit contract message meant to track sends.
+type SendPacketMsg struct {
+ SendPacket PacketMsg `json:"send_packet"`
+}
+
+// RecvPacketMsg is an ibcratelimit contract message meant to track receives.
+type RecvPacketMsg struct {
+ RecvPacket PacketMsg `json:"recv_packet"`
+}
+
+// PacketMsg contains the unwrapped packet payload passed to the rate limiter contract.
+type PacketMsg struct {
+ Packet UnwrappedPacket `json:"packet"`
+}
+
+// UnwrappedPacket is a FungibleTokenPacket.
+type UnwrappedPacket struct {
+ Sequence uint64 `json:"sequence"`
+ SourcePort string `json:"source_port"`
+ SourceChannel string `json:"source_channel"`
+ DestinationPort string `json:"destination_port"`
+ DestinationChannel string `json:"destination_channel"`
+ Data transfertypes.FungibleTokenPacketData `json:"data"`
+ TimeoutHeight clienttypes.Height `json:"timeout_height"`
+ TimeoutTimestamp uint64 `json:"timeout_timestamp,omitempty"`
+}
+
+// ValidateReceiverAddress Checks if the receiver is valid for the transfer data.
+func ValidateReceiverAddress(packet exported.PacketI) error {
+ var packetData transfertypes.FungibleTokenPacketData
+ if err := json.Unmarshal(packet.GetData(), &packetData); err != nil {
+ return err
+ }
+ if len(packetData.Receiver) >= 4096 {
+ return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "IBC Receiver address too long. Max supported length is %d", 4096)
+ }
+ return nil
+}
+
+// UnwrapPacket Converts a PacketI into an UnwrappedPacket structure.
+func UnwrapPacket(packet exported.PacketI) (UnwrappedPacket, error) {
+ if packet == nil {
+ return UnwrappedPacket{}, ErrBadMessage
+ }
+ var packetData transfertypes.FungibleTokenPacketData
+ err := json.Unmarshal(packet.GetData(), &packetData)
+ if err != nil {
+ return UnwrappedPacket{}, err
+ }
+ height, ok := packet.GetTimeoutHeight().(clienttypes.Height)
+ if !ok {
+ return UnwrappedPacket{}, ErrBadMessage
+ }
+ return UnwrappedPacket{
+ Sequence: packet.GetSequence(),
+ SourcePort: packet.GetSourcePort(),
+ SourceChannel: packet.GetSourceChannel(),
+ DestinationPort: packet.GetDestPort(),
+ DestinationChannel: packet.GetDestChannel(),
+ Data: packetData,
+ TimeoutHeight: height,
+ TimeoutTimestamp: packet.GetTimeoutTimestamp(),
+ }, nil
+}
diff --git a/x/ibcratelimit/packet_test.go b/x/ibcratelimit/packet_test.go
new file mode 100644
index 0000000000..a6f3f6aff0
--- /dev/null
+++ b/x/ibcratelimit/packet_test.go
@@ -0,0 +1,130 @@
+package ibcratelimit_test
+
+import (
+ "encoding/json"
+ "testing"
+
+ clienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateReceiverAddress(t *testing.T) {
+ tests := []struct {
+ name string
+ packetFn func() []byte
+ err string
+ }{
+ {
+ name: "success - packet is valid",
+ packetFn: func() []byte {
+ data := NewMockFungiblePacketData(false)
+ bytes, _ := json.Marshal(data)
+ return bytes
+ },
+ },
+ {
+ name: "failure - long receiver name",
+ packetFn: func() []byte {
+ data := NewMockFungiblePacketData(true)
+ bytes, _ := json.Marshal(data)
+ return bytes
+ },
+ err: "IBC Receiver address too long. Max supported length is 4096: invalid address",
+ },
+ {
+ name: "failure - invalid packet type",
+ packetFn: func() []byte {
+ return []byte("garbage")
+ },
+ err: "invalid character 'g' looking for beginning of value",
+ },
+ {
+ name: "failure - nil data",
+ packetFn: func() []byte {
+ return nil
+ },
+ err: "unexpected end of JSON input",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ err := ibcratelimit.ValidateReceiverAddress(NewMockPacket(tc.packetFn(), false))
+ if len(tc.err) > 0 {
+ assert.EqualError(t, err, tc.err, "should return correct error when invalid")
+ } else {
+ assert.NoError(t, err, "should not return an error when valid")
+ }
+ })
+ }
+}
+
+func TestUnwrapPacket(t *testing.T) {
+ tests := []struct {
+ name string
+ packetFn func() []byte
+ validHeight bool
+ err string
+ expected ibcratelimit.UnwrappedPacket
+ }{
+ {
+ name: "success - packet data and height are valid",
+ packetFn: func() []byte {
+ data := NewMockFungiblePacketData(false)
+ bytes, _ := json.Marshal(data)
+ return bytes
+ },
+ expected: ibcratelimit.UnwrappedPacket{
+ Sequence: 1,
+ SourcePort: "src-port",
+ SourceChannel: "src-channel",
+ DestinationPort: "dest-port",
+ DestinationChannel: "dest-channel",
+ TimeoutHeight: clienttypes.Height{
+ RevisionNumber: 5,
+ RevisionHeight: 5,
+ },
+ TimeoutTimestamp: 1,
+ Data: NewMockFungiblePacketData(false),
+ },
+ validHeight: true,
+ },
+ {
+ name: "failure - height is invalid",
+ packetFn: func() []byte {
+ data := NewMockFungiblePacketData(false)
+ bytes, _ := json.Marshal(data)
+ return bytes
+ },
+ err: "bad message",
+ },
+ {
+ name: "failure - invalid packet data",
+ packetFn: func() []byte {
+ return []byte("garbage")
+ },
+ err: "invalid character 'g' looking for beginning of value",
+ },
+ {
+ name: "failure - nil data",
+ packetFn: func() []byte {
+ return nil
+ },
+ err: "unexpected end of JSON input",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ unwrapped, err := ibcratelimit.UnwrapPacket(NewMockPacket(tc.packetFn(), tc.validHeight))
+ if len(tc.err) > 0 {
+ assert.Equal(t, tc.expected, unwrapped, "should return an empty unwrapped packet on failure")
+ assert.EqualError(t, err, tc.err, "should return correct error when invalid")
+ } else {
+ assert.Equal(t, tc.expected, unwrapped, "should return an unwrapped packet with correct data")
+ assert.NoError(t, err, "should not return an error when valid")
+ }
+ })
+ }
+}
diff --git a/x/ibcratelimit/params.go b/x/ibcratelimit/params.go
new file mode 100644
index 0000000000..cd8d78980b
--- /dev/null
+++ b/x/ibcratelimit/params.go
@@ -0,0 +1,38 @@
+package ibcratelimit
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// NewParams creates a new Params object.
+func NewParams(contractAddress string) Params {
+ return Params{
+ ContractAddress: contractAddress,
+ }
+}
+
+// DefaultParams creates default ibcratelimit module parameters.
+func DefaultParams() Params {
+ return NewParams("")
+}
+
+// Validate verifies all params are correct
+func (p Params) Validate() error {
+ return validateContractAddress(p.ContractAddress)
+}
+
+// validateContractAddress Checks if the supplied address is a valid contract address.
+func validateContractAddress(addr string) error {
+ // Empty strings are valid for unsetting the param
+ if addr == "" {
+ return nil
+ }
+
+ // Checks that the contract address is valid
+ _, err := sdk.AccAddressFromBech32(addr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/x/ibcratelimit/params.pb.go b/x/ibcratelimit/params.pb.go
new file mode 100644
index 0000000000..cb8b1d2208
--- /dev/null
+++ b/x/ibcratelimit/params.pb.go
@@ -0,0 +1,323 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: provenance/ibcratelimit/v1/params.proto
+
+package ibcratelimit
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Params defines the parameters for the ibcratelimit module.
+type Params struct {
+ // contract_address is the address of the rate limiter contract.
+ ContractAddress string `protobuf:"bytes,1,opt,name=contract_address,json=contractAddress,proto3" json:"contract_address,omitempty" yaml:"contract_address"`
+}
+
+func (m *Params) Reset() { *m = Params{} }
+func (m *Params) String() string { return proto.CompactTextString(m) }
+func (*Params) ProtoMessage() {}
+func (*Params) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ee0cbe8442d3fe43, []int{0}
+}
+func (m *Params) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Params.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Params) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Params.Merge(m, src)
+}
+func (m *Params) XXX_Size() int {
+ return m.Size()
+}
+func (m *Params) XXX_DiscardUnknown() {
+ xxx_messageInfo_Params.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Params proto.InternalMessageInfo
+
+func (m *Params) GetContractAddress() string {
+ if m != nil {
+ return m.ContractAddress
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*Params)(nil), "provenance.ibcratelimit.v1.Params")
+}
+
+func init() {
+ proto.RegisterFile("provenance/ibcratelimit/v1/params.proto", fileDescriptor_ee0cbe8442d3fe43)
+}
+
+var fileDescriptor_ee0cbe8442d3fe43 = []byte{
+ // 210 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2f, 0x28, 0xca, 0x2f,
+ 0x4b, 0xcd, 0x4b, 0xcc, 0x4b, 0x4e, 0xd5, 0xcf, 0x4c, 0x4a, 0x2e, 0x4a, 0x2c, 0x49, 0xcd, 0xc9,
+ 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28,
+ 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x42, 0x28, 0xd4, 0x43, 0x56, 0xa8, 0x57, 0x66, 0x28, 0x25, 0x92,
+ 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xa6, 0x0f, 0x62, 0x41, 0x74, 0x28, 0x05, 0x70, 0xb1, 0x05, 0x80,
+ 0x4d, 0x10, 0x72, 0xe3, 0x12, 0x48, 0xce, 0xcf, 0x2b, 0x29, 0x4a, 0x4c, 0x2e, 0x89, 0x4f, 0x4c,
+ 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x92, 0xfe, 0x74, 0x4f,
+ 0x5e, 0xbc, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x5d, 0x85, 0x52, 0x10, 0x3f, 0x4c, 0xc8, 0x11,
+ 0x22, 0xe2, 0x94, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
+ 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x5c, 0xb2, 0x99,
+ 0x60, 0x5b, 0x71, 0x38, 0x30, 0x80, 0x31, 0xca, 0x28, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f,
+ 0x39, 0x3f, 0x57, 0x1f, 0xa1, 0x50, 0x37, 0x33, 0x1f, 0x89, 0xa7, 0x5f, 0x81, 0x12, 0x04, 0x49,
+ 0x6c, 0x60, 0x7f, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x41, 0x39, 0x5d, 0x4a, 0x24, 0x01,
+ 0x00, 0x00,
+}
+
+func (m *Params) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Params) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ContractAddress) > 0 {
+ i -= len(m.ContractAddress)
+ copy(dAtA[i:], m.ContractAddress)
+ i = encodeVarintParams(dAtA, i, uint64(len(m.ContractAddress)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintParams(dAtA []byte, offset int, v uint64) int {
+ offset -= sovParams(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Params) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ContractAddress)
+ if l > 0 {
+ n += 1 + l + sovParams(uint64(l))
+ }
+ return n
+}
+
+func sovParams(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozParams(x uint64) (n int) {
+ return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Params) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowParams
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Params: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContractAddress", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowParams
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthParams
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthParams
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContractAddress = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipParams(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthParams
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipParams(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowParams
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowParams
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowParams
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthParams
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupParams
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthParams
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowParams = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/x/ibcratelimit/params_test.go b/x/ibcratelimit/params_test.go
new file mode 100644
index 0000000000..b801c6ec05
--- /dev/null
+++ b/x/ibcratelimit/params_test.go
@@ -0,0 +1,71 @@
+package ibcratelimit_test
+
+import (
+ "testing"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateParams(t *testing.T) {
+ testCases := map[string]struct {
+ addr interface{}
+ expected bool
+ }{
+ "valid_addr": {
+ addr: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ expected: true,
+ },
+ "invalid_addr": {
+ addr: "cosmos1234",
+ expected: false,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ addr, ok := tc.addr.(string)
+ assert.True(t, ok, "unexpected type of address")
+
+ params := ibcratelimit.Params{
+ ContractAddress: addr,
+ }
+
+ err := params.Validate()
+
+ if !tc.expected {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestNewParams(t *testing.T) {
+ tests := []struct {
+ name string
+ addr string
+ }{
+ {
+ name: "success - empty contract address can be used",
+ addr: "",
+ },
+ {
+ name: "success - address is correctly set.",
+ addr: "cosmos1qm0hhug8kszhcp9f3ryuecz5yw8s3e5v0n2ckd",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ params := ibcratelimit.NewParams(tc.addr)
+ assert.Equal(t, tc.addr, params.ContractAddress)
+ })
+ }
+}
+
+func TestDefaultParams(t *testing.T) {
+ params := ibcratelimit.DefaultParams()
+ assert.Equal(t, "", params.ContractAddress)
+}
diff --git a/x/ibcratelimit/query.pb.go b/x/ibcratelimit/query.pb.go
new file mode 100644
index 0000000000..867b242efa
--- /dev/null
+++ b/x/ibcratelimit/query.pb.go
@@ -0,0 +1,539 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: provenance/ibcratelimit/v1/query.proto
+
+package ibcratelimit
+
+import (
+ context "context"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// ParamsRequest is the request type for the Query/Params RPC method.
+type ParamsRequest struct {
+}
+
+func (m *ParamsRequest) Reset() { *m = ParamsRequest{} }
+func (m *ParamsRequest) String() string { return proto.CompactTextString(m) }
+func (*ParamsRequest) ProtoMessage() {}
+func (*ParamsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_530d9ff030c0dc3e, []int{0}
+}
+func (m *ParamsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ParamsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ParamsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParamsRequest.Merge(m, src)
+}
+func (m *ParamsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ParamsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParamsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParamsRequest proto.InternalMessageInfo
+
+// ParamsResponse is the response type for the Query/Params RPC method.
+type ParamsResponse struct {
+ // params defines the parameters of the module.
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"`
+}
+
+func (m *ParamsResponse) Reset() { *m = ParamsResponse{} }
+func (m *ParamsResponse) String() string { return proto.CompactTextString(m) }
+func (*ParamsResponse) ProtoMessage() {}
+func (*ParamsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_530d9ff030c0dc3e, []int{1}
+}
+func (m *ParamsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ParamsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ParamsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParamsResponse.Merge(m, src)
+}
+func (m *ParamsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *ParamsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParamsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParamsResponse proto.InternalMessageInfo
+
+func (m *ParamsResponse) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
+func init() {
+ proto.RegisterType((*ParamsRequest)(nil), "provenance.ibcratelimit.v1.ParamsRequest")
+ proto.RegisterType((*ParamsResponse)(nil), "provenance.ibcratelimit.v1.ParamsResponse")
+}
+
+func init() {
+ proto.RegisterFile("provenance/ibcratelimit/v1/query.proto", fileDescriptor_530d9ff030c0dc3e)
+}
+
+var fileDescriptor_530d9ff030c0dc3e = []byte{
+ // 285 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2b, 0x28, 0xca, 0x2f,
+ 0x4b, 0xcd, 0x4b, 0xcc, 0x4b, 0x4e, 0xd5, 0xcf, 0x4c, 0x4a, 0x2e, 0x4a, 0x2c, 0x49, 0xcd, 0xc9,
+ 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca,
+ 0x2f, 0xc9, 0x17, 0x92, 0x42, 0xa8, 0xd3, 0x43, 0x56, 0xa7, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e,
+ 0x9f, 0x9e, 0x0f, 0x56, 0xa6, 0x0f, 0x62, 0x41, 0x74, 0x48, 0xc9, 0xa4, 0xe7, 0xe7, 0xa7, 0xe7,
+ 0xa4, 0xea, 0x27, 0x16, 0x64, 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7,
+ 0x15, 0x43, 0x65, 0xd5, 0xf1, 0xd8, 0x5b, 0x90, 0x58, 0x94, 0x98, 0x0b, 0x55, 0xa8, 0xc4, 0xcf,
+ 0xc5, 0x1b, 0x00, 0xe6, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x28, 0x05, 0x71, 0xf1, 0xc1,
+ 0x04, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x1c, 0xb8, 0xd8, 0x20, 0x5a, 0x24, 0x18, 0x15,
+ 0x18, 0x35, 0xb8, 0x8d, 0x94, 0xf4, 0x70, 0x3b, 0x56, 0x0f, 0xa2, 0xd7, 0x89, 0xe5, 0xc4, 0x3d,
+ 0x79, 0x86, 0x20, 0xa8, 0x3e, 0xa3, 0xa9, 0x8c, 0x5c, 0xac, 0x81, 0x20, 0xdf, 0x0a, 0x75, 0x33,
+ 0x72, 0xb1, 0x41, 0x94, 0x08, 0x69, 0x12, 0x36, 0x06, 0xea, 0x26, 0x29, 0x2d, 0x62, 0x94, 0x42,
+ 0x5c, 0xab, 0xa4, 0xd5, 0x74, 0xf9, 0xc9, 0x64, 0x26, 0x15, 0x21, 0x25, 0x7d, 0x82, 0x41, 0xe0,
+ 0x94, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78,
+ 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x5c, 0xb2, 0x99, 0xf9, 0x78,
+ 0xec, 0x0c, 0x60, 0x8c, 0x32, 0x4a, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0x45,
+ 0xb2, 0x48, 0x37, 0x33, 0x1f, 0xd9, 0xda, 0x0a, 0x14, 0x8b, 0x93, 0xd8, 0xc0, 0x41, 0x6e, 0x0c,
+ 0x08, 0x00, 0x00, 0xff, 0xff, 0xae, 0xf7, 0xe5, 0x5c, 0x15, 0x02, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// QueryClient is the client API for Query service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryClient interface {
+ // Params defines a gRPC query method that returns the ibcratelimit module's
+ // parameters.
+ Params(ctx context.Context, in *ParamsRequest, opts ...grpc.CallOption) (*ParamsResponse, error)
+}
+
+type queryClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewQueryClient(cc grpc1.ClientConn) QueryClient {
+ return &queryClient{cc}
+}
+
+func (c *queryClient) Params(ctx context.Context, in *ParamsRequest, opts ...grpc.CallOption) (*ParamsResponse, error) {
+ out := new(ParamsResponse)
+ err := c.cc.Invoke(ctx, "/provenance.ibcratelimit.v1.Query/Params", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QueryServer is the server API for Query service.
+type QueryServer interface {
+ // Params defines a gRPC query method that returns the ibcratelimit module's
+ // parameters.
+ Params(context.Context, *ParamsRequest) (*ParamsResponse, error)
+}
+
+// UnimplementedQueryServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServer struct {
+}
+
+func (*UnimplementedQueryServer) Params(ctx context.Context, req *ParamsRequest) (*ParamsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
+}
+
+func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
+ s.RegisterService(&_Query_serviceDesc, srv)
+}
+
+func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ParamsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Params(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/provenance.ibcratelimit.v1.Query/Params",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Params(ctx, req.(*ParamsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Query_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "provenance.ibcratelimit.v1.Query",
+ HandlerType: (*QueryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Params",
+ Handler: _Query_Params_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "provenance/ibcratelimit/v1/query.proto",
+}
+
+func (m *ParamsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ParamsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *ParamsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ParamsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
+ offset -= sovQuery(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ParamsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *ParamsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Params.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func sovQuery(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozQuery(x uint64) (n int) {
+ return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ParamsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ParamsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ParamsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ParamsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipQuery(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupQuery
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/x/ibcratelimit/query.pb.gw.go b/x/ibcratelimit/query.pb.gw.go
new file mode 100644
index 0000000000..3c25603511
--- /dev/null
+++ b/x/ibcratelimit/query.pb.gw.go
@@ -0,0 +1,148 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: provenance/ibcratelimit/v1/query.proto
+
+/*
+Package ibcratelimit is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package ibcratelimit
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+
+func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := server.Params(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
+// UnaryRPC :call QueryServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
+
+ mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterQueryHandler(ctx, mux, conn)
+}
+
+// RegisterQueryHandler registers the http handlers for service Query to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
+}
+
+// RegisterQueryHandlerClient registers the http handlers for service Query
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "QueryClient" to call the correct interceptors.
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
+
+ mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"provenance", "ibcratelimit", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false)))
+)
+
+var (
+ forward_Query_Params_0 = runtime.ForwardResponseMessage
+)
diff --git a/x/ibcratelimit/simulation/decoder.go b/x/ibcratelimit/simulation/decoder.go
new file mode 100644
index 0000000000..57027ad758
--- /dev/null
+++ b/x/ibcratelimit/simulation/decoder.go
@@ -0,0 +1,29 @@
+package simulation
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
+// Value into module state (Params) for rendering simulation store diffs; panics on unknown keys.
+func NewDecodeStore(cdc codec.Codec) func(kvA, kvB kv.Pair) string {
+	return func(kvA, kvB kv.Pair) string {
+		switch {
+		case bytes.Equal(kvA.Key[:1], ibcratelimit.ParamsKey):
+			var attribA, attribB ibcratelimit.Params
+
+			cdc.MustUnmarshal(kvA.Value, &attribA)
+			cdc.MustUnmarshal(kvB.Value, &attribB)
+
+			return fmt.Sprintf("Params: A:[%v] B:[%v]\n", attribA, attribB)
+		default:
+			panic(fmt.Sprintf("unexpected %s key %X (%s)", ibcratelimit.ModuleName, kvA.Key, kvA.Key))
+		}
+	}
+}
diff --git a/x/ibcratelimit/simulation/decoder_test.go b/x/ibcratelimit/simulation/decoder_test.go
new file mode 100644
index 0000000000..d392eb7f51
--- /dev/null
+++ b/x/ibcratelimit/simulation/decoder_test.go
@@ -0,0 +1,55 @@
+package simulation_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+
+ "github.com/provenance-io/provenance/testutil/assertions"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/provenance-io/provenance/x/ibcratelimit/simulation"
+)
+
+func TestDecodeStore(t *testing.T) {
+ cdc := simapp.MakeTestEncodingConfig().Codec
+ dec := simulation.NewDecodeStore(cdc)
+	params := func(contract string) []byte {
+		p := ibcratelimit.NewParams(contract)
+		return cdc.MustMarshal(&p)
+	}
+
+	tests := []struct {
+		name     string
+		kvA      kv.Pair
+		kvB      kv.Pair
+		exp      string
+		expPanic string
+	}{
+		{
+			name:     "failure - unknown key type",
+			kvA:      kv.Pair{Key: []byte{0x9a}, Value: []byte{0x9b}},
+			kvB:      kv.Pair{Key: []byte{0x9c}, Value: []byte{0x9d}},
+			expPanic: "unexpected ratelimitedibc key 9A (\x9a)",
+		},
+		{
+			name: "success - ParamsKey",
+			kvA:  kv.Pair{Key: ibcratelimit.ParamsKey, Value: params("contract a")},
+			kvB:  kv.Pair{Key: ibcratelimit.ParamsKey, Value: params("contract b")},
+			exp:  "Params: A:[{contract a}] B:[{contract b}]\n",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ var actual string
+ testFunc := func() {
+ actual = dec(tc.kvA, tc.kvB)
+ }
+ assertions.RequirePanicEquals(t, testFunc, tc.expPanic, "running decoder")
+ assert.Equal(t, tc.exp, actual, "decoder result")
+ })
+ }
+}
diff --git a/x/ibcratelimit/simulation/genesis.go b/x/ibcratelimit/simulation/genesis.go
new file mode 100644
index 0000000000..c0de7a68b2
--- /dev/null
+++ b/x/ibcratelimit/simulation/genesis.go
@@ -0,0 +1,44 @@
+package simulation
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+)
+
+// Simulation parameter constants
+const (
+ Contract = "contract"
+)
+
+// ContractFn returns a randomized contract address, or an empty string roughly half the time.
+func ContractFn(r *rand.Rand, accs []simtypes.Account) string {
+ randomAccount, _ := RandomAccs(r, accs, 1)
+ if r.Intn(2) > 0 || len(randomAccount) == 0 {
+ return ""
+ }
+ return randomAccount[0].Address.String()
+}
+
+// RandomizedGenState generates a random GenesisState for ibcratelimit
+func RandomizedGenState(simState *module.SimulationState) {
+ var contract string
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, Contract, &contract, simState.Rand,
+ func(r *rand.Rand) { contract = ContractFn(r, simState.Accounts) },
+ )
+
+ genesis := ibcratelimit.NewGenesisState(ibcratelimit.NewParams(contract))
+ simState.GenState[ibcratelimit.ModuleName] = simState.Cdc.MustMarshalJSON(genesis)
+
+ bz, err := json.MarshalIndent(simState.GenState[ibcratelimit.ModuleName], "", " ")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Selected randomly generated ratelimitedibc parameters:\n%s\n", bz)
+}
diff --git a/x/ibcratelimit/simulation/genesis_test.go b/x/ibcratelimit/simulation/genesis_test.go
new file mode 100644
index 0000000000..af9707ebc7
--- /dev/null
+++ b/x/ibcratelimit/simulation/genesis_test.go
@@ -0,0 +1,108 @@
+package simulation_test
+
+import (
+ "encoding/json"
+ "math/rand"
+ "testing"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/provenance-io/provenance/x/ibcratelimit/simulation"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestContractFn(t *testing.T) {
+ accs := simtypes.RandomAccounts(rand.New(rand.NewSource(0)), 3)
+
+ tests := []struct {
+ name string
+ seed int64
+ expected string
+ accounts []simtypes.Account
+ }{
+ {
+ name: "success - returns an empty account",
+ seed: 0,
+ accounts: accs,
+ expected: "",
+ },
+ {
+ name: "success - returns a random account",
+ seed: 3,
+ accounts: accs,
+ expected: "cosmos1tp4es44j4vv8m59za3z0tm64dkmlnm8wg2frhc",
+ },
+ {
+ name: "success - returns a different random account",
+ seed: 2,
+ accounts: accs,
+ expected: "cosmos12jszjrc0qhjt0ugt2uh4ptwu0h55pq6qfp9ecl",
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ r := rand.New(rand.NewSource(tc.seed))
+ port := simulation.ContractFn(r, tc.accounts)
+ assert.Equal(t, tc.expected, port, "should return correct random contract")
+ })
+ }
+}
+
+func TestRandomizedGenState(t *testing.T) {
+ accs := simtypes.RandomAccounts(rand.New(rand.NewSource(0)), 3)
+ tests := []struct {
+ name string
+ seed int64
+ expRateLimitGen *ibcratelimit.GenesisState
+ accounts []simtypes.Account
+ }{
+ {
+ name: "success - can handle no accounts",
+ seed: 0,
+ accounts: nil,
+ expRateLimitGen: &ibcratelimit.GenesisState{
+ Params: ibcratelimit.NewParams(""),
+ },
+ },
+ {
+ name: "success - can handle accounts",
+ seed: 1,
+ accounts: accs,
+ expRateLimitGen: &ibcratelimit.GenesisState{
+ Params: ibcratelimit.NewParams(""),
+ },
+ },
+ {
+ name: "success - has different output",
+ seed: 2,
+ accounts: accs,
+ expRateLimitGen: &ibcratelimit.GenesisState{
+ Params: ibcratelimit.NewParams("cosmos12jszjrc0qhjt0ugt2uh4ptwu0h55pq6qfp9ecl"),
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ simState := &module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: codec.NewProtoCodec(codectypes.NewInterfaceRegistry()),
+ Rand: rand.New(rand.NewSource(tc.seed)),
+ GenState: make(map[string]json.RawMessage),
+ Accounts: tc.accounts,
+ }
+ simulation.RandomizedGenState(simState)
+
+ if assert.NotEmpty(t, simState.GenState[ibcratelimit.ModuleName]) {
+ rateLimitGenState := &ibcratelimit.GenesisState{}
+ err := simState.Cdc.UnmarshalJSON(simState.GenState[ibcratelimit.ModuleName], rateLimitGenState)
+ if assert.NoError(t, err, "UnmarshalJSON(ratelimitedibc gen state)") {
+ assert.Equal(t, tc.expRateLimitGen, rateLimitGenState, "hold ratelimitedibc state")
+ }
+ }
+ })
+ }
+}
diff --git a/x/ibcratelimit/simulation/operations.go b/x/ibcratelimit/simulation/operations.go
new file mode 100644
index 0000000000..ff4010ff85
--- /dev/null
+++ b/x/ibcratelimit/simulation/operations.go
@@ -0,0 +1,136 @@
+package simulation
+
+import (
+ "fmt"
+ "math/rand"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/simapp/helpers"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ "github.com/cosmos/cosmos-sdk/x/bank/testutil"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+
+ simappparams "github.com/provenance-io/provenance/app/params"
+ "github.com/provenance-io/provenance/internal/pioconfig"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/provenance-io/provenance/x/ibcratelimit/keeper"
+)
+
+// Simulation operation weights constants
+const (
+ //nolint:gosec // not credentials
+ OpWeightMsgUpdateParams = "op_weight_msg_update_params"
+)
+
+// WeightedOperations returns all the operations from the module with their respective weights
+func WeightedOperations(
+ appParams simtypes.AppParams, cdc codec.JSONCodec, k keeper.Keeper, ak authkeeper.AccountKeeperI, bk bankkeeper.Keeper,
+) simulation.WeightedOperations {
+ var (
+ weightMsgUpdateParams int
+ )
+
+ appParams.GetOrGenerate(cdc, OpWeightMsgUpdateParams, &weightMsgUpdateParams, nil,
+ func(_ *rand.Rand) {
+ weightMsgUpdateParams = simappparams.DefaultWeightGovUpdateParams
+ },
+ )
+
+ return simulation.WeightedOperations{
+ simulation.NewWeightedOperation(
+ weightMsgUpdateParams,
+ SimulateMsgGovUpdateParams(k, ak, bk),
+ ),
+ }
+}
+
+// SimulateMsgGovUpdateParams sends a MsgUpdateParams.
+func SimulateMsgGovUpdateParams(_ keeper.Keeper, ak authkeeper.AccountKeeperI, bk bankkeeper.Keeper) simtypes.Operation {
+ return func(
+ r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string,
+ ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {
+ raccs, err := RandomAccs(r, accs, uint64(len(accs)))
+ if err != nil {
+ return simtypes.NoOpMsg(sdk.MsgTypeURL(&ibcratelimit.MsgGovUpdateParamsRequest{}), sdk.MsgTypeURL(&ibcratelimit.MsgGovUpdateParamsRequest{}), err.Error()), nil, nil
+ }
+
+		// Use the first two shuffled accounts: raccs[0] signs as the msg authority, raccs[1] supplies the new params value.
+ from := raccs[0]
+ to := raccs[1]
+
+ msg := ibcratelimit.NewMsgGovUpdateParamsRequest(from.Address.String(), to.Address.String())
+
+ return Dispatch(r, app, ctx, from, chainID, msg, ak, bk, nil)
+ }
+}
+
+// Dispatch sends an operation to the chain using a given account/funds on account for fees. Failures on the server side
+// are handled as no-op msg operations with the error string as the status/response.
+func Dispatch(
+ r *rand.Rand,
+ app *baseapp.BaseApp,
+ ctx sdk.Context,
+ from simtypes.Account,
+ chainID string,
+ msg sdk.Msg,
+ ak authkeeper.AccountKeeperI,
+ bk bankkeeper.Keeper,
+ futures []simtypes.FutureOperation,
+) (
+ simtypes.OperationMsg,
+ []simtypes.FutureOperation,
+ error,
+) {
+ account := ak.GetAccount(ctx, from.Address)
+ spendable := bk.SpendableCoins(ctx, account.GetAddress())
+
+ fees, err := simtypes.RandomFees(r, ctx, spendable)
+ if err != nil {
+ return simtypes.NoOpMsg(sdk.MsgTypeURL(msg), sdk.MsgTypeURL(msg), "unable to generate fees"), nil, err
+ }
+ err = testutil.FundAccount(bk, ctx, account.GetAddress(), sdk.NewCoins(sdk.Coin{
+ Denom: pioconfig.GetProvenanceConfig().BondDenom,
+ Amount: sdk.NewInt(1_000_000_000_000_000),
+ }))
+ if err != nil {
+ return simtypes.NoOpMsg(sdk.MsgTypeURL(msg), sdk.MsgTypeURL(msg), "unable to fund account"), nil, err
+ }
+ txGen := simappparams.MakeTestEncodingConfig().TxConfig
+ tx, err := helpers.GenSignedMockTx(
+ r,
+ txGen,
+ []sdk.Msg{msg},
+ fees,
+ helpers.DefaultGenTxGas,
+ chainID,
+ []uint64{account.GetAccountNumber()},
+ []uint64{account.GetSequence()},
+ from.PrivKey,
+ )
+ if err != nil {
+ return simtypes.NoOpMsg(sdk.MsgTypeURL(msg), sdk.MsgTypeURL(msg), "unable to generate mock tx"), nil, err
+ }
+
+ _, _, err = app.SimDeliver(txGen.TxEncoder(), tx)
+ if err != nil {
+ return simtypes.NoOpMsg(sdk.MsgTypeURL(msg), sdk.MsgTypeURL(msg), err.Error()), nil, nil
+ }
+
+ return simtypes.NewOperationMsg(msg, true, "", &codec.ProtoCodec{}), futures, nil
+}
+
+func RandomAccs(r *rand.Rand, accs []simtypes.Account, count uint64) ([]simtypes.Account, error) {
+ if uint64(len(accs)) < count {
+ return nil, fmt.Errorf("cannot choose %d accounts because there are only %d", count, len(accs))
+ }
+ raccs := make([]simtypes.Account, 0, len(accs))
+ raccs = append(raccs, accs...)
+ r.Shuffle(len(raccs), func(i, j int) {
+ raccs[i], raccs[j] = raccs[j], raccs[i]
+ })
+ return raccs[:count], nil
+}
diff --git a/x/ibcratelimit/simulation/operations_test.go b/x/ibcratelimit/simulation/operations_test.go
new file mode 100644
index 0000000000..c1d042f344
--- /dev/null
+++ b/x/ibcratelimit/simulation/operations_test.go
@@ -0,0 +1,199 @@
+package simulation_test
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/bank/testutil"
+ "github.com/provenance-io/provenance/app"
+ simappparams "github.com/provenance-io/provenance/app/params"
+ "github.com/provenance-io/provenance/x/ibcratelimit"
+ "github.com/provenance-io/provenance/x/ibcratelimit/simulation"
+ "github.com/stretchr/testify/suite"
+)
+
+type SimTestSuite struct {
+ suite.Suite
+
+ ctx sdk.Context
+ app *app.App
+}
+
+func (s *SimTestSuite) SetupTest() {
+ s.app = app.Setup(s.T())
+ s.ctx = s.app.BaseApp.NewContext(false, tmproto.Header{})
+}
+
+// LogOperationMsg logs all fields of the provided operationMsg.
+func (s *SimTestSuite) LogOperationMsg(operationMsg simtypes.OperationMsg, msg string, args ...interface{}) {
+ msgFmt := "%s"
+ if len(bytes.TrimSpace(operationMsg.Msg)) == 0 {
+ msgFmt = " %q"
+ }
+ fmtLines := []string{
+ fmt.Sprintf(msg, args...),
+ "operationMsg.Route: %q",
+ "operationMsg.Name: %q",
+ "operationMsg.Comment: %q",
+ "operationMsg.OK: %t",
+ "operationMsg.Msg: " + msgFmt,
+ }
+ s.T().Logf(strings.Join(fmtLines, "\n"),
+ operationMsg.Route, operationMsg.Name, operationMsg.Comment, operationMsg.OK, string(operationMsg.Msg),
+ )
+}
+
+func (s *SimTestSuite) TestWeightedOperations() {
+ cdc := s.app.AppCodec()
+ appParams := make(simtypes.AppParams)
+
+ weightedOps := simulation.WeightedOperations(appParams, cdc, *s.app.RateLimitingKeeper,
+ s.app.AccountKeeper, s.app.BankKeeper,
+ )
+
+ // setup 3 accounts
+ source := rand.NewSource(1)
+ r := rand.New(source)
+ accs := s.getTestingAccounts(r, 3)
+
+ // begin a new block
+ s.app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: s.app.LastBlockHeight() + 1, AppHash: s.app.LastCommitID().Hash}})
+
+ expected := []struct {
+ weight int
+ opMsgRoute string
+ opMsgName string
+ }{
+ {simappparams.DefaultWeightGovUpdateParams, sdk.MsgTypeURL(&ibcratelimit.MsgGovUpdateParamsRequest{}), sdk.MsgTypeURL(&ibcratelimit.MsgGovUpdateParamsRequest{})},
+ }
+
+ expNames := make([]string, len(expected))
+ for i, exp := range expected {
+ expNames[i] = exp.opMsgName
+ }
+
+ // Run all the ops and get the operation messages and their names.
+ opMsgs := make([]simtypes.OperationMsg, len(weightedOps))
+ actualNames := make([]string, len(weightedOps))
+ for i, w := range weightedOps {
+ opMsgs[i], _, _ = w.Op()(r, s.app.BaseApp, s.ctx, accs, "")
+ actualNames[i] = opMsgs[i].Name
+ }
+
+ // First, make sure the op names are as expected since a failure there probably means the rest will fail.
+ // And it's probably easier to address when you've got a nice list comparison of names and their orderings.
+ s.Require().Equal(expNames, actualNames, "operation message names")
+
+ // Now assert that each entry was as expected.
+ for i := range expected {
+ s.Assert().Equal(expected[i].weight, weightedOps[i].Weight(), "weightedOps[%d].Weight", i)
+ s.Assert().Equal(expected[i].opMsgRoute, opMsgs[i].Route, "weightedOps[%d] operationMsg.Route", i)
+ s.Assert().Equal(expected[i].opMsgName, opMsgs[i].Name, "weightedOps[%d] operationMsg.Name", i)
+ }
+}
+
+func (s *SimTestSuite) TestSimulateMsgGovUpdateParams() {
+ // setup 3 accounts
+ source := rand.NewSource(1)
+ r := rand.New(source)
+ accounts := s.getTestingAccounts(r, 3)
+
+ // begin a new block
+ s.app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: s.app.LastBlockHeight() + 1, AppHash: s.app.LastCommitID().Hash}})
+
+ // execute operation
+ op := simulation.SimulateMsgGovUpdateParams(*s.app.RateLimitingKeeper, s.app.AccountKeeper, s.app.BankKeeper)
+ operationMsg, futureOperations, err := op(r, s.app.BaseApp, s.ctx, accounts, "")
+ s.Require().NoError(err, "SimulateMsgGovUpdateParams op(...) error")
+ s.LogOperationMsg(operationMsg, "good")
+
+ var msg ibcratelimit.MsgGovUpdateParamsRequest
+ s.Require().NoError(s.app.AppCodec().UnmarshalJSON(operationMsg.Msg, &msg), "UnmarshalJSON(operationMsg.Msg)")
+
+ s.Assert().True(operationMsg.OK, "operationMsg.OK")
+ s.Assert().Equal(sdk.MsgTypeURL(&msg), operationMsg.Name, "operationMsg.Name")
+ s.Assert().Equal(sdk.MsgTypeURL(&msg), operationMsg.Route, "operationMsg.Route")
+ s.Assert().Len(futureOperations, 0, "futureOperations")
+}
+
+func (s *SimTestSuite) TestRandomAccs() {
+ source := rand.NewSource(1)
+ r := rand.New(source)
+ accounts := s.getTestingAccounts(r, 3)
+
+ tests := []struct {
+ name string
+ accs []simtypes.Account
+ expected []simtypes.Account
+ count uint64
+ err string
+ }{
+ {
+ name: "valid - return nothing when count is 0",
+ accs: []simtypes.Account{},
+ expected: []simtypes.Account{},
+ count: 0,
+ },
+ {
+ name: "valid - return 1 when count is 1",
+ accs: []simtypes.Account{accounts[0]},
+ expected: []simtypes.Account{accounts[0]},
+ count: 1,
+ },
+ {
+ name: "valid - return multiple when count greater than 1",
+ accs: []simtypes.Account{accounts[0], accounts[1]},
+ expected: []simtypes.Account{accounts[1], accounts[0]},
+ count: 2,
+ },
+ {
+ name: "valid - return is limited by count",
+ accs: []simtypes.Account{accounts[0], accounts[1], accounts[2]},
+ expected: []simtypes.Account{accounts[1]},
+ count: 1,
+ },
+ {
+ name: "invalid - return error when count is greater than length",
+ accs: []simtypes.Account{accounts[0], accounts[1]},
+ expected: []simtypes.Account{},
+ count: 3,
+ err: "cannot choose 3 accounts because there are only 2",
+ },
+ }
+
+ for _, tc := range tests {
+ s.Run(tc.name, func() {
+ raccs, err := simulation.RandomAccs(r, tc.accs, tc.count)
+ if len(tc.err) == 0 {
+ s.Require().NoError(err, "should have no error for successful RandomAccs")
+ s.Require().Equal(tc.expected, raccs, "should have correct output for successful RandomAccs")
+ } else {
+ s.Require().EqualError(err, tc.err, "should have correct error message for RandomAccs")
+ }
+ })
+ }
+}
+
+func (s *SimTestSuite) getTestingAccounts(r *rand.Rand, n int) []simtypes.Account {
+ accounts := simtypes.RandomAccounts(r, n)
+
+ initAmt := sdk.TokensFromConsensusPower(1000000, sdk.DefaultPowerReduction)
+ initCoins := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initAmt))
+
+ // add coins to the accounts
+ for _, account := range accounts {
+ acc := s.app.AccountKeeper.NewAccountWithAddress(s.ctx, account.Address)
+ s.app.AccountKeeper.SetAccount(s.ctx, acc)
+ err := testutil.FundAccount(s.app.BankKeeper, s.ctx, account.Address, initCoins)
+ s.Require().NoError(err)
+ }
+
+ return accounts
+}
diff --git a/x/ibcratelimit/tx.pb.go b/x/ibcratelimit/tx.pb.go
new file mode 100644
index 0000000000..dfb43d217d
--- /dev/null
+++ b/x/ibcratelimit/tx.pb.go
@@ -0,0 +1,594 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: provenance/ibcratelimit/v1/tx.proto
+
+package ibcratelimit
+
+import (
+ context "context"
+ fmt "fmt"
+ _ "github.com/cosmos/cosmos-proto"
+ _ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgGovUpdateParamsRequest is a request message for the GovUpdateParams endpoint.
+type MsgGovUpdateParamsRequest struct {
+ // authority should be the governance module account address.
+ Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"`
+ // params are the new param values to set
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"`
+}
+
+func (m *MsgGovUpdateParamsRequest) Reset() { *m = MsgGovUpdateParamsRequest{} }
+func (m *MsgGovUpdateParamsRequest) String() string { return proto.CompactTextString(m) }
+func (*MsgGovUpdateParamsRequest) ProtoMessage() {}
+func (*MsgGovUpdateParamsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e09935355436fc3e, []int{0}
+}
+func (m *MsgGovUpdateParamsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgGovUpdateParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgGovUpdateParamsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgGovUpdateParamsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgGovUpdateParamsRequest.Merge(m, src)
+}
+func (m *MsgGovUpdateParamsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgGovUpdateParamsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgGovUpdateParamsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgGovUpdateParamsRequest proto.InternalMessageInfo
+
+func (m *MsgGovUpdateParamsRequest) GetAuthority() string {
+ if m != nil {
+ return m.Authority
+ }
+ return ""
+}
+
+func (m *MsgGovUpdateParamsRequest) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
+// MsgGovUpdateParamsResponse is a response message for the GovUpdateParams endpoint.
+type MsgGovUpdateParamsResponse struct {
+}
+
+func (m *MsgGovUpdateParamsResponse) Reset() { *m = MsgGovUpdateParamsResponse{} }
+func (m *MsgGovUpdateParamsResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgGovUpdateParamsResponse) ProtoMessage() {}
+func (*MsgGovUpdateParamsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e09935355436fc3e, []int{1}
+}
+func (m *MsgGovUpdateParamsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgGovUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgGovUpdateParamsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgGovUpdateParamsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgGovUpdateParamsResponse.Merge(m, src)
+}
+func (m *MsgGovUpdateParamsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgGovUpdateParamsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgGovUpdateParamsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgGovUpdateParamsResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*MsgGovUpdateParamsRequest)(nil), "provenance.ibcratelimit.v1.MsgGovUpdateParamsRequest")
+ proto.RegisterType((*MsgGovUpdateParamsResponse)(nil), "provenance.ibcratelimit.v1.MsgGovUpdateParamsResponse")
+}
+
+func init() {
+ proto.RegisterFile("provenance/ibcratelimit/v1/tx.proto", fileDescriptor_e09935355436fc3e)
+}
+
+var fileDescriptor_e09935355436fc3e = []byte{
+ // 338 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2e, 0x28, 0xca, 0x2f,
+ 0x4b, 0xcd, 0x4b, 0xcc, 0x4b, 0x4e, 0xd5, 0xcf, 0x4c, 0x4a, 0x2e, 0x4a, 0x2c, 0x49, 0xcd, 0xc9,
+ 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
+ 0x92, 0x42, 0x28, 0xd2, 0x43, 0x56, 0xa4, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f,
+ 0x56, 0xa6, 0x0f, 0x62, 0x41, 0x74, 0x48, 0x49, 0x26, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0xc7, 0x43,
+ 0x24, 0x20, 0x1c, 0xa8, 0x94, 0x38, 0x84, 0xa7, 0x9f, 0x5b, 0x9c, 0x0e, 0xb2, 0x24, 0xb7, 0x38,
+ 0x1d, 0x2a, 0xa1, 0x8e, 0xc7, 0x29, 0x05, 0x89, 0x45, 0x89, 0xb9, 0x50, 0x13, 0x94, 0x96, 0x32,
+ 0x72, 0x49, 0xfa, 0x16, 0xa7, 0xbb, 0xe7, 0x97, 0x85, 0x16, 0xa4, 0x24, 0x96, 0xa4, 0x06, 0x80,
+ 0x25, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0xcc, 0xb8, 0x38, 0x13, 0x4b, 0x4b, 0x32,
+ 0xf2, 0x8b, 0x32, 0x4b, 0x2a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0x24, 0x2e, 0x6d, 0xd1,
+ 0x15, 0x81, 0x3a, 0xc2, 0x31, 0x25, 0xa5, 0x28, 0xb5, 0xb8, 0x38, 0xb8, 0xa4, 0x28, 0x33, 0x2f,
+ 0x3d, 0x08, 0xa1, 0x54, 0xc8, 0x81, 0x8b, 0x0d, 0x62, 0x8b, 0x04, 0x93, 0x02, 0xa3, 0x06, 0xb7,
+ 0x91, 0x92, 0x1e, 0x6e, 0x5f, 0xeb, 0x41, 0xac, 0x74, 0x62, 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08,
+ 0xaa, 0xcf, 0x8a, 0xaf, 0xe9, 0xf9, 0x06, 0x2d, 0x84, 0x89, 0x4a, 0x32, 0x5c, 0x52, 0xd8, 0x9c,
+ 0x59, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x6a, 0xd4, 0xc1, 0xc8, 0xc5, 0xec, 0x5b, 0x9c, 0x2e, 0xd4,
+ 0xc0, 0xc8, 0xc5, 0x8f, 0xa6, 0x46, 0xc8, 0x14, 0x9f, 0xdd, 0x38, 0xbd, 0x2e, 0x65, 0x46, 0xaa,
+ 0x36, 0x88, 0x53, 0x9c, 0x72, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
+ 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x81, 0x4b,
+ 0x36, 0x33, 0x1f, 0x8f, 0x99, 0x01, 0x8c, 0x51, 0x46, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a,
+ 0xc9, 0xf9, 0xb9, 0xfa, 0x08, 0x85, 0xba, 0x99, 0xf9, 0x48, 0x3c, 0xfd, 0x0a, 0x94, 0xf8, 0x4c,
+ 0x62, 0x03, 0x47, 0xa3, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xaf, 0x2e, 0x72, 0x7c, 0x02,
+ 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+ // GovUpdateParams is a governance proposal endpoint for updating the exchange module's params.
+ GovUpdateParams(ctx context.Context, in *MsgGovUpdateParamsRequest, opts ...grpc.CallOption) (*MsgGovUpdateParamsResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn) MsgClient {
+ return &msgClient{cc}
+}
+
+func (c *msgClient) GovUpdateParams(ctx context.Context, in *MsgGovUpdateParamsRequest, opts ...grpc.CallOption) (*MsgGovUpdateParamsResponse, error) {
+ out := new(MsgGovUpdateParamsResponse)
+ err := c.cc.Invoke(ctx, "/provenance.ibcratelimit.v1.Msg/GovUpdateParams", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+ // GovUpdateParams is a governance proposal endpoint for updating the exchange module's params.
+ GovUpdateParams(context.Context, *MsgGovUpdateParamsRequest) (*MsgGovUpdateParamsResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer) GovUpdateParams(ctx context.Context, req *MsgGovUpdateParamsRequest) (*MsgGovUpdateParamsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GovUpdateParams not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
+
+func _Msg_GovUpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgGovUpdateParamsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).GovUpdateParams(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/provenance.ibcratelimit.v1.Msg/GovUpdateParams",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).GovUpdateParams(ctx, req.(*MsgGovUpdateParamsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "provenance.ibcratelimit.v1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GovUpdateParams",
+ Handler: _Msg_GovUpdateParams_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "provenance/ibcratelimit/v1/tx.proto",
+}
+
+func (m *MsgGovUpdateParamsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgGovUpdateParamsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgGovUpdateParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Authority) > 0 {
+ i -= len(m.Authority)
+ copy(dAtA[i:], m.Authority)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Authority)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgGovUpdateParamsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgGovUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgGovUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MsgGovUpdateParamsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Authority)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Params.Size()
+ n += 1 + l + sovTx(uint64(l))
+ return n
+}
+
+func (m *MsgGovUpdateParamsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *MsgGovUpdateParamsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgGovUpdateParamsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgGovUpdateParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Authority = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+//
+// MsgGovUpdateParamsResponse declares no fields, so the switch below has
+// only a default arm: every well-formed field is skipped via skipTx, and
+// the method's real job is validating the framing — truncated varints,
+// stray end-group markers, and negative/overlong lengths are rejected.
+//
+// NOTE(review): protoc-gen-gogo generated code; hand-written comments
+// here will be lost on regeneration.
+func (m *MsgGovUpdateParamsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field tag varint: (fieldNum << 3) | wireType.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		// Wire type 4 (end group) is only legal inside a group started here;
+		// at top level it means the input is malformed.
+		if wireType == 4 {
+			return fmt.Errorf("proto: MsgGovUpdateParamsResponse: wiretype end group for non-group")
+		}
+		// Field numbers must be positive per the protobuf spec.
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MsgGovUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			// Unknown field: rewind to the tag and let skipTx consume the
+			// whole field (tag + payload) so decoding can continue.
+			iNdEx = preIndex
+			skippy, err := skipTx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			// Guard against overflow of iNdEx + skippy.
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthTx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipTx returns the number of bytes occupied by the next complete field
+// (tag plus payload) at the start of dAtA, so callers can step over
+// unknown fields. Group fields (wire types 3/4) are balanced via depth;
+// the function only returns once depth is back to zero. It returns an
+// error for truncated varints, negative lengths, or an end-group marker
+// with no matching start-group.
+//
+// NOTE(review): protoc-gen-gogo generated code; hand-written comments
+// here will be lost on regeneration.
+func skipTx(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		// Decode the field tag varint: (fieldNum << 3) | wireType.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowTx
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint: advance past continuation bytes until the final one.
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTx
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			// Fixed64 payload.
+			iNdEx += 8
+		case 2:
+			// Length-delimited: read the length varint, then skip that many bytes.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTx
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthTx
+			}
+			iNdEx += length
+		case 3:
+			// Start group: nest one level deeper (deprecated proto2 groups).
+			depth++
+		case 4:
+			// End group: must match an earlier start group.
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupTx
+			}
+			depth--
+		case 5:
+			// Fixed32 payload.
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		// iNdEx can go negative only on overflow of the additions above.
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthTx
+		}
+		// A complete field has been consumed only when all groups are closed.
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	// Ran out of input mid-field (or inside an unterminated group).
+	return 0, io.ErrUnexpectedEOF
+}
+
+// Sentinel errors returned by the generated (un)marshal code in this file.
+var (
+	ErrInvalidLengthTx        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTx          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)