Uday seiv2 rebase #473

Closed · wants to merge 33 commits

Commits (33)
ca54bb7
Adding token factory specific denom endpoint (#457)
dssei Mar 14, 2024
a0e55f4
Revert "Adding token factory specific denom endpoint" (#463)
dssei Mar 19, 2024
e8a28d3
Rework abort handling to not be dependent on extracting codes from de…
udpatil Mar 19, 2024
9ad11f9
[OCC] fix undesired colon causing re-executions (#466) (#467)
stevenlanders Mar 19, 2024
c44408a
Add readcache to validationIterator to prevent race conditions causin…
udpatil Mar 26, 2024
f50cde8
Expose parent on CacheKV (#352)
codchen Nov 17, 2023
e84526e
Set TxIndex before generating dependencies (#358)
codchen Nov 22, 2023
6b93029
change DeliverTx to take typed tx (#360)
codchen Dec 4, 2023
1298183
Integrate with pending txs in mempool (#381)
codchen Dec 16, 2023
5c4a816
Add checkTx cb (#382)
codchen Dec 20, 2023
71d56be
Add wei support in bank keeper (#386)
codchen Dec 28, 2023
8e772e1
Add ACL constants for Bank Wei prefix (#387)
codchen Dec 28, 2023
3c5acd0
[EVM] Add pending nonce support (#390)
stevenlanders Jan 4, 2024
6c440c2
rebase
codchen Jan 11, 2024
074310d
[EVM] Allow multiple txs from same account in a block (#397)
stevenlanders Jan 19, 2024
8bfcdde
Expose VersionExists (#400)
codchen Jan 23, 2024
c0424e0
Add `DeleteAll` method to store type (#402)
codchen Jan 24, 2024
ebf0877
Allow balance to go negative in SendCoinsAndWei (#417)
codchen Feb 1, 2024
3ab9417
Expose add/sub balance methods on bank keeper (#432)
codchen Feb 13, 2024
b81dc45
Remove escrow account in wei logic (#434)
codchen Feb 14, 2024
72468e6
Add evm address to keys output (#442)
codchen Feb 27, 2024
470dd97
Amplify cosmos tx priority (#443)
codchen Feb 27, 2024
16689ef
occ-evm compatibility changes
udpatil Jan 8, 2024
0198ac2
update generate estimated writeset for evm compatibility
udpatil Jan 9, 2024
b60b6c8
use absoluteIndex instead of relative Index for txIndex
udpatil Jan 11, 2024
992c5ce
Fix mvkv to adhere to updated KVStore interface
udpatil Jan 25, 2024
98061c8
update scheduler and tests
udpatil Jan 26, 2024
9032b1f
update scheduler test call pattern
udpatil Feb 28, 2024
5bdb1d8
Integrate with tendermint EVM tx replacement logic (#446)
codchen Mar 7, 2024
08f4b6f
Fix indexesValidated and PrefillEstimates to operate on absolute idx …
udpatil Mar 11, 2024
1c2ea84
Modify max incarnation fallback (#460)
udpatil Mar 14, 2024
b38b6b6
[OCC] if no txs, avoid scheduler overhead, limit tasks (#468)
stevenlanders Mar 20, 2024
936f5e3
Fix DeleteAll for CacheKV/MultiVersionKV so that all keys are properl…
codchen Mar 22, 2024
6 changes: 3 additions & 3 deletions .github/workflows/test.yml
@@ -24,7 +24,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: 1.21
- name: Create a file with all core Cosmos SDK pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 10 files
@@ -83,7 +83,7 @@ jobs:
- uses: actions/setup-python@v3
- uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: 1.21
- uses: technote-space/[email protected]
with:
PATTERNS: |
@@ -118,7 +118,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: 1.21

# Download all coverage reports from the 'tests' job
- name: Download coverage reports
81 changes: 60 additions & 21 deletions baseapp/abci.go
@@ -12,8 +12,6 @@ import (
"syscall"
"time"

"github.com/cosmos/cosmos-sdk/tasks"

"github.com/armon/go-metrics"
"github.com/gogo/protobuf/proto"
abci "github.com/tendermint/tendermint/abci/types"
@@ -23,6 +21,7 @@ import (

"github.com/cosmos/cosmos-sdk/codec"
snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
"github.com/cosmos/cosmos-sdk/tasks"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
@@ -203,7 +202,7 @@ func (app *BaseApp) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) (res abc
// internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx
// will contain relevant error information. Regardless of tx execution outcome,
// the ResponseCheckTx will contain relevant gas execution context.
func (app *BaseApp) CheckTx(ctx context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
func (app *BaseApp) CheckTx(ctx context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTxV2, error) {
defer telemetry.MeasureSince(time.Now(), "abci", "check_tx")

var mode runTxMode
@@ -220,34 +219,55 @@ func (app *BaseApp) CheckTx(ctx context.Context, req *abci.RequestCheckTx) (*abc
}

sdkCtx := app.getContextForTx(mode, req.Tx)
gInfo, result, _, priority, err := app.runTx(sdkCtx, mode, req.Tx)
tx, err := app.txDecoder(req.Tx)
if err != nil {
res := sdkerrors.ResponseCheckTx(err, 0, 0, app.trace)
return &abci.ResponseCheckTxV2{ResponseCheckTx: &res}, err
}
gInfo, result, _, priority, pendingTxChecker, expireTxHandler, txCtx, err := app.runTx(sdkCtx, mode, tx, sha256.Sum256(req.Tx))
if err != nil {
res := sdkerrors.ResponseCheckTx(err, gInfo.GasWanted, gInfo.GasUsed, app.trace)
return &res, err
return &abci.ResponseCheckTxV2{ResponseCheckTx: &res}, err
}

return &abci.ResponseCheckTx{
GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints?
Data: result.Data,
Priority: priority,
}, nil
res := &abci.ResponseCheckTxV2{
ResponseCheckTx: &abci.ResponseCheckTx{
GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints?
Data: result.Data,
Priority: priority,
},
ExpireTxHandler: expireTxHandler,
EVMNonce: txCtx.EVMNonce(),
EVMSenderAddress: txCtx.EVMSenderAddress(),
IsEVM: txCtx.IsEVM(),
}
if pendingTxChecker != nil {
res.IsPendingTransaction = true
res.Checker = pendingTxChecker
}

return res, nil
}
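For orientation, here is a minimal sketch of how a mempool-side caller might consume the new fields on ResponseCheckTxV2. The hypotheticalMempool type and the way the checker is retained are assumptions invented for illustration; the real consumer of this response lives in sei-tendermint, not in this diff.

package checktxexample

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// hypotheticalMempool is an illustration-only stand-in for the real
// sei-tendermint mempool; it is not part of this PR.
type hypotheticalMempool struct {
	pending []abci.PendingTxChecker
}

// routeCheckTxResult branches on the V2-only fields returned by CheckTx.
func routeCheckTxResult(res *abci.ResponseCheckTxV2, mp *hypotheticalMempool) {
	if res.ResponseCheckTx.Code != abci.CodeTypeOK {
		return // failed CheckTx; never enqueued
	}
	if res.IsPendingTransaction {
		// Retain the checker and re-consult it later; res.ExpireTxHandler
		// would be wired to the eviction path (omitted here).
		mp.pending = append(mp.pending, res.Checker)
		return
	}
	if res.IsEVM {
		// Sender address and nonce let the mempool order and replace
		// EVM transactions per account.
		_ = res.EVMSenderAddress
		_ = res.EVMNonce
	}
}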

// DeliverTxBatch executes multiple txs
func (app *BaseApp) DeliverTxBatch(ctx sdk.Context, req sdk.DeliverTxBatchRequest) (res sdk.DeliverTxBatchResponse) {
scheduler := tasks.NewScheduler(app.concurrencyWorkers, app.TracingInfo, app.DeliverTx)
// This will basically no-op the actual prefill if the metadata for the txs is empty
responses := make([]*sdk.DeliverTxResult, 0, len(req.TxEntries))

// process all txs; this will also initialize the MVS if prefill estimates were disabled
if len(req.TxEntries) == 0 {
return sdk.DeliverTxBatchResponse{Results: responses}
}

// avoid overhead for empty batches
scheduler := tasks.NewScheduler(app.concurrencyWorkers, app.TracingInfo, app.DeliverTx)
txRes, err := scheduler.ProcessAll(ctx, req.TxEntries)
if err != nil {
// TODO: handle error
ctx.Logger().Error("error while processing scheduler", "err", err)
panic(err)
}

responses := make([]*sdk.DeliverTxResult, 0, len(req.TxEntries))
for _, tx := range txRes {
responses = append(responses, &sdk.DeliverTxResult{Response: tx})
}

return sdk.DeliverTxBatchResponse{Results: responses}
}
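As a usage sketch, a consensus-side caller can funnel a block's transactions through this entry point as below. Only the Request field of sdk.DeliverTxEntry is populated here; the other entry fields this branch introduces (decoded tx, checksum, and so on) are omitted, so treat the construction as illustrative rather than authoritative.

package occexample

import (
	abci "github.com/tendermint/tendermint/abci/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// deliverBlock runs a block's raw txs through the OCC batch path.
func deliverBlock(app *baseapp.BaseApp, ctx sdk.Context, txs [][]byte) []abci.ResponseDeliverTx {
	entries := make([]*sdk.DeliverTxEntry, 0, len(txs))
	for _, raw := range txs {
		entries = append(entries, &sdk.DeliverTxEntry{Request: abci.RequestDeliverTx{Tx: raw}})
	}

	// An empty batch returns immediately; the scheduler is never constructed.
	res := app.DeliverTxBatch(ctx, sdk.DeliverTxBatchRequest{TxEntries: entries})

	out := make([]abci.ResponseDeliverTx, 0, len(res.Results))
	for _, r := range res.Results {
		out = append(out, r.Response)
	}
	return out
}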

@@ -256,9 +276,7 @@ func (app *BaseApp) DeliverTxBatch(ctx sdk.Context, req sdk.DeliverTxBatchReques
// Otherwise, the ResponseDeliverTx will contain relevant error information.
// Regardless of tx execution outcome, the ResponseDeliverTx will contain relevant
// gas execution context.
// TODO: (occ) this is the function called from sei-chain to perform execution of a transaction.
// We'd likely replace this with an execution tasks that is scheduled by the OCC scheduler
func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx) (res abci.ResponseDeliverTx) {
func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res abci.ResponseDeliverTx) {
defer telemetry.MeasureSince(time.Now(), "abci", "deliver_tx")
defer func() {
for _, streamingListener := range app.abciListeners {
Expand All @@ -278,7 +296,7 @@ func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx) (res a
telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
}()

gInfo, result, anteEvents, _, err := app.runTx(ctx.WithTxBytes(req.Tx).WithVoteInfos(app.voteInfos), runTxModeDeliver, req.Tx)
gInfo, result, anteEvents, _, _, _, resCtx, err := app.runTx(ctx.WithTxBytes(req.Tx).WithVoteInfos(app.voteInfos), runTxModeDeliver, tx, checksum)
if err != nil {
resultStr = "failed"
// if we have a result, use those events instead of just the anteEvents
Expand All @@ -288,13 +306,20 @@ func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx) (res a
return sdkerrors.ResponseDeliverTxWithEvents(err, gInfo.GasWanted, gInfo.GasUsed, sdk.MarkEventsToIndex(anteEvents, app.indexEvents), app.trace)
}

return abci.ResponseDeliverTx{
res = abci.ResponseDeliverTx{
GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints?
GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints?
Log: result.Log,
Data: result.Data,
Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents),
}
if resCtx.IsEVM() {
res.EvmTxInfo = &abci.EvmTxInfo{
SenderAddress: resCtx.EVMSenderAddress(),
Nonce: resCtx.EVMNonce(),
}
}
return
}

func (app *BaseApp) WriteStateToCommitAndGetWorkingHash() []byte {
@@ -1048,6 +1073,20 @@ func (app *BaseApp) ProcessProposal(ctx context.Context, req *abci.RequestProces
}
}()

defer func() {
if err := recover(); err != nil {
app.logger.Error(
"panic recovered in ProcessProposal",
"height", req.Height,
"time", req.Time,
"hash", fmt.Sprintf("%X", req.Hash),
"panic", err,
)

resp = &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}
}
}()

if app.processProposalHandler != nil {
resp, err = app.processProposalHandler(app.processProposalState.ctx, req)
if err != nil {
56 changes: 36 additions & 20 deletions baseapp/baseapp.go
@@ -2,7 +2,6 @@ package baseapp

import (
"context"
"crypto/sha256"
"fmt"
"reflect"
"strings"
@@ -158,8 +157,9 @@ type BaseApp struct { //nolint: maligned

ChainID string

votesInfoLock sync.RWMutex
commitLock *sync.Mutex
votesInfoLock sync.RWMutex
commitLock *sync.Mutex
checkTxStateLock *sync.RWMutex

compactionInterval uint64

@@ -275,7 +275,8 @@ func NewBaseApp(
TracingInfo: &tracing.Info{
Tracer: &tr,
},
commitLock: &sync.Mutex{},
commitLock: &sync.Mutex{},
checkTxStateLock: &sync.RWMutex{},
}

app.TracingInfo.SetContext(context.Background())
@@ -530,6 +531,8 @@ func (app *BaseApp) IsSealed() bool { return app.sealed }
func (app *BaseApp) setCheckState(header tmproto.Header) {
ms := app.cms.CacheMultiStore()
ctx := sdk.NewContext(ms, header, true, app.logger).WithMinGasPrices(app.minGasPrices)
app.checkTxStateLock.Lock()
defer app.checkTxStateLock.Unlock()
if app.checkState == nil {
app.checkState = &state{
ms: ms,
@@ -802,16 +805,15 @@ func (app *BaseApp) getContextForTx(mode runTxMode, txBytes []byte) sdk.Context

// cacheTxContext returns a new context based off of the provided context with
// a branched multi-store.
// TODO: (occ) This is an example of where we wrap the multistore with a cache multistore, and then return a modified context using that multistore
func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, sdk.CacheMultiStore) {
func (app *BaseApp) cacheTxContext(ctx sdk.Context, checksum [32]byte) (sdk.Context, sdk.CacheMultiStore) {
ms := ctx.MultiStore()
// TODO: https://github.com/cosmos/cosmos-sdk/issues/2824
msCache := ms.CacheMultiStore()
if msCache.TracingEnabled() {
msCache = msCache.SetTracingContext(
sdk.TraceContext(
map[string]interface{}{
"txHash": fmt.Sprintf("%X", sha256.Sum256(txBytes)),
"txHash": fmt.Sprintf("%X", checksum),
},
),
).(sdk.CacheMultiStore)
@@ -827,8 +829,16 @@ func (app *BaseApp) cacheTxContext(ctx sdk.Context, checksum [32]byte) (sdk.Context
// Note, gas execution info is always returned. A reference to a Result is
// returned if the tx does not run out of gas and if all the messages are valid
// and execute successfully. An error is returned otherwise.
func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, priority int64, err error) {

func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, tx sdk.Tx, checksum [32]byte) (
gInfo sdk.GasInfo,
result *sdk.Result,
anteEvents []abci.Event,
priority int64,
pendingTxChecker abci.PendingTxChecker,
expireHandler abci.ExpireTxHandler,
txCtx sdk.Context,
err error,
) {
defer telemetry.MeasureThroughputSinceWithLabels(
telemetry.TxCount,
[]metrics.Label{
@@ -852,7 +862,7 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
spanCtx, span := app.TracingInfo.StartWithContext("RunTx", ctx.TraceSpanContext())
defer span.End()
ctx = ctx.WithTraceSpanContext(spanCtx)
span.SetAttributes(attribute.String("txHash", fmt.Sprintf("%X", sha256.Sum256(txBytes))))
span.SetAttributes(attribute.String("txHash", fmt.Sprintf("%X", checksum)))
}

// NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
@@ -875,15 +885,14 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
gInfo = sdk.GasInfo{GasWanted: gasWanted, GasUsed: ctx.GasMeter().GasConsumed()}
}()

tx, err := app.txDecoder(txBytes)
if err != nil {
return sdk.GasInfo{}, nil, nil, 0, err
if tx == nil {
return sdk.GasInfo{}, nil, nil, 0, nil, nil, ctx, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx decode error")
}

msgs := tx.GetMsgs()

if err := validateBasicTxMsgs(msgs); err != nil {
return sdk.GasInfo{}, nil, nil, 0, err
return sdk.GasInfo{}, nil, nil, 0, nil, nil, ctx, err
}

if app.anteHandler != nil {
Expand All @@ -904,7 +913,7 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
// NOTE: Alternatively, we could require that AnteHandler ensures that
// writes do not happen if aborted/failed. This may have some
// performance benefits, but it'll be more difficult to get right.
anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
anteCtx, msCache = app.cacheTxContext(ctx, checksum)
anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
newCtx, err := app.anteHandler(anteCtx, tx, mode == runTxModeSimulate)

Expand All @@ -928,7 +937,7 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
// GasMeter expected to be set in AnteHandler
gasWanted = ctx.GasMeter().Limit()
if err != nil {
return gInfo, nil, nil, 0, err
return gInfo, nil, nil, 0, nil, nil, ctx, err
}

// Don't need to validate in checkTx mode
@@ -944,11 +953,13 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
op.EmitValidationFailMetrics()
}
errMessage := fmt.Sprintf("Invalid Concurrent Execution antehandler missing %d access operations", len(missingAccessOps))
return gInfo, nil, nil, 0, sdkerrors.Wrap(sdkerrors.ErrInvalidConcurrencyExecution, errMessage)
return gInfo, nil, nil, 0, nil, nil, ctx, sdkerrors.Wrap(sdkerrors.ErrInvalidConcurrencyExecution, errMessage)
}
}

priority = ctx.Priority()
pendingTxChecker = ctx.PendingTxChecker()
expireHandler = ctx.ExpireTxHandler()
msCache.Write()
anteEvents = events.ToABCIEvents()
if app.TracingEnabled {
@@ -959,7 +970,7 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
// Create a new Context based off of the existing Context with a MultiStore branch
// in case message processing fails. At this point, the MultiStore
// is a branch of a branch.
runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
runMsgCtx, msCache := app.cacheTxContext(ctx, checksum)

// Attempt to execute all messages and only update state if all messages pass
// and we're in DeliverTx. Note, runMsgs will never return a reference to a
@@ -974,7 +985,10 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
// append the events in the order of occurrence
result.Events = append(anteEvents, result.Events...)
}
return gInfo, result, anteEvents, priority, err
if ctx.CheckTxCallback() != nil {
ctx.CheckTxCallback()(ctx, err)
}
return gInfo, result, anteEvents, priority, pendingTxChecker, expireHandler, ctx, err
}
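The widened signature changes the calling convention: callers decode the tx and hash the raw bytes exactly once, then pass both in, and runTx hands back the pending-tx checker, expire handler, and tx context alongside the original values. A condensed sketch of that convention, written as if inside the baseapp package (it assumes the package's existing crypto/sha256 import; error paths are trimmed):

// Sketch only: mirrors how DeliverTx/CheckTx above invoke runTx.
func (app *BaseApp) deliverOneSketch(ctx sdk.Context, txBytes []byte) error {
	tx, err := app.txDecoder(txBytes) // decode once, in the caller
	if err != nil {
		return err
	}
	checksum := sha256.Sum256(txBytes) // hash once; reused for tracing spans

	_, _, _, _, checker, expire, txCtx, err := app.runTx(ctx, runTxModeDeliver, tx, checksum)
	_, _, _ = checker, expire, txCtx // surfaced to ABCI responses by the real callers
	return err
}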

// runMsgs iterates through a list of messages and executes them with the provided
@@ -1021,7 +1035,7 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, mode runTxMode) (*s
err error
)

msgCtx, msgMsCache := app.cacheTxContext(ctx, []byte{})
msgCtx, msgMsCache := app.cacheTxContext(ctx, [32]byte{})
msgCtx = msgCtx.WithMessageIndex(i)

startTime := time.Now()
@@ -1164,5 +1178,7 @@ func (app *BaseApp) ReloadDB() error {
}

func (app *BaseApp) GetCheckCtx() sdk.Context {
app.checkTxStateLock.RLock()
defer app.checkTxStateLock.RUnlock()
return app.checkState.ctx
}
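The new checkTxStateLock exists because setCheckState (run on commit) swaps the checkState pointer while concurrent CheckTx goroutines read it through GetCheckCtx. A minimal, self-contained model of the guarded pointer-swap pattern, using stand-in types rather than the sdk ones:

package lockexample

import "sync"

// demoState stands in for baseapp's checkState.
type demoState struct{ height int64 }

type demoApp struct {
	mu    sync.RWMutex
	check *demoState
}

// Writer side: called once per committed block to swap the pointer.
func (a *demoApp) setCheckState(s *demoState) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.check = s
}

// Reader side: called concurrently by every CheckTx.
func (a *demoApp) getCheckState() *demoState {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return a.check
}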