diff --git a/.gitignore b/.gitignore
index 4efbf96d..1356373b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@
 # vendor/
 /dist/
 .vscode
+.idea
 config/config.mainnet.toml
 config/config.testnet.toml
diff --git a/bridgectrl/bridgectrl.go b/bridgectrl/bridgectrl.go
index 49738050..d06a3e66 100644
--- a/bridgectrl/bridgectrl.go
+++ b/bridgectrl/bridgectrl.go
@@ -84,9 +84,10 @@ func (bt *BridgeController) GetExitRoot(ctx context.Context, networkID int, dbTx
 }
 
 func (bt *BridgeController) AddRollupExitLeaf(ctx context.Context, rollupLeaf etherman.RollupExitLeaf, dbTx pgx.Tx) error {
+	logger := log.LoggerFromCtx(ctx)
 	err := bt.rollupsTree.addRollupExitLeaf(ctx, rollupLeaf, dbTx)
 	if err != nil {
-		log.Error("error adding rollupleaf. Error: ", err)
+		logger.Error("error adding rollupleaf. Error: ", err)
 		return err
 	}
 	return nil
diff --git a/bridgectrl/merkletree.go b/bridgectrl/merkletree.go
index 44fd77c3..3349c84b 100644
--- a/bridgectrl/merkletree.go
+++ b/bridgectrl/merkletree.go
@@ -187,6 +187,7 @@ func buildIntermediate(leaves [][KeyLen]byte) ([][][]byte, [][32]byte) {
 }
 
 func (mt *MerkleTree) updateLeaf(ctx context.Context, depositID uint64, leaves [][KeyLen]byte, dbTx pgx.Tx) error {
+	logger := log.LoggerFromCtx(ctx)
 	var (
 		nodes [][][][]byte
 		ns    [][][]byte
@@ -206,7 +207,7 @@ func (mt *MerkleTree) updateLeaf(ctx context.Context, depositID uint64, leaves [
 	if len(ns) != 1 {
 		return fmt.Errorf("error: more than one root detected: %+v", nodes)
 	}
-	log.Debug("Root calculated: ", common.Bytes2Hex(ns[0][0]))
+	logger.Debug("Root calculated: ", common.Bytes2Hex(ns[0][0]))
 	err := mt.store.SetRoot(ctx, ns[0][0], depositID, mt.network, dbTx)
 	if err != nil {
 		return err
 	}
@@ -260,7 +261,8 @@ func (mt *MerkleTree) getLeaves(ctx context.Context, dbTx pgx.Tx) ([][KeyLen]byt
 	return result, nil
 }
 
-func (mt *MerkleTree) buildMTRoot(leaves [][KeyLen]byte) (common.Hash, error) {
+func (mt *MerkleTree) buildMTRoot(ctx context.Context, leaves [][KeyLen]byte) (common.Hash, error) {
+	logger := log.LoggerFromCtx(ctx)
 	var (
 		nodes [][][][]byte
 		ns    [][][]byte
@@ -279,13 +281,13 @@ func (mt *MerkleTree) buildMTRoot(leaves [][KeyLen]byte) (common.Hash, error) {
 	if len(ns) != 1 {
 		return common.Hash{}, fmt.Errorf("error: more than one root detected: %+v", nodes)
 	}
-	log.Debug("Root calculated: ", common.Bytes2Hex(ns[0][0]))
+	logger.Debug("Root calculated: ", common.Bytes2Hex(ns[0][0]))
 
 	return common.BytesToHash(ns[0][0]), nil
 }
 
 func (mt MerkleTree) storeLeaves(ctx context.Context, leaves [][KeyLen]byte, blockID uint64, dbTx pgx.Tx) error {
-	root, err := mt.buildMTRoot(leaves)
+	root, err := mt.buildMTRoot(ctx, leaves)
 	if err != nil {
 		return err
 	}
@@ -311,9 +313,10 @@ func (mt MerkleTree) storeLeaves(ctx context.Context, leaves [][KeyLen]byte, blo
 // }
 
 func (mt MerkleTree) addRollupExitLeaf(ctx context.Context, rollupLeaf etherman.RollupExitLeaf, dbTx pgx.Tx) error {
+	logger := log.LoggerFromCtx(ctx)
 	storedRollupLeaves, err := mt.store.GetLatestRollupExitLeaves(ctx, dbTx)
 	if err != nil {
-		log.Error("error getting latest rollup exit leaves. Error: ", err)
+		logger.Error("error getting latest rollup exit leaves. Error: ", err)
 		return err
 	}
 	// If rollupLeaf.RollupId is lower or equal than len(storedRollupLeaves), we can add it in the proper position of the array
@@ -346,7 +349,7 @@ func (mt MerkleTree) addRollupExitLeaf(ctx context.Context, rollupLeaf etherman.
 	}
 	err = mt.storeLeaves(ctx, leaves, rollupLeaf.BlockID, dbTx)
 	if err != nil {
-		log.Error("error storing leaves. 
Error: ", err) + logger.Error("error storing leaves. Error: ", err) return err } return nil diff --git a/bridgectrl/merkletree_test.go b/bridgectrl/merkletree_test.go index 0185d9e7..ac1367d8 100644 --- a/bridgectrl/merkletree_test.go +++ b/bridgectrl/merkletree_test.go @@ -326,7 +326,7 @@ func TestBuildMTRootAndStore(t *testing.T) { } if len(leaves) != 0 { - root, err := mt.buildMTRoot(leaves) + root, err := mt.buildMTRoot(ctx, leaves) require.NoError(t, err) require.Equal(t, testVector.CurrentRoot, root.String()) } @@ -334,7 +334,7 @@ func TestBuildMTRootAndStore(t *testing.T) { var res [KeyLen]byte copy(res[:], common.Hex2Bytes(testVector.NewLeaf.CurrentHash[2:])) leaves = append(leaves, res) - newRoot, err := mt.buildMTRoot(leaves) + newRoot, err := mt.buildMTRoot(ctx, leaves) require.NoError(t, err) require.Equal(t, testVector.NewRoot, newRoot.String()) @@ -452,7 +452,7 @@ func TestPerformanceComputeRoot(t *testing.T) { log.Debug("End creating leaves: ", time.Now().Unix()-initTime) initTime = time.Now().Unix() log.Debug("Init computing root: ", initTime) - _, err = mt.buildMTRoot(leaves) + _, err = mt.buildMTRoot(ctx, leaves) require.NoError(t, err) log.Debug("End creating leaves: ", time.Now().Unix()-initTime) } diff --git a/claimtxman/claimtxman.go b/claimtxman/claimtxman.go index cc0e1ca4..b891431d 100644 --- a/claimtxman/claimtxman.go +++ b/claimtxman/claimtxman.go @@ -61,6 +61,8 @@ func NewClaimTxManager(cfg Config, chExitRootEvent chan *etherman.GlobalExitRoot } ctx, cancel := context.WithCancel(ctx) auth, err := client.GetSignerFromKeystore(ctx, cfg.PrivateKey) + logger := log.WithFields("component", fmt.Sprintf("claimtxman_%v", l2NetworkID)) + ctx = log.CtxWithLogger(ctx, logger) return &ClaimTxManager{ ctx: ctx, cancel: cancel, @@ -83,90 +85,94 @@ func NewClaimTxManager(cfg Config, chExitRootEvent chan *etherman.GlobalExitRoot func (tm *ClaimTxManager) Start() { ticker := time.NewTicker(tm.cfg.FrequencyToMonitorTxs.Duration) for { + logger := utils.LoggerWithRandomTraceID(log.LoggerFromCtx(tm.ctx)) + ctx := log.CtxWithLogger(tm.ctx, logger) select { case <-tm.ctx.Done(): return case netID := <-tm.chSynced: if netID == tm.l2NetworkID && !tm.synced { - log.Info("NetworkID synced: ", netID) + logger.Info("NetworkID synced: ", netID) tm.synced = true } case ger := <-tm.chExitRootEvent: if tm.synced { - log.Debug("UpdateDepositsStatus for ger: ", ger.GlobalExitRoot) + logger.Debug("UpdateDepositsStatus for ger: ", ger.GlobalExitRoot) go func() { - err := tm.updateDepositsStatus(ger) + err := tm.updateDepositsStatus(ctx, ger) if err != nil { - log.Errorf("failed to update deposits status: %v", err) + logger.Errorf("failed to update deposits status: %v", err) } }() } else { - log.Infof("Waiting for networkID %d to be synced before processing deposits", tm.l2NetworkID) + logger.Infof("Waiting for networkID %d to be synced before processing deposits", tm.l2NetworkID) } case <-ticker.C: - err := tm.monitorTxs(tm.ctx) + err := tm.monitorTxs(ctx) if err != nil { - log.Errorf("failed to monitor txs: %v", err) + logger.Errorf("failed to monitor txs: %v", err) } } } } -func (tm *ClaimTxManager) updateDepositsStatus(ger *etherman.GlobalExitRoot) error { - dbTx, err := tm.storage.BeginDBTransaction(tm.ctx) +func (tm *ClaimTxManager) updateDepositsStatus(ctx context.Context, ger *etherman.GlobalExitRoot) error { + logger := log.LoggerFromCtx(ctx) + dbTx, err := tm.storage.BeginDBTransaction(ctx) if err != nil { return err } - err = tm.processDepositStatus(ger, dbTx) + err = 
tm.processDepositStatus(ctx, ger, dbTx) if err != nil { - log.Errorf("error processing ger. Error: %v", err) - rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) + logger.Errorf("error processing ger. Error: %v", err) + rollbackErr := tm.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("claimtxman error rolling back state. RollbackErr: %v, err: %s", rollbackErr, err.Error()) + logger.Errorf("claimtxman error rolling back state. RollbackErr: %v, err: %s", rollbackErr, err.Error()) return rollbackErr } return err } - err = tm.storage.Commit(tm.ctx, dbTx) + err = tm.storage.Commit(ctx, dbTx) if err != nil { - log.Errorf("AddClaimTx committing dbTx. Err: %v", err) - rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) + logger.Errorf("AddClaimTx committing dbTx. Err: %v", err) + rollbackErr := tm.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Fatalf("claimtxman error rolling back state. RollbackErr: %s, err: %s", rollbackErr.Error(), err.Error()) + logger.Fatalf("claimtxman error rolling back state. RollbackErr: %s, err: %s", rollbackErr.Error(), err.Error()) } - log.Fatalf("AddClaimTx committing dbTx, err: %s", err.Error()) + logger.Fatalf("AddClaimTx committing dbTx, err: %s", err.Error()) } return nil } -func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbTx pgx.Tx) error { +func (tm *ClaimTxManager) processDepositStatus(ctx context.Context, ger *etherman.GlobalExitRoot, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) if ger.BlockID != 0 { // L2 exit root is updated - log.Infof("Rollup exitroot %v is updated", ger.ExitRoots[1]) - if err := tm.storage.UpdateL2DepositsStatus(tm.ctx, ger.ExitRoots[1][:], tm.rollupID, tm.l2NetworkID, dbTx); err != nil { - log.Errorf("error updating L2DepositsStatus. Error: %v", err) + logger.Infof("Rollup exitroot %v is updated", ger.ExitRoots[1]) + if err := tm.storage.UpdateL2DepositsStatus(ctx, ger.ExitRoots[1][:], tm.rollupID, tm.l2NetworkID, dbTx); err != nil { + logger.Errorf("error updating L2DepositsStatus. Error: %v", err) return err } } else { // L1 exit root is updated in the trusted state - log.Infof("Mainnet exitroot %v is updated", ger.ExitRoots[0]) - deposits, err := tm.storage.UpdateL1DepositsStatus(tm.ctx, ger.ExitRoots[0][:], dbTx) + logger.Infof("Mainnet exitroot %v is updated", ger.ExitRoots[0]) + deposits, err := tm.storage.UpdateL1DepositsStatus(ctx, ger.ExitRoots[0][:], dbTx) if err != nil { - log.Errorf("error getting and updating L1DepositsStatus. Error: %v", err) + logger.Errorf("error getting and updating L1DepositsStatus. Error: %v", err) return err } for _, deposit := range deposits { - claimHash, err := tm.bridgeService.GetDepositStatus(tm.ctx, deposit.DepositCount, deposit.DestinationNetwork) + claimHash, err := tm.bridgeService.GetDepositStatus(ctx, deposit.DepositCount, deposit.DestinationNetwork) if err != nil { - log.Errorf("error getting deposit status for deposit %d. Error: %v", deposit.DepositCount, err) + logger.Errorf("error getting deposit status for deposit %d. 
Error: %v", deposit.DepositCount, err) return err } - if len(claimHash) > 0 || deposit.LeafType == LeafTypeMessage && !tm.isDepositMessageAllowed(deposit) { - log.Infof("Ignoring deposit: %d, leafType: %d, claimHash: %s, deposit.OriginalAddress: %s", deposit.DepositCount, deposit.LeafType, claimHash, deposit.OriginalAddress.String()) + if len(claimHash) > 0 || deposit.LeafType == LeafTypeMessage && !tm.isDepositMessageAllowed(ctx, deposit) { + logger.Infof("Ignoring deposit: %d, leafType: %d, claimHash: %s, deposit.OriginalAddress: %s", deposit.DepositCount, deposit.LeafType, claimHash, deposit.OriginalAddress.String()) continue } - log.Infof("create the claim tx for the deposit %d", deposit.DepositCount) - ger, proof, rollupProof, err := tm.bridgeService.GetClaimProof(deposit.DepositCount, deposit.NetworkID, dbTx) + logger.Infof("create the claim tx for the deposit %d", deposit.DepositCount) + ger, proof, rollupProof, err := tm.bridgeService.GetClaimProof(ctx, deposit.DepositCount, deposit.NetworkID, dbTx) if err != nil { - log.Errorf("error getting Claim Proof for deposit %d. Error: %v", deposit.DepositCount, err) + logger.Errorf("error getting Claim Proof for deposit %d. Error: %v", deposit.DepositCount, err) return err } var ( @@ -177,7 +183,7 @@ func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbT mtProof[i] = proof[i] mtRollupProof[i] = rollupProof[i] } - tx, err := tm.l2Node.BuildSendClaim(tm.ctx, deposit, mtProof, mtRollupProof, + tx, err := tm.l2Node.BuildSendClaim(ctx, deposit, mtProof, mtRollupProof, ðerman.GlobalExitRoot{ ExitRoots: []common.Hash{ ger.ExitRoots[0], @@ -185,11 +191,11 @@ func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbT }}, 1, 1, 1, tm.rollupID, tm.auth) if err != nil { - log.Errorf("error BuildSendClaim tx for deposit %d. Error: %v", deposit.DepositCount, err) + logger.Errorf("error BuildSendClaim tx for deposit %d. Error: %v", deposit.DepositCount, err) return err } - if err = tm.addClaimTx(deposit.DepositCount, tm.auth.From, tx.To(), nil, tx.Data(), dbTx); err != nil { - log.Errorf("error adding claim tx for deposit %d. Error: %v", deposit.DepositCount, err) + if err = tm.addClaimTx(ctx, deposit.DepositCount, tm.auth.From, tx.To(), nil, tx.Data(), dbTx); err != nil { + logger.Errorf("error adding claim tx for deposit %d. Error: %v", deposit.DepositCount, err) return err } } @@ -197,19 +203,20 @@ func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbT return nil } -func (tm *ClaimTxManager) isDepositMessageAllowed(deposit *etherman.Deposit) bool { +func (tm *ClaimTxManager) isDepositMessageAllowed(ctx context.Context, deposit *etherman.Deposit) bool { + logger := log.LoggerFromCtx(ctx) for _, addr := range tm.cfg.AuthorizedClaimMessageAddresses { if deposit.OriginalAddress == addr { - log.Infof("MessageBridge from authorized account detected: %+v, account: %s", deposit, addr.String()) + logger.Infof("MessageBridge from authorized account detected: %+v, account: %s", deposit, addr.String()) return true } } - log.Infof("MessageBridge Not authorized. DepositCount: %d", deposit.DepositCount) + logger.Infof("MessageBridge Not authorized. 
DepositCount: %d", deposit.DepositCount) return false } -func (tm *ClaimTxManager) getNextNonce(from common.Address) (uint64, error) { - nonce, err := tm.l2Node.NonceAt(tm.ctx, from, nil) +func (tm *ClaimTxManager) getNextNonce(ctx context.Context, from common.Address) (uint64, error) { + nonce, err := tm.l2Node.NonceAt(ctx, from, nil) if err != nil { return 0, err } @@ -222,7 +229,8 @@ func (tm *ClaimTxManager) getNextNonce(from common.Address) (uint64, error) { return nonce, nil } -func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error { +func (tm *ClaimTxManager) addClaimTx(ctx context.Context, depositCount uint, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) // get gas tx := ethereum.CallMsg{ From: from, @@ -230,21 +238,21 @@ func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to Value: value, Data: data, } - gas, err := tm.l2Node.EstimateGas(tm.ctx, tx) + gas, err := tm.l2Node.EstimateGas(ctx, tx) for i := 1; err != nil && err.Error() != runtime.ErrExecutionReverted.Error() && i < tm.cfg.RetryNumber; i++ { - log.Warnf("error while doing gas estimation. Retrying... Error: %v, Data: %s", err, common.Bytes2Hex(data)) + logger.Warnf("error while doing gas estimation. Retrying... Error: %v, Data: %s", err, common.Bytes2Hex(data)) time.Sleep(tm.cfg.RetryInterval.Duration) - gas, err = tm.l2Node.EstimateGas(tm.ctx, tx) + gas, err = tm.l2Node.EstimateGas(ctx, tx) } if err != nil { - log.Errorf("failed to estimate gas. Ignoring tx... Error: %v, data: %s", err, common.Bytes2Hex(data)) + logger.Errorf("failed to estimate gas. Ignoring tx... Error: %v, data: %s", err, common.Bytes2Hex(data)) return nil } // get next nonce - nonce, err := tm.getNextNonce(from) + nonce, err := tm.getNextNonce(ctx, from) if err != nil { err := fmt.Errorf("failed to get current nonce: %v", err) - log.Errorf("error getting next nonce. Error: %s", err.Error()) + logger.Errorf("error getting next nonce. Error: %s", err.Error()) return err } @@ -256,10 +264,10 @@ func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to } // add to storage - err = tm.storage.AddClaimTx(tm.ctx, mTx, dbTx) + err = tm.storage.AddClaimTx(ctx, mTx, dbTx) if err != nil { err := fmt.Errorf("failed to add tx to get monitored: %v", err) - log.Errorf("error adding claim tx to db. Error: %s", err.Error()) + logger.Errorf("error adding claim tx to db. Error: %s", err.Error()) return err } @@ -268,7 +276,8 @@ func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to // monitorTxs process all pending monitored tx func (tm *ClaimTxManager) monitorTxs(ctx context.Context) error { - dbTx, err := tm.storage.BeginDBTransaction(tm.ctx) + logger := log.LoggerFromCtx(ctx) + dbTx, err := tm.storage.BeginDBTransaction(ctx) if err != nil { return err } @@ -276,21 +285,22 @@ func (tm *ClaimTxManager) monitorTxs(ctx context.Context) error { statusesFilter := []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated} mTxs, err := tm.storage.GetClaimTxsByStatus(ctx, statusesFilter, dbTx) if err != nil { - log.Errorf("failed to get created monitored txs: %v", err) - rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) + logger.Errorf("failed to get created monitored txs: %v", err) + rollbackErr := tm.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("claimtxman error rolling back state. 
RollbackErr: %s, err: %v", rollbackErr.Error(), err) + logger.Errorf("claimtxman error rolling back state. RollbackErr: %s, err: %v", rollbackErr.Error(), err) return rollbackErr } return fmt.Errorf("failed to get created monitored txs: %v", err) } isResetNonce := false // it will reset the nonce in one cycle - log.Infof("found %v monitored tx to process", len(mTxs)) + logger.Infof("found %v monitored tx to process", len(mTxs)) for _, mTx := range mTxs { mTx := mTx // force variable shadowing to avoid pointer conflicts - mTxLog := log.WithFields("monitoredTx", mTx.DepositID) + mTxLog := logger.WithFields("monitoredTx", mTx.DepositID) mTxLog.Infof("processing tx with nonce %d", mTx.Nonce) + ctx := log.CtxWithLogger(ctx, mTxLog) // check if any of the txs in the history was mined mined := false @@ -328,7 +338,7 @@ func (tm *ClaimTxManager) monitorTxs(ctx context.Context) error { continue } } - log.Infof("tx: %s not mined yet", txHash.String()) + mTxLog.Infof("tx: %s not mined yet", txHash.String()) allHistoryTxMined = false continue @@ -396,7 +406,7 @@ func (tm *ClaimTxManager) monitorTxs(ctx context.Context) error { } //Multiply gasPrice by 10 to increase the efficiency of the tx in the sequence mTx.GasPrice = big.NewInt(0).Mul(gasPrice, big.NewInt(10)) //nolint:gomnd - log.Infof("Using gasPrice: %s. The gasPrice suggested by the network is %s", mTx.GasPrice.String(), gasPrice.String()) + mTxLog.Infof("Using gasPrice: %s. The gasPrice suggested by the network is %s", mTx.GasPrice.String(), gasPrice.String()) // rebuild transaction tx := mTx.Tx() @@ -459,12 +469,12 @@ func (tm *ClaimTxManager) monitorTxs(ctx context.Context) error { } } - err = tm.storage.Commit(tm.ctx, dbTx) + err = tm.storage.Commit(ctx, dbTx) if err != nil { - log.Errorf("UpdateClaimTx committing dbTx, err: %v", err) - rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) + logger.Errorf("UpdateClaimTx committing dbTx, err: %v", err) + rollbackErr := tm.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("claimtxman error rolling back state. RollbackErr: %s, err: %v", rollbackErr.Error(), err) + logger.Errorf("claimtxman error rolling back state. RollbackErr: %s, err: %v", rollbackErr.Error(), err) return rollbackErr } return err @@ -476,7 +486,7 @@ func (tm *ClaimTxManager) monitorTxs(ctx context.Context) error { // accordingly to the current information stored and the current // state of the blockchain func (tm *ClaimTxManager) ReviewMonitoredTx(ctx context.Context, mTx *ctmtypes.MonitoredTx, reviewNonce bool) error { - mTxLog := log.WithFields("monitoredTx", mTx.DepositID) + mTxLog := log.LoggerFromCtx(ctx) mTxLog.Debug("reviewing") // get gas tx := ethereum.CallMsg{ @@ -489,7 +499,7 @@ func (tm *ClaimTxManager) ReviewMonitoredTx(ctx context.Context, mTx *ctmtypes.M for i := 1; err != nil && err.Error() != runtime.ErrExecutionReverted.Error() && i < tm.cfg.RetryNumber; i++ { mTxLog.Warnf("error during gas estimation. Retrying... Error: %v, Data: %s", err, common.Bytes2Hex(tx.Data)) time.Sleep(tm.cfg.RetryInterval.Duration) - gas, err = tm.l2Node.EstimateGas(tm.ctx, tx) + gas, err = tm.l2Node.EstimateGas(ctx, tx) } if err != nil { err := fmt.Errorf("failed to estimate gas. 
Error: %v, Data: %s", err, common.Bytes2Hex(tx.Data)) @@ -505,7 +515,7 @@ func (tm *ClaimTxManager) ReviewMonitoredTx(ctx context.Context, mTx *ctmtypes.M if reviewNonce { // check nonce - nonce, err := tm.getNextNonce(mTx.From) + nonce, err := tm.getNextNonce(ctx, mTx.From) if err != nil { err := fmt.Errorf("failed to get nonce: %v", err) mTxLog.Errorf(err.Error()) diff --git a/claimtxman/interfaces.go b/claimtxman/interfaces.go index 47f16235..64f6832c 100644 --- a/claimtxman/interfaces.go +++ b/claimtxman/interfaces.go @@ -23,6 +23,6 @@ type storageInterface interface { } type bridgeServiceInterface interface { - GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) + GetClaimProof(ctx context.Context, depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) GetDepositStatus(ctx context.Context, depositCount uint, destNetworkID uint) (string, error) } diff --git a/etherman/etherman.go b/etherman/etherman.go index 3fe0a52b..6ddac390 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -215,6 +215,7 @@ type Order struct { } func (etherMan *Client) readEvents(ctx context.Context, query ethereum.FilterQuery) ([]Block, map[common.Hash][]Order, error) { + logger := log.LoggerFromCtx(ctx) logs, err := etherMan.EtherClient.FilterLogs(ctx, query) if err != nil { return nil, nil, err @@ -224,7 +225,7 @@ func (etherMan *Client) readEvents(ctx context.Context, query ethereum.FilterQue for _, vLog := range logs { err := etherMan.processEvent(ctx, vLog, &blocks, &blocksOrder) if err != nil { - log.Warnf("error processing event. Retrying... Error: %s. vLog: %+v", err.Error(), vLog) + logger.Warnf("error processing event. Retrying... Error: %s. vLog: %+v", err.Error(), vLog) return nil, nil, err } } @@ -232,6 +233,7 @@ func (etherMan *Client) readEvents(ctx context.Context, query ethereum.FilterQue } func (etherMan *Client) processEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { + logger := log.LoggerFromCtx(ctx) switch vLog.Topics[0] { case updateGlobalExitRootSignatureHash: return etherMan.updateGlobalExitRootEvent(ctx, vLog, blocks, blocksOrder) @@ -246,139 +248,141 @@ func (etherMan *Client) processEvent(ctx context.Context, vLog types.Log, blocks case newWrappedTokenEventSignatureHash: return etherMan.tokenWrappedEvent(ctx, vLog, blocks, blocksOrder) case initializedProxySignatureHash: - log.Debug("Initialized proxy event detected. Ignoring...") + logger.Debug("Initialized proxy event detected. Ignoring...") return nil case adminChangedSignatureHash: - log.Debug("AdminChanged event detected. Ignoring...") + logger.Debug("AdminChanged event detected. Ignoring...") return nil case beaconUpgradedSignatureHash: - log.Debug("BeaconUpgraded event detected. Ignoring...") + logger.Debug("BeaconUpgraded event detected. Ignoring...") return nil case upgradedSignatureHash: - log.Debug("Upgraded event detected. Ignoring...") + logger.Debug("Upgraded event detected. Ignoring...") return nil case transferOwnershipSignatureHash: - log.Debug("TransferOwnership event detected. Ignoring...") + logger.Debug("TransferOwnership event detected. Ignoring...") return nil case setBatchFeeSignatureHash: - log.Debug("SetBatchFee event detected. Ignoring...") + logger.Debug("SetBatchFee event detected. 
Ignoring...") return nil case setTrustedAggregatorSignatureHash: - log.Debug("SetTrustedAggregator event detected. Ignoring...") + logger.Debug("SetTrustedAggregator event detected. Ignoring...") return nil case setVerifyBatchTimeTargetSignatureHash: - log.Debug("SetVerifyBatchTimeTarget event detected. Ignoring...") + logger.Debug("SetVerifyBatchTimeTarget event detected. Ignoring...") return nil case setMultiplierBatchFeeSignatureHash: - log.Debug("SetMultiplierBatchFee event detected. Ignoring...") + logger.Debug("SetMultiplierBatchFee event detected. Ignoring...") return nil case setPendingStateTimeoutSignatureHash: - log.Debug("SetPendingStateTimeout event detected. Ignoring...") + logger.Debug("SetPendingStateTimeout event detected. Ignoring...") return nil case setTrustedAggregatorTimeoutSignatureHash: - log.Debug("SetTrustedAggregatorTimeout event detected. Ignoring...") + logger.Debug("SetTrustedAggregatorTimeout event detected. Ignoring...") return nil case overridePendingStateSignatureHash: - log.Debug("OverridePendingState event detected. Ignoring...") + logger.Debug("OverridePendingState event detected. Ignoring...") return nil case proveNonDeterministicPendingStateSignatureHash: - log.Debug("ProveNonDeterministicPendingState event detected. Ignoring...") + logger.Debug("ProveNonDeterministicPendingState event detected. Ignoring...") return nil case consolidatePendingStateSignatureHash: - log.Debug("ConsolidatePendingState event detected. Ignoring...") + logger.Debug("ConsolidatePendingState event detected. Ignoring...") return nil case verifyBatchesTrustedAggregatorSignatureHash: return etherMan.verifyBatchesTrustedAggregatorEvent(ctx, vLog, blocks, blocksOrder) case rollupManagerVerifyBatchesSignatureHash: return etherMan.verifyBatchesEvent(ctx, vLog, blocks, blocksOrder) case onSequenceBatchesSignatureHash: - log.Debug("OnSequenceBatches event detected. Ignoring...") + logger.Debug("OnSequenceBatches event detected. Ignoring...") return nil case updateRollupSignatureHash: - log.Debug("UpdateRollup event detected. Ignoring...") + logger.Debug("UpdateRollup event detected. Ignoring...") return nil case addExistingRollupSignatureHash: return etherMan.AddExistingRollupEvent(ctx, vLog, blocks, blocksOrder) case createNewRollupSignatureHash: return etherMan.createNewRollupEvent(ctx, vLog, blocks, blocksOrder) case obsoleteRollupTypeSignatureHash: - log.Debug("ObsoleteRollupType event detected. Ignoring...") + logger.Debug("ObsoleteRollupType event detected. Ignoring...") return nil case addNewRollupTypeSignatureHash: - log.Debug("AddNewRollupType event detected. Ignoring...") + logger.Debug("AddNewRollupType event detected. Ignoring...") return nil case initializedSignatureHash: - log.Debug("Initialized event detected. Ignoring...") + logger.Debug("Initialized event detected. Ignoring...") return nil case roleAdminChangedSignatureHash: - log.Debug("RoleAdminChanged event detected. Ignoring...") + logger.Debug("RoleAdminChanged event detected. Ignoring...") return nil case roleGrantedSignatureHash: - log.Debug("RoleGranted event detected. Ignoring...") + logger.Debug("RoleGranted event detected. Ignoring...") return nil case roleRevokedSignatureHash: - log.Debug("RoleRevoked event detected. Ignoring...") + logger.Debug("RoleRevoked event detected. Ignoring...") return nil case emergencyStateActivatedSignatureHash: - log.Debug("EmergencyStateActivated event detected. Ignoring...") + logger.Debug("EmergencyStateActivated event detected. 
Ignoring...") return nil case emergencyStateDeactivatedSignatureHash: - log.Debug("EmergencyStateDeactivated event detected. Ignoring...") + logger.Debug("EmergencyStateDeactivated event detected. Ignoring...") return nil case oldVerifyBatchesTrustedAggregatorSignatureHash: - log.Debug("OldVerifyBatchesTrustedAggregator event detected. Ignoring...") + logger.Debug("OldVerifyBatchesTrustedAggregator event detected. Ignoring...") return nil case updateZkEVMVersionSignatureHash: - log.Debug("UpdateZkEVMVersion event detected. Ignoring...") + logger.Debug("UpdateZkEVMVersion event detected. Ignoring...") return nil case oldConsolidatePendingStateSignatureHash: - log.Debug("OldConsolidatePendingState event detected. Ignoring...") + logger.Debug("OldConsolidatePendingState event detected. Ignoring...") return nil case oldOverridePendingStateSignatureHash: - log.Debug("OldOverridePendingState event detected. Ignoring...") + logger.Debug("OldOverridePendingState event detected. Ignoring...") return nil case sequenceBatchesPreEtrogSignatureHash: - log.Debug("SequenceBatchesPreEtrog event detected. Ignoring...") + logger.Debug("SequenceBatchesPreEtrog event detected. Ignoring...") return nil case setForceBatchTimeoutSignatureHash: - log.Debug("SetForceBatchTimeout event detected. Ignoring...") + logger.Debug("SetForceBatchTimeout event detected. Ignoring...") return nil case setTrustedSequencerURLSignatureHash: - log.Debug("SetTrustedSequencerURL event detected. Ignoring...") + logger.Debug("SetTrustedSequencerURL event detected. Ignoring...") return nil case setTrustedSequencerSignatureHash: - log.Debug("SetTrustedSequencer event detected. Ignoring...") + logger.Debug("SetTrustedSequencer event detected. Ignoring...") return nil case verifyBatchesSignatureHash: - log.Debug("VerifyBatches event detected. Ignoring...") + logger.Debug("VerifyBatches event detected. Ignoring...") return nil case sequenceForceBatchesSignatureHash: - log.Debug("SequenceForceBatches event detected. Ignoring...") + logger.Debug("SequenceForceBatches event detected. Ignoring...") return nil case forceBatchSignatureHash: - log.Debug("ForceBatch event detected. Ignoring...") + logger.Debug("ForceBatch event detected. Ignoring...") return nil case sequenceBatchesSignatureHash: - log.Debug("SequenceBatches event detected. Ignoring...") + logger.Debug("SequenceBatches event detected. Ignoring...") return nil case acceptAdminRoleSignatureHash: - log.Debug("AcceptAdminRole event detected. Ignoring...") + logger.Debug("AcceptAdminRole event detected. Ignoring...") return nil case transferAdminRoleSignatureHash: - log.Debug("TransferAdminRole event detected. Ignoring...") + logger.Debug("TransferAdminRole event detected. Ignoring...") return nil } - log.Warnf("Event not registered: %+v", vLog) + logger.Warnf("Event not registered: %+v", vLog) return nil } func (etherMan *Client) updateGlobalExitRootEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("UpdateGlobalExitRoot event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("UpdateGlobalExitRoot event detected. 
Processing...") return etherMan.processUpdateGlobalExitRootEvent(ctx, vLog.Topics[1], vLog.Topics[2], vLog, blocks, blocksOrder) } func (etherMan *Client) updateL1InfoTreeEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("UpdateL1InfoTree event detected") + logger := log.LoggerFromCtx(ctx) + logger.Debug("UpdateL1InfoTree event detected") globalExitRoot, err := etherMan.PolygonZkEVMGlobalExitRoot.ParseUpdateL1InfoTree(vLog) if err != nil { return err @@ -387,6 +391,7 @@ func (etherMan *Client) updateL1InfoTreeEvent(ctx context.Context, vLog types.Lo } func (etherMan *Client) processUpdateGlobalExitRootEvent(ctx context.Context, mainnetExitRoot, rollupExitRoot common.Hash, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { + logger := log.LoggerFromCtx(ctx) fullBlock, err := etherMan.EtherClient.BlockByHash(ctx, vLog.BlockHash) if err != nil { return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) @@ -406,7 +411,7 @@ func (etherMan *Client) processUpdateGlobalExitRootEvent(ctx context.Context, ma } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].GlobalExitRoots = append((*blocks)[len(*blocks)-1].GlobalExitRoots, gExitRoot) } else { - log.Error("Error processing UpdateGlobalExitRoot event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing UpdateGlobalExitRoot event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing UpdateGlobalExitRoot event") } or := Order{ @@ -418,7 +423,8 @@ func (etherMan *Client) processUpdateGlobalExitRootEvent(ctx context.Context, ma } func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("Deposit event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("Deposit event detected. Processing...") d, err := etherMan.PolygonBridge.ParseBridgeEvent(vLog) if err != nil { return err @@ -446,7 +452,7 @@ func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].Deposits = append((*blocks)[len(*blocks)-1].Deposits, deposit) } else { - log.Error("Error processing deposit event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing deposit event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing Deposit event") } or := Order{ @@ -458,7 +464,8 @@ func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks } func (etherMan *Client) oldClaimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("Old claim event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("Old claim event detected. 
Processing...") c, err := etherMan.OldPolygonBridge.ParseClaimEvent(vLog) if err != nil { return err @@ -467,7 +474,8 @@ func (etherMan *Client) oldClaimEvent(ctx context.Context, vLog types.Log, block } func (etherMan *Client) newClaimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("New claim event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("New claim event detected. Processing...") c, err := etherMan.PolygonBridge.ParseClaimEvent(vLog) if err != nil { return err @@ -480,6 +488,7 @@ func (etherMan *Client) newClaimEvent(ctx context.Context, vLog types.Log, block } func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order, amount *big.Int, destinationAddress, originAddress common.Address, Index uint, originNetwork uint, rollupIndex uint64, mainnetFlag bool) error { + logger := log.LoggerFromCtx(ctx) var claim Claim claim.Amount = amount claim.DestinationAddress = destinationAddress @@ -502,7 +511,7 @@ func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks * } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].Claims = append((*blocks)[len(*blocks)-1].Claims, claim) } else { - log.Error("Error processing claim event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing claim event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing claim event") } or := Order{ @@ -514,7 +523,8 @@ func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks * } func (etherMan *Client) tokenWrappedEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("TokenWrapped event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("TokenWrapped event detected. Processing...") tw, err := etherMan.PolygonBridge.ParseNewWrappedToken(vLog) if err != nil { return err @@ -536,7 +546,7 @@ func (etherMan *Client) tokenWrappedEvent(ctx context.Context, vLog types.Log, b } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].Tokens = append((*blocks)[len(*blocks)-1].Tokens, tokenWrapped) } else { - log.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing TokenWrapped event") } or := Order{ @@ -594,26 +604,29 @@ func (etherMan *Client) GetNetworkID(ctx context.Context) (uint, error) { } func (etherMan *Client) verifyBatchesTrustedAggregatorEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("VerifyBatchesTrustedAggregator event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("VerifyBatchesTrustedAggregator event detected. Processing...") vb, err := etherMan.PolygonRollupManager.ParseVerifyBatchesTrustedAggregator(vLog) if err != nil { - log.Error("error parsing verifyBatchesTrustedAggregator event. Error: ", err) + logger.Error("error parsing verifyBatchesTrustedAggregator event. 
Error: ", err) return err } return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, uint(vb.RollupID), vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) } func (etherMan *Client) verifyBatchesEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("RollupManagerVerifyBatches event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("RollupManagerVerifyBatches event detected. Processing...") vb, err := etherMan.PolygonRollupManager.ParseVerifyBatches(vLog) if err != nil { - log.Error("error parsing VerifyBatches event. Error: ", err) + logger.Error("error parsing VerifyBatches event. Error: ", err) return err } return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, uint(vb.RollupID), vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) } func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order, rollupID uint, batchNum uint64, stateRoot, localExitRoot common.Hash, aggregator common.Address) error { + logger := log.LoggerFromCtx(ctx) var verifyBatch VerifiedBatch verifyBatch.BlockNumber = vLog.BlockNumber verifyBatch.BatchNumber = batchNum @@ -634,7 +647,7 @@ func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, block } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].VerifiedBatches = append((*blocks)[len(*blocks)-1].VerifiedBatches, verifyBatch) } else { - log.Error("Error processing verifyBatch event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing verifyBatch event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing verifyBatch event") } or := Order{ @@ -681,7 +694,8 @@ func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint, localExitRootIndex } func (etherMan *Client) createNewRollupEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("CreateNewRollup event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("CreateNewRollup event detected. Processing...") rollup, err := etherMan.PolygonRollupManager.ParseCreateNewRollup(vLog) if err != nil { return err @@ -701,7 +715,7 @@ func (etherMan *Client) createNewRollupEvent(ctx context.Context, vLog types.Log } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].ActivateEtrog = append((*blocks)[len(*blocks)-1].ActivateEtrog, true) } else { - log.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing TokenWrapped event") } or := Order{ @@ -713,7 +727,8 @@ func (etherMan *Client) createNewRollupEvent(ctx context.Context, vLog types.Log } func (etherMan *Client) AddExistingRollupEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("AddExistingRollup event detected. Processing...") + logger := log.LoggerFromCtx(ctx) + logger.Debug("AddExistingRollup event detected. 
Processing...") rollup, err := etherMan.PolygonRollupManager.ParseAddExistingRollup(vLog) if err != nil { return err @@ -733,7 +748,7 @@ func (etherMan *Client) AddExistingRollupEvent(ctx context.Context, vLog types.L } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].ActivateEtrog = append((*blocks)[len(*blocks)-1].ActivateEtrog, true) } else { - log.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + logger.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing TokenWrapped event") } or := Order{ diff --git a/go.mod b/go.mod index 7d2ac7c0..412ba552 100644 --- a/go.mod +++ b/go.mod @@ -158,3 +158,5 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) + +replace github.com/0xPolygonHermez/zkevm-node => github.com/okx/x1-node v0.2.0-RC1.0.20240223065502-1389c1b4fe32 diff --git a/go.sum b/go.sum index 6810d21d..da34f047 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,6 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18 h1:InqeTcHrNbfj1OUfn2aFplFay7ibd7KhYqvmMZYZfn0= github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE= -github.com/0xPolygonHermez/zkevm-node v0.5.0-RC18 h1:KwU+cI5ezKTXp8f3XGRnQ5eJs+9TrOsk6NuXfCSWD7Y= -github.com/0xPolygonHermez/zkevm-node v0.5.0-RC18/go.mod h1:5gBhcNqgT9Ur/VkkO1hC9kxuUPwywdMMBJ9LdC2csU8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -595,6 +593,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/okx/x1-node v0.2.0-RC1.0.20240223065502-1389c1b4fe32 h1:pc44MYOd/OgE6OPm8KBVm2AAX9+ARKVf+nM/vDBDCT4= +github.com/okx/x1-node v0.2.0-RC1.0.20240223065502-1389c1b4fe32/go.mod h1:5gBhcNqgT9Ur/VkkO1hC9kxuUPwywdMMBJ9LdC2csU8= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= diff --git a/server/interceptors.go b/server/interceptors.go new file mode 100644 index 00000000..cd356f5c --- /dev/null +++ b/server/interceptors.go @@ -0,0 +1,46 @@ +package server + +import ( + "context" + "time" + + "github.com/0xPolygonHermez/zkevm-bridge-service/utils" + "github.com/0xPolygonHermez/zkevm-node/log" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +func NewRequestLogInterceptor() grpc.UnaryServerInterceptor { + return func(ctx 
context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + startTime := time.Now() + methodName := info.FullMethod + + // Set up the trace id for logging + traceID := utils.GenerateTraceID() + logger := utils.LoggerWithTraceID(log.LoggerFromCtx(ctx), traceID) + ctx = log.CtxWithLogger(ctx, logger) + + logger.Debugf("method[%v] start", methodName) + + // Return the trace id to the client through the header + header := metadata.New(map[string]string{ + "trace-id": traceID, + }) + err := grpc.SendHeader(ctx, header) + if err != nil { + logger.Infof("method[%v] SendHeader error[%v]", methodName, err) + return nil, err + } + + // Actual process of the request + resp, err := handler(ctx, req) + + duration := time.Since(startTime) + reqJson, _ := protojson.Marshal(req.(proto.Message)) + respJson, _ := protojson.Marshal(resp.(proto.Message)) + logger.Infof("method[%v] req[%v] resp[%v] err[%v] processTime[%v]", methodName, string(reqJson), string(respJson), err, duration.String()) + return resp, err + } +} diff --git a/server/server.go b/server/server.go index 6c841de3..575d7657 100644 --- a/server/server.go +++ b/server/server.go @@ -75,7 +75,7 @@ func runGRPCServer(ctx context.Context, bridgeServer pb.BridgeServiceServer, por return err } - server := grpc.NewServer() + server := grpc.NewServer(grpc.UnaryInterceptor(NewRequestLogInterceptor())) pb.RegisterBridgeServiceServer(server, bridgeServer) healthService := newHealthChecker() diff --git a/server/service.go b/server/service.go index 40e2a3ba..4d4517a7 100644 --- a/server/service.go +++ b/server/service.go @@ -150,8 +150,8 @@ func (s *bridgeService) getRollupExitProof(rollupIndex uint, root common.Hash, d } // GetClaimProof returns the merkle proof to claim the given deposit. -func (s *bridgeService) GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { - ctx := context.Background() +func (s *bridgeService) GetClaimProof(ctx context.Context, depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { + logger := log.LoggerFromCtx(ctx) if dbTx == nil { // if the call comes from the rest API deposit, err := s.storage.GetDeposit(ctx, depositCnt, networkID, nil) @@ -182,19 +182,19 @@ func (s *bridgeService) GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) ( if networkID == 0 { // Mainnet merkleProof, err = s.getProof(depositCnt, globalExitRoot.ExitRoots[tID], dbTx) if err != nil { - log.Error("error getting merkleProof. Error: ", err) + logger.Error("error getting merkleProof. Error: ", err) return nil, nil, nil, fmt.Errorf("getting the proof failed, error: %v, network: %d", err, networkID) } rollupMerkleProof = emptyProof() } else { // Rollup rollupMerkleProof, rollupLeaf, err = s.getRollupExitProof(s.rollupID-1, globalExitRoot.ExitRoots[tID], dbTx) if err != nil { - log.Error("error getting rollupProof. Error: ", err) + logger.Error("error getting rollupProof. Error: ", err) return nil, nil, nil, fmt.Errorf("getting the rollup proof failed, error: %v, network: %d", err, networkID) } merkleProof, err = s.getProof(depositCnt, rollupLeaf, dbTx) if err != nil { - log.Error("error getting merkleProof. Error: ", err) + logger.Error("error getting merkleProof. 
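For reference, the changes above lean on context-aware logging helpers from the forked zkevm-node `log` package pulled in through the `replace` directive. The sketch below shows what `log.CtxWithLogger` and `log.LoggerFromCtx` are assumed to do; it is an illustrative guess, not the fork's actual implementation (which likely wraps its own logger type rather than using `*zap.SugaredLogger` directly).

```go
// Hypothetical sketch of the context-logger helpers used throughout this PR.
package log

import (
	"context"

	"go.uber.org/zap"
)

// ctxKey is an unexported key type so the logger entry cannot collide with
// other context values.
type ctxKey struct{}

// CtxWithLogger returns a copy of ctx that carries the given logger.
func CtxWithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context {
	return context.WithValue(ctx, ctxKey{}, logger)
}

// LoggerFromCtx returns the logger attached to ctx, falling back to the
// process-wide sugared logger when none has been attached.
func LoggerFromCtx(ctx context.Context) *zap.SugaredLogger {
	if l, ok := ctx.Value(ctxKey{}).(*zap.SugaredLogger); ok {
		return l
	}
	return zap.S()
}
```

With helpers of this shape, `NewRequestLogInterceptor` attaches a per-request, trace-id-tagged logger to the context once, and every downstream `log.LoggerFromCtx(ctx)` call in the claim manager, synchronizer, and bridge service reuses the same trace id.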
Error: ", err) return nil, nil, nil, fmt.Errorf("getting the proof failed, error: %v, network: %d", err, networkID) } } @@ -333,7 +333,7 @@ func (s *bridgeService) GetClaims(ctx context.Context, req *pb.GetClaimsRequest) // GetProof returns the merkle proof for the given deposit. // Bridge rest API endpoint func (s *bridgeService) GetProof(ctx context.Context, req *pb.GetProofRequest) (*pb.GetProofResponse, error) { - globalExitRoot, merkleProof, rollupMerkleProof, err := s.GetClaimProof(uint(req.DepositCnt), uint(req.NetId), nil) + globalExitRoot, merkleProof, rollupMerkleProof, err := s.GetClaimProof(ctx, uint(req.DepositCnt), uint(req.NetId), nil) if err != nil { return nil, err } diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 57b63f2a..8f8c81d7 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -8,6 +8,7 @@ import ( "time" "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" + "github.com/0xPolygonHermez/zkevm-bridge-service/utils" "github.com/0xPolygonHermez/zkevm-bridge-service/utils/gerror" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/ethereum/go-ethereum/common" @@ -53,22 +54,24 @@ func NewSynchronizer( if err != nil { log.Fatal("error getting networkID. Error: ", err) } + logger := log.WithFields("component", fmt.Sprintf("synchronizer_%v", networkID)) + ctx = log.CtxWithLogger(ctx, logger) ger, err := storage.(storageInterface).GetLatestL1SyncedExitRoot(ctx, nil) if err != nil { if err == gerror.ErrStorageNotFound { ger.ExitRoots = []common.Hash{{}, {}} } else { - log.Fatal("error getting last L1 synced exitroot. Error: ", err) + logger.Fatal("error getting last L1 synced exitroot. Error: ", err) } } // Read db to see if the LxLy is already activated isActivated, err := storage.(storageInterface).IsLxLyActivated(ctx, nil) if err != nil { - log.Fatal("error checking if LxLyEtrog is activated. Error: ", err) + logger.Fatal("error checking if LxLyEtrog is activated. Error: ", err) } if isActivated { - log.Info("LxLyEtrog already activated") + logger.Info("LxLyEtrog already activated") } if networkID == 0 { return &ClientSynchronizer{ @@ -104,64 +107,67 @@ var waitDuration = time.Duration(0) // Sync function will read the last state synced and will continue from that point. // Sync() will read blockchain events to detect rollup updates func (s *ClientSynchronizer) Sync() error { + logger := log.LoggerFromCtx(s.ctx) // If there is no lastEthereumBlock means that sync from the beginning is necessary. If not, it continues from the retrieved ethereum block // Get the latest synced block. If there is no block on db, use genesis block - log.Infof("NetworkID: %d, Synchronization started", s.networkID) + logger.Infof("NetworkID: %d, Synchronization started", s.networkID) lastBlockSynced, err := s.storage.GetLastBlock(s.ctx, s.networkID, nil) if err != nil { if err == gerror.ErrStorageNotFound { - log.Warnf("networkID: %d, error getting the latest ethereum block. No data stored. Setting genesis block. Error: %v", s.networkID, err) + logger.Warnf("networkID: %d, error getting the latest ethereum block. No data stored. Setting genesis block. Error: %v", s.networkID, err) lastBlockSynced = ðerman.Block{ BlockNumber: s.genBlockNumber, NetworkID: s.networkID, } - log.Warnf("networkID: %d, error getting the latest block. No data stored. Using initial block: %+v. Error: %s", + logger.Warnf("networkID: %d, error getting the latest block. No data stored. Using initial block: %+v. 
Error: %s", s.networkID, lastBlockSynced, err.Error()) } else { - log.Fatalf("networkID: %d, unexpected error getting the latest block. Error: %s", s.networkID, err.Error()) + logger.Fatalf("networkID: %d, unexpected error getting the latest block. Error: %s", s.networkID, err.Error()) } } - log.Debugf("NetworkID: %d, initial lastBlockSynced: %+v", s.networkID, lastBlockSynced) + logger.Debugf("NetworkID: %d, initial lastBlockSynced: %+v", s.networkID, lastBlockSynced) for { select { case <-s.ctx.Done(): - log.Debugf("NetworkID: %d, synchronizer ctx done", s.networkID) + logger.Debugf("NetworkID: %d, synchronizer ctx done", s.networkID) return nil case <-time.After(waitDuration): - log.Debugf("NetworkID: %d, syncing...", s.networkID) + logger := utils.LoggerWithRandomTraceID(logger) + ctx := log.CtxWithLogger(s.ctx, logger) + logger.Debugf("NetworkID: %d, syncing...", s.networkID) //Sync L1Blocks - if lastBlockSynced, err = s.syncBlocks(lastBlockSynced); err != nil { - log.Warnf("networkID: %d, error syncing blocks: %v", s.networkID, err) - lastBlockSynced, err = s.storage.GetLastBlock(s.ctx, s.networkID, nil) + if lastBlockSynced, err = s.syncBlocks(ctx, lastBlockSynced); err != nil { + logger.Warnf("networkID: %d, error syncing blocks: %v", s.networkID, err) + lastBlockSynced, err = s.storage.GetLastBlock(ctx, s.networkID, nil) if err != nil { - log.Fatalf("networkID: %d, error getting lastBlockSynced to resume the synchronization... Error: ", s.networkID, err) + logger.Fatalf("networkID: %d, error getting lastBlockSynced to resume the synchronization... Error: ", s.networkID, err) } - if s.ctx.Err() != nil { + if ctx.Err() != nil { continue } } if !s.synced { // Check latest Block - header, err := s.etherMan.HeaderByNumber(s.ctx, nil) + header, err := s.etherMan.HeaderByNumber(ctx, nil) if err != nil { - log.Warnf("networkID: %d, error getting latest block from. Error: %s", s.networkID, err.Error()) + logger.Warnf("networkID: %d, error getting latest block from. Error: %s", s.networkID, err.Error()) continue } lastKnownBlock := header.Number.Uint64() if lastBlockSynced.BlockNumber == lastKnownBlock && !s.synced { - log.Infof("NetworkID %d Synced!", s.networkID) + logger.Infof("NetworkID %d Synced!", s.networkID) waitDuration = s.cfg.SyncInterval.Duration s.synced = true s.chSynced <- s.networkID } if lastBlockSynced.BlockNumber > lastKnownBlock { if s.networkID == 0 { - log.Fatalf("networkID: %d, error: latest Synced BlockNumber (%d) is higher than the latest Proposed block (%d) in the network", s.networkID, lastBlockSynced.BlockNumber, lastKnownBlock) + logger.Fatalf("networkID: %d, error: latest Synced BlockNumber (%d) is higher than the latest Proposed block (%d) in the network", s.networkID, lastBlockSynced.BlockNumber, lastKnownBlock) } else { - log.Errorf("networkID: %d, error: latest Synced BlockNumber (%d) is higher than the latest Proposed block (%d) in the network", s.networkID, lastBlockSynced.BlockNumber, lastKnownBlock) - err = s.resetState(lastKnownBlock) + logger.Errorf("networkID: %d, error: latest Synced BlockNumber (%d) is higher than the latest Proposed block (%d) in the network", s.networkID, lastBlockSynced.BlockNumber, lastKnownBlock) + err = s.resetState(ctx, lastKnownBlock) if err != nil { - log.Errorf("networkID: %d, error resetting the state to a previous block. Error: %v", s.networkID, err) + logger.Errorf("networkID: %d, error resetting the state to a previous block. 
Error: %v", s.networkID, err) continue } } @@ -170,10 +176,10 @@ func (s *ClientSynchronizer) Sync() error { if s.networkID != 0 { continue } - log.Infof("networkID: %d, Virtual state is synced, getting trusted state", s.networkID) - err = s.syncTrustedState() + logger.Infof("networkID: %d, Virtual state is synced, getting trusted state", s.networkID) + err = s.syncTrustedState(ctx) if err != nil { - log.Errorf("networkID: %d, error getting current trusted state", s.networkID) + logger.Errorf("networkID: %d, error getting current trusted state", s.networkID) } } } @@ -185,23 +191,24 @@ func (s *ClientSynchronizer) Stop() { s.cancelCtx() } -func (s *ClientSynchronizer) syncTrustedState() error { - lastGER, err := s.zkEVMClient.GetLatestGlobalExitRoot(s.ctx) +func (s *ClientSynchronizer) syncTrustedState(ctx context.Context) error { + logger := log.LoggerFromCtx(ctx) + lastGER, err := s.zkEVMClient.GetLatestGlobalExitRoot(ctx) if err != nil { - log.Warnf("networkID: %d, failed to get latest ger from trusted state. Error: %v", s.networkID, err) + logger.Warnf("networkID: %d, failed to get latest ger from trusted state. Error: %v", s.networkID, err) return err } if lastGER == (common.Hash{}) { - log.Debugf("networkID: %d, syncTrustedState: skipping GlobalExitRoot because there is no result", s.networkID) + logger.Debugf("networkID: %d, syncTrustedState: skipping GlobalExitRoot because there is no result", s.networkID) return nil } - exitRoots, err := s.zkEVMClient.ExitRootsByGER(s.ctx, lastGER) + exitRoots, err := s.zkEVMClient.ExitRootsByGER(ctx, lastGER) if err != nil { - log.Warnf("networkID: %d, failed to get exitRoots from trusted state. Error: %v", s.networkID, err) + logger.Warnf("networkID: %d, failed to get exitRoots from trusted state. Error: %v", s.networkID, err) return err } if exitRoots == nil { - log.Debugf("networkID: %d, syncTrustedState: skipping exitRoots because there is no result", s.networkID) + logger.Debugf("networkID: %d, syncTrustedState: skipping exitRoots because there is no result", s.networkID) return nil } ger := ðerman.GlobalExitRoot{ @@ -211,9 +218,9 @@ func (s *ClientSynchronizer) syncTrustedState() error { exitRoots.RollupExitRoot, }, } - isUpdated, err := s.storage.AddTrustedGlobalExitRoot(s.ctx, ger, nil) + isUpdated, err := s.storage.AddTrustedGlobalExitRoot(ctx, ger, nil) if err != nil { - log.Error("networkID: %d, error storing latest trusted globalExitRoot. Error: %v", s.networkID, err) + logger.Error("networkID: %d, error storing latest trusted globalExitRoot. Error: %v", s.networkID, err) return err } if isUpdated { @@ -223,24 +230,25 @@ func (s *ClientSynchronizer) syncTrustedState() error { } // This function syncs the node from a specific block to the latest -func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*etherman.Block, error) { +func (s *ClientSynchronizer) syncBlocks(ctx context.Context, lastBlockSynced *etherman.Block) (*etherman.Block, error) { + logger := log.LoggerFromCtx(ctx) // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok. - block, err := s.checkReorg(lastBlockSynced) + block, err := s.checkReorg(ctx, lastBlockSynced) if err != nil { - log.Errorf("networkID: %d, error checking reorgs. Retrying... Err: %s", s.networkID, err.Error()) + logger.Errorf("networkID: %d, error checking reorgs. Retrying... 
Err: %s", s.networkID, err.Error()) return lastBlockSynced, fmt.Errorf("networkID: %d, error checking reorgs", s.networkID) } if block != nil { - err = s.resetState(block.BlockNumber) + err = s.resetState(ctx, block.BlockNumber) if err != nil { - log.Errorf("networkID: %d, error resetting the state to a previous block. Retrying... Error: %s", s.networkID, err.Error()) + logger.Errorf("networkID: %d, error resetting the state to a previous block. Retrying... Error: %s", s.networkID, err.Error()) return lastBlockSynced, fmt.Errorf("networkID: %d, error resetting the state to a previous block", s.networkID) } return block, nil } - log.Debugf("NetworkID: %d, after checkReorg: no reorg detected", s.networkID) + logger.Debugf("NetworkID: %d, after checkReorg: no reorg detected", s.networkID) // Call the blockchain to retrieve data - header, err := s.etherMan.HeaderByNumber(s.ctx, nil) + header, err := s.etherMan.HeaderByNumber(ctx, nil) if err != nil { return lastBlockSynced, err } @@ -254,30 +262,30 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether for { toBlock := fromBlock + s.cfg.SyncChunkSize - log.Debugf("NetworkID: %d, Getting bridge info from block %d to block %d", s.networkID, fromBlock, toBlock) + logger.Debugf("NetworkID: %d, Getting bridge info from block %d to block %d", s.networkID, fromBlock, toBlock) // This function returns the rollup information contained in the ethereum blocks and an extra param called order. // Order param is a map that contains the event order to allow the synchronizer store the info in the same order that is read. // Name can be different in the order struct. This name is an identifier to check if the next info that must be stored in the db. // The value pos (position) tells what is the array index where this value is. - blocks, order, err := s.etherMan.GetRollupInfoByBlockRange(s.ctx, fromBlock, &toBlock) + blocks, order, err := s.etherMan.GetRollupInfoByBlockRange(ctx, fromBlock, &toBlock) if err != nil { return lastBlockSynced, err } - err = s.processBlockRange(blocks, order) + err = s.processBlockRange(ctx, blocks, order) if err != nil { return lastBlockSynced, err } if len(blocks) > 0 { lastBlockSynced = &blocks[len(blocks)-1] for i := range blocks { - log.Debug("NetworkID: ", s.networkID, ", Position: ", i, ". BlockNumber: ", blocks[i].BlockNumber, ". BlockHash: ", blocks[i].BlockHash) + logger.Debug("NetworkID: ", s.networkID, ", Position: ", i, ". BlockNumber: ", blocks[i].BlockNumber, ". BlockHash: ", blocks[i].BlockHash) } } fromBlock = toBlock + 1 if lastKnownBlock.Cmp(new(big.Int).SetUint64(toBlock)) < 1 { if !s.synced { - log.Infof("NetworkID %d Synced!", s.networkID) + logger.Infof("NetworkID %d Synced!", s.networkID) waitDuration = s.cfg.SyncInterval.Duration s.synced = true s.chSynced <- s.networkID @@ -286,7 +294,7 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether } if len(blocks) == 0 { // If there is no events in the checked blocks range and lastKnownBlock > fromBlock. // Store the latest block of the block range. 
Get block info and process the block - fb, err := s.etherMan.EthBlockByNumber(s.ctx, toBlock) + fb, err := s.etherMan.EthBlockByNumber(ctx, toBlock) if err != nil { return lastBlockSynced, err } @@ -296,13 +304,13 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether ParentHash: fb.ParentHash(), ReceivedAt: time.Unix(int64(fb.Time()), 0), } - err = s.processBlockRange([]etherman.Block{b}, order) + err = s.processBlockRange(ctx, []etherman.Block{b}, order) if err != nil { return lastBlockSynced, err } lastBlockSynced = &b - log.Debugf("NetworkID: %d, Storing empty block. BlockNumber: %d. BlockHash: %s", + logger.Debugf("NetworkID: %d, Storing empty block. BlockNumber: %d. BlockHash: %s", s.networkID, b.BlockNumber, b.BlockHash.String()) } } @@ -310,26 +318,27 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether return lastBlockSynced, nil } -func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error { +func (s *ClientSynchronizer) processBlockRange(ctx context.Context, blocks []etherman.Block, order map[common.Hash][]etherman.Order) error { + logger := log.LoggerFromCtx(ctx) // New info has to be included into the db using the state var isNewGer bool for i := range blocks { // Begin db transaction - dbTx, err := s.storage.BeginDBTransaction(s.ctx) + dbTx, err := s.storage.BeginDBTransaction(ctx) if err != nil { - log.Errorf("networkID: %d, error creating db transaction to store block. BlockNumber: %d. Error: %v", + logger.Errorf("networkID: %d, error creating db transaction to store block. BlockNumber: %d. Error: %v", s.networkID, blocks[i].BlockNumber, err) return err } // Add block information blocks[i].NetworkID = s.networkID - log.Infof("NetworkID: %d. Syncing block: %d", s.networkID, blocks[i].BlockNumber) - blockID, err := s.storage.AddBlock(s.ctx, &blocks[i], dbTx) + logger.Infof("NetworkID: %d. Syncing block: %d", s.networkID, blocks[i].BlockNumber) + blockID, err := s.storage.AddBlock(ctx, &blocks[i], dbTx) if err != nil { - log.Errorf("networkID: %d, error storing block. BlockNumber: %d, error: %v", s.networkID, blocks[i].BlockNumber, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error storing block. BlockNumber: %d, error: %v", s.networkID, blocks[i].BlockNumber, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state to store block. 
BlockNumber: %d, rollbackErr: %v, err: %s", s.networkID, blocks[i].BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -339,42 +348,42 @@ func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order ma switch element.Name { case etherman.GlobalExitRootsOrder: isNewGer = true - err = s.processGlobalExitRoot(blocks[i].GlobalExitRoots[element.Pos], blockID, dbTx) + err = s.processGlobalExitRoot(ctx, blocks[i].GlobalExitRoots[element.Pos], blockID, dbTx) if err != nil { return err } case etherman.DepositsOrder: - err = s.processDeposit(blocks[i].Deposits[element.Pos], blockID, dbTx) + err = s.processDeposit(ctx, blocks[i].Deposits[element.Pos], blockID, dbTx) if err != nil { return err } case etherman.ClaimsOrder: - err = s.processClaim(blocks[i].Claims[element.Pos], blockID, dbTx) + err = s.processClaim(ctx, blocks[i].Claims[element.Pos], blockID, dbTx) if err != nil { return err } case etherman.TokensOrder: - err = s.processTokenWrapped(blocks[i].Tokens[element.Pos], blockID, dbTx) + err = s.processTokenWrapped(ctx, blocks[i].Tokens[element.Pos], blockID, dbTx) if err != nil { return err } case etherman.VerifyBatchOrder: - err = s.processVerifyBatch(blocks[i].VerifiedBatches[element.Pos], blockID, dbTx) + err = s.processVerifyBatch(ctx, blocks[i].VerifiedBatches[element.Pos], blockID, dbTx) if err != nil { return err } case etherman.ActivateEtrogOrder: // this is activated when the bridge detects the CreateNewRollup or the AddExistingRollup event from the rollupManager - log.Info("Event received. Activating LxLyEtrog...") + logger.Info("Event received. Activating LxLyEtrog...") } } - err = s.storage.Commit(s.ctx, dbTx) + err = s.storage.Commit(ctx, dbTx) if err != nil { - log.Errorf("networkID: %d, error committing state to store block. BlockNumber: %d, err: %v", + logger.Errorf("networkID: %d, error committing state to store block. BlockNumber: %d, err: %v", s.networkID, blocks[i].BlockNumber, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, err: %s", s.networkID, blocks[i].BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -383,13 +392,13 @@ func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order ma } if isNewGer { // Send latest GER stored to claimTxManager - ger, err := s.storage.GetLatestL1SyncedExitRoot(s.ctx, nil) + ger, err := s.storage.GetLatestL1SyncedExitRoot(ctx, nil) if err != nil { - log.Errorf("networkID: %d, error getting latest GER stored on database. Error: %v", s.networkID, err) + logger.Errorf("networkID: %d, error getting latest GER stored on database. Error: %v", s.networkID, err) return err } if s.l1RollupExitRoot != ger.ExitRoots[1] { - log.Debugf("Updating ger: %+v", ger) + logger.Debugf("Updating ger: %+v", ger) s.l1RollupExitRoot = ger.ExitRoots[1] s.chExitRootEvent <- ger } @@ -398,53 +407,54 @@ func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order ma } // This function allows reset the state until an specific ethereum block -func (s *ClientSynchronizer) resetState(blockNumber uint64) error { - log.Infof("NetworkID: %d. 
Reverting synchronization to block: %d", s.networkID, blockNumber) - dbTx, err := s.storage.BeginDBTransaction(s.ctx) +func (s *ClientSynchronizer) resetState(ctx context.Context, blockNumber uint64) error { + logger := log.LoggerFromCtx(ctx) + logger.Infof("NetworkID: %d. Reverting synchronization to block: %d", s.networkID, blockNumber) + dbTx, err := s.storage.BeginDBTransaction(ctx) if err != nil { - log.Errorf("networkID: %d, Error starting a db transaction to reset the state. Error: %v", s.networkID, err) + logger.Errorf("networkID: %d, Error starting a db transaction to reset the state. Error: %v", s.networkID, err) return err } - err = s.storage.Reset(s.ctx, blockNumber, s.networkID, dbTx) + err = s.storage.Reset(ctx, blockNumber, s.networkID, dbTx) if err != nil { - log.Errorf("networkID: %d, error resetting the state. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error resetting the state. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, blockNumber, rollbackErr, err.Error()) return rollbackErr } return err } - depositCnt, err := s.storage.GetNumberDeposits(s.ctx, s.networkID, blockNumber, dbTx) + depositCnt, err := s.storage.GetNumberDeposits(ctx, s.networkID, blockNumber, dbTx) if err != nil { - log.Error("networkID: %d, error getting GetNumberDeposits. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Error("networkID: %d, error getting GetNumberDeposits. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, blockNumber, rollbackErr, err.Error()) return rollbackErr } return err } - err = s.bridgeCtrl.ReorgMT(s.ctx, uint(depositCnt), s.networkID, dbTx) + err = s.bridgeCtrl.ReorgMT(ctx, uint(depositCnt), s.networkID, dbTx) if err != nil { - log.Error("networkID: %d, error resetting ReorgMT the state. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Error("networkID: %d, error resetting ReorgMT the state. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, blockNumber, rollbackErr, err.Error()) return rollbackErr } return err } - err = s.storage.Commit(s.ctx, dbTx) + err = s.storage.Commit(ctx, dbTx) if err != nil { - log.Errorf("networkID: %d, error committing the resetted state. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error committing the resetted state. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. 
BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, blockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -462,56 +472,57 @@ If hash or hash parent don't match, reorg detected and the function will return must be reverted. Then, check the previous ethereum block synced, get block info from the blockchain and check hash and has parent. This operation has to be done until a match is found. */ -func (s *ClientSynchronizer) checkReorg(latestBlock *etherman.Block) (*etherman.Block, error) { +func (s *ClientSynchronizer) checkReorg(ctx context.Context, latestBlock *etherman.Block) (*etherman.Block, error) { + logger := log.LoggerFromCtx(ctx) // This function only needs to worry about reorgs if some of the reorganized blocks contained rollup info. latestBlockSynced := *latestBlock var depth uint64 for { - block, err := s.etherMan.EthBlockByNumber(s.ctx, latestBlock.BlockNumber) + block, err := s.etherMan.EthBlockByNumber(ctx, latestBlock.BlockNumber) if err != nil { - log.Errorf("networkID: %d, error getting latest block synced from blockchain. Block: %d, error: %v", + logger.Errorf("networkID: %d, error getting latest block synced from blockchain. Block: %d, error: %v", s.networkID, latestBlock.BlockNumber, err) return nil, err } if block.NumberU64() != latestBlock.BlockNumber { err = fmt.Errorf("networkID: %d, wrong ethereum block retrieved from blockchain. Block numbers don't match."+ " BlockNumber stored: %d. BlockNumber retrieved: %d", s.networkID, latestBlock.BlockNumber, block.NumberU64()) - log.Error("error: ", err) + logger.Error("error: ", err) return nil, err } // Compare hashes if (block.Hash() != latestBlock.BlockHash || block.ParentHash() != latestBlock.ParentHash) && latestBlock.BlockNumber > s.genBlockNumber { - log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockNumber: ", latestBlock.BlockNumber) - log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockHash: ", latestBlock.BlockHash) - log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockHashParent: ", latestBlock.ParentHash) - log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockNumber: ", latestBlock.BlockNumber, block.NumberU64()) - log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHash: ", block.Hash()) - log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHashParent: ", block.ParentHash()) + logger.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockNumber: ", latestBlock.BlockNumber) + logger.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockHash: ", latestBlock.BlockHash) + logger.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockHashParent: ", latestBlock.ParentHash) + logger.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockNumber: ", latestBlock.BlockNumber, block.NumberU64()) + logger.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHash: ", block.Hash()) + logger.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHashParent: ", block.ParentHash()) depth++ - log.Info("NetworkID: ", s.networkID, ", REORG: Looking for the latest correct block. Depth: ", depth) + logger.Info("NetworkID: ", s.networkID, ", REORG: Looking for the latest correct block. Depth: ", depth) // Reorg detected. 
Getting previous block - dbTx, err := s.storage.BeginDBTransaction(s.ctx) + dbTx, err := s.storage.BeginDBTransaction(ctx) if err != nil { - log.Errorf("networkID: %d, error creating db transaction to get previous blocks. Error: %v", s.networkID, err) + logger.Errorf("networkID: %d, error creating db transaction to get previous blocks. Error: %v", s.networkID, err) return nil, err } - latestBlock, err = s.storage.GetPreviousBlock(s.ctx, s.networkID, depth, dbTx) - errC := s.storage.Commit(s.ctx, dbTx) + latestBlock, err = s.storage.GetPreviousBlock(ctx, s.networkID, depth, dbTx) + errC := s.storage.Commit(ctx, dbTx) if errC != nil { - log.Errorf("networkID: %d, error committing dbTx, err: %v", s.networkID, errC) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error committing dbTx, err: %v", s.networkID, errC) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. RollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state. RollbackErr: %v, err: %s", s.networkID, rollbackErr, errC.Error()) return nil, rollbackErr } return nil, errC } if errors.Is(err, gerror.ErrStorageNotFound) { - log.Warnf("networkID: %d, error checking reorg: previous block not found in db: %v", s.networkID, err) + logger.Warnf("networkID: %d, error checking reorg: previous block not found in db: %v", s.networkID, err) return ðerman.Block{}, nil } else if err != nil { - log.Errorf("networkID: %d, error detected getting previous block: %v", s.networkID, err) + logger.Errorf("networkID: %d, error detected getting previous block: %v", s.networkID, err) return nil, err } } else { @@ -519,43 +530,44 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *etherman.Block) (*etherman. } } if latestBlockSynced.BlockHash != latestBlock.BlockHash { - log.Infof("NetworkID: %d, reorg detected in block: %d", s.networkID, latestBlockSynced.BlockNumber) + logger.Infof("NetworkID: %d, reorg detected in block: %d", s.networkID, latestBlockSynced.BlockNumber) return latestBlock, nil } - log.Debugf("NetworkID: %d, no reorg detected", s.networkID) + logger.Debugf("NetworkID: %d, no reorg detected", s.networkID) return nil, nil } -func (s *ClientSynchronizer) processVerifyBatch(verifyBatch etherman.VerifiedBatch, blockID uint64, dbTx pgx.Tx) error { +func (s *ClientSynchronizer) processVerifyBatch(ctx context.Context, verifyBatch etherman.VerifiedBatch, blockID uint64, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) if verifyBatch.RollupID == s.etherMan.GetRollupID()-1 { // Just check that the calculated RollupExitRoot is fine network, err := s.bridgeCtrl.GetNetworkID(s.networkID) if err != nil { - log.Errorf("networkID: %d, error getting NetworkID. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error getting NetworkID. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state. 
BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) return rollbackErr } return err } - ok, err := s.storage.CheckIfRootExists(s.ctx, verifyBatch.LocalExitRoot.Bytes(), network, dbTx) + ok, err := s.storage.CheckIfRootExists(ctx, verifyBatch.LocalExitRoot.Bytes(), network, dbTx) if err != nil { - log.Errorf("networkID: %d, error Checking if root exists. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error Checking if root exists. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) return rollbackErr } return err } if !ok { - log.Errorf("networkID: %d, Root: %s doesn't exist!", s.networkID, verifyBatch.LocalExitRoot.String()) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, Root: %s doesn't exist!", s.networkID, verifyBatch.LocalExitRoot.String()) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -568,12 +580,12 @@ func (s *ClientSynchronizer) processVerifyBatch(verifyBatch etherman.VerifiedBat RollupId: verifyBatch.RollupID, } // Update rollupExitRoot - err := s.bridgeCtrl.AddRollupExitLeaf(s.ctx, rollupLeaf, dbTx) + err := s.bridgeCtrl.AddRollupExitLeaf(ctx, rollupLeaf, dbTx) if err != nil { - log.Errorf("networkID: %d, error adding rollup exit leaf. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error adding rollup exit leaf. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -582,15 +594,16 @@ func (s *ClientSynchronizer) processVerifyBatch(verifyBatch etherman.VerifiedBat return nil } -func (s *ClientSynchronizer) processGlobalExitRoot(globalExitRoot etherman.GlobalExitRoot, blockID uint64, dbTx pgx.Tx) error { +func (s *ClientSynchronizer) processGlobalExitRoot(ctx context.Context, globalExitRoot etherman.GlobalExitRoot, blockID uint64, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) // Store GlobalExitRoot globalExitRoot.BlockID = blockID - err := s.storage.AddGlobalExitRoot(s.ctx, &globalExitRoot, dbTx) + err := s.storage.AddGlobalExitRoot(ctx, &globalExitRoot, dbTx) if err != nil { - log.Errorf("networkID: %d, error storing the GlobalExitRoot in processGlobalExitRoot. BlockNumber: %d. Error: %v", s.networkID, globalExitRoot.BlockNumber, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error storing the GlobalExitRoot in processGlobalExitRoot. BlockNumber: %d. 
Error: %v", s.networkID, globalExitRoot.BlockNumber, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + logger.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", s.networkID, globalExitRoot.BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -599,27 +612,28 @@ func (s *ClientSynchronizer) processGlobalExitRoot(globalExitRoot etherman.Globa return nil } -func (s *ClientSynchronizer) processDeposit(deposit etherman.Deposit, blockID uint64, dbTx pgx.Tx) error { +func (s *ClientSynchronizer) processDeposit(ctx context.Context, deposit etherman.Deposit, blockID uint64, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) deposit.BlockID = blockID deposit.NetworkID = s.networkID - depositID, err := s.storage.AddDeposit(s.ctx, &deposit, dbTx) + depositID, err := s.storage.AddDeposit(ctx, &deposit, dbTx) if err != nil { - log.Errorf("networkID: %d, failed to store new deposit locally, BlockNumber: %d, Deposit: %+v err: %v", s.networkID, deposit.BlockNumber, deposit, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, failed to store new deposit locally, BlockNumber: %d, Deposit: %+v err: %v", s.networkID, deposit.BlockNumber, deposit, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %v, rollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %v, rollbackErr: %v, err: %s", s.networkID, deposit.BlockNumber, rollbackErr, err.Error()) return rollbackErr } return err } - err = s.bridgeCtrl.AddDeposit(s.ctx, &deposit, depositID, dbTx) + err = s.bridgeCtrl.AddDeposit(ctx, &deposit, depositID, dbTx) if err != nil { - log.Errorf("networkID: %d, failed to store new deposit in the bridge tree, BlockNumber: %d, Deposit: %+v err: %v", s.networkID, deposit.BlockNumber, deposit, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, failed to store new deposit in the bridge tree, BlockNumber: %d, Deposit: %+v err: %v", s.networkID, deposit.BlockNumber, deposit, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %v, rollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %v, rollbackErr: %v, err: %s", s.networkID, deposit.BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -628,19 +642,20 @@ func (s *ClientSynchronizer) processDeposit(deposit etherman.Deposit, blockID ui return nil } -func (s *ClientSynchronizer) processClaim(claim etherman.Claim, blockID uint64, dbTx pgx.Tx) error { +func (s *ClientSynchronizer) processClaim(ctx context.Context, claim etherman.Claim, blockID uint64, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) if claim.RollupIndex != uint64(s.etherMan.GetRollupID()) && claim.RollupIndex != 0 { - log.Debugf("Claim for different Rollup (RollupID: %d, RollupIndex: %d). Ignoring...", s.etherMan.GetRollupID(), claim.RollupIndex) + logger.Debugf("Claim for different Rollup (RollupID: %d, RollupIndex: %d). 
Ignoring...", s.etherMan.GetRollupID(), claim.RollupIndex) return nil } claim.BlockID = blockID claim.NetworkID = s.networkID - err := s.storage.AddClaim(s.ctx, &claim, dbTx) + err := s.storage.AddClaim(ctx, &claim, dbTx) if err != nil { - log.Errorf("networkID: %d, error storing new Claim in Block: %d, Claim: %+v, err: %v", s.networkID, claim.BlockNumber, claim, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error storing new Claim in Block: %d, Claim: %+v, err: %v", s.networkID, claim.BlockNumber, claim, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, err: %s", s.networkID, claim.BlockNumber, rollbackErr, err.Error()) return rollbackErr } @@ -649,15 +664,16 @@ func (s *ClientSynchronizer) processClaim(claim etherman.Claim, blockID uint64, return nil } -func (s *ClientSynchronizer) processTokenWrapped(tokenWrapped etherman.TokenWrapped, blockID uint64, dbTx pgx.Tx) error { +func (s *ClientSynchronizer) processTokenWrapped(ctx context.Context, tokenWrapped etherman.TokenWrapped, blockID uint64, dbTx pgx.Tx) error { + logger := log.LoggerFromCtx(ctx) tokenWrapped.BlockID = blockID tokenWrapped.NetworkID = s.networkID - err := s.storage.AddTokenWrapped(s.ctx, &tokenWrapped, dbTx) + err := s.storage.AddTokenWrapped(ctx, &tokenWrapped, dbTx) if err != nil { - log.Errorf("networkID: %d, error storing new L1 TokenWrapped in Block: %d, ExitRoot: %+v, err: %v", s.networkID, tokenWrapped.BlockNumber, tokenWrapped, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) + logger.Errorf("networkID: %d, error storing new L1 TokenWrapped in Block: %d, ExitRoot: %+v, err: %v", s.networkID, tokenWrapped.BlockNumber, tokenWrapped, err) + rollbackErr := s.storage.Rollback(ctx, dbTx) if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state to store block. BlockNumber: %d, rollbackErr: %v, err: %s", + logger.Errorf("networkID: %d, error rolling back state to store block. 
BlockNumber: %d, rollbackErr: %v, err: %s", s.networkID, tokenWrapped.BlockNumber, rollbackErr, err.Error()) return rollbackErr } diff --git a/utils/client.go b/utils/client.go index e96a1536..182e1439 100644 --- a/utils/client.go +++ b/utils/client.go @@ -159,6 +159,7 @@ func (c *Client) MintERC20(ctx context.Context, erc20Addr common.Address, amount func (c *Client) SendBridgeAsset(ctx context.Context, tokenAddr common.Address, amount *big.Int, destNetwork uint32, destAddr *common.Address, metadata []byte, auth *bind.TransactOpts, ) error { + logger := log.LoggerFromCtx(ctx) emptyAddr := common.Address{} if tokenAddr == emptyAddr { auth.Value = amount @@ -168,7 +169,7 @@ func (c *Client) SendBridgeAsset(ctx context.Context, tokenAddr common.Address, } tx, err := c.bridge.BridgeAsset(auth, destNetwork, *destAddr, amount, tokenAddr, true, metadata) if err != nil { - log.Error("Error: ", err) + logger.Error("Error: ", err) return err } // wait transfer to be included in a batch @@ -180,9 +181,10 @@ func (c *Client) SendBridgeAsset(ctx context.Context, tokenAddr common.Address, func (c *Client) SendBridgeMessage(ctx context.Context, destNetwork uint32, destAddr common.Address, metadata []byte, auth *bind.TransactOpts, ) error { + logger := log.LoggerFromCtx(ctx) tx, err := c.bridge.BridgeMessage(auth, destNetwork, destAddr, true, metadata) if err != nil { - log.Error("Error: ", err) + logger.Error("Error: ", err) return err } // wait transfer to be included in a batch @@ -192,6 +194,7 @@ func (c *Client) SendBridgeMessage(ctx context.Context, destNetwork uint32, dest // BuildSendClaim builds a tx data to be sent to the bridge method SendClaim. func (c *Client) BuildSendClaim(ctx context.Context, deposit *etherman.Deposit, smtProof [mtHeight][keyLen]byte, smtRollupProof [mtHeight][keyLen]byte, globalExitRoot *etherman.GlobalExitRoot, nonce, gasPrice int64, gasLimit uint64, rollupID uint, auth *bind.TransactOpts) (*types.Transaction, error) { + logger := log.LoggerFromCtx(ctx) opts := *auth opts.NoSend = true // force nonce, gas limit and gas price to avoid querying it from the chain @@ -217,7 +220,7 @@ func (c *Client) BuildSendClaim(ctx context.Context, deposit *etherman.Deposit, if tx != nil { txHash = tx.Hash().String() } - log.Error("Error: ", err, ". Tx Hash: ", txHash) + logger.Error("Error: ", err, ". Tx Hash: ", txHash) return nil, fmt.Errorf("failed to build SendClaim tx, err: %v", err) } @@ -226,6 +229,7 @@ func (c *Client) BuildSendClaim(ctx context.Context, deposit *etherman.Deposit, // SendClaim sends a claim transaction. func (c *Client) SendClaim(ctx context.Context, deposit *pb.Deposit, smtProof [mtHeight][keyLen]byte, smtRollupProof [mtHeight][keyLen]byte, globalExitRoot *etherman.GlobalExitRoot, auth *bind.TransactOpts) error { + logger := log.LoggerFromCtx(ctx) amount, _ := new(big.Int).SetString(deposit.Amount, encoding.Base10) var ( tx *types.Transaction @@ -242,7 +246,7 @@ func (c *Client) SendClaim(ctx context.Context, deposit *pb.Deposit, smtProof [m if tx != nil { txHash = tx.Hash().String() } - log.Error("Error: ", err, ". Tx Hash: ", txHash) + logger.Error("Error: ", err, ". 
Tx Hash: ", txHash) return err } diff --git a/utils/constant.go b/utils/constant.go new file mode 100644 index 00000000..411b461c --- /dev/null +++ b/utils/constant.go @@ -0,0 +1,6 @@ +package utils + +const ( + TraceID = "traceID" + traceIDLen = 16 +) diff --git a/utils/helpers.go b/utils/helpers.go index 3320d474..08e8e16c 100644 --- a/utils/helpers.go +++ b/utils/helpers.go @@ -3,6 +3,8 @@ package utils import ( "crypto/sha256" "math/rand" + + "github.com/0xPolygonHermez/zkevm-node/log" ) func generateRandomString(length int) string { @@ -19,3 +21,21 @@ func GenerateRandomHash() [sha256.Size]byte { rs := generateRandomString(10) //nolint:gomnd return sha256.Sum256([]byte(rs)) } + +// GenerateTraceID generates a random trace ID. +func GenerateTraceID() string { + return generateRandomString(traceIDLen) +} + +// LoggerWithTraceID returns a wrapping logger with a specific trace id +func LoggerWithTraceID(logger *log.Logger, traceID string) *log.Logger { + if logger == nil { + logger = log.GetDefaultLog() + } + return logger.WithFields(TraceID, traceID) +} + +// LoggerWithRandomTraceID returns a wrapping logger with a random trace id +func LoggerWithRandomTraceID(logger *log.Logger) *log.Logger { + return LoggerWithTraceID(logger, GenerateTraceID()) +}